/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2.c (excerpt)
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32
bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
    u32 diff;
    /* ... compute used-descriptor count from prod/cons ... */
    return bp->tx_ring_size - diff;
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
    u32 val;

    spin_lock_bh(&bp->indirect_lock);
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
    spin_unlock_bh(&bp->indirect_lock);
    return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
    spin_lock_bh(&bp->indirect_lock);
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
    spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
    bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
    return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
    spin_lock_bh(&bp->indirect_lock);
    if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
        BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
        BNX2_WR(bp, BNX2_CTX_CTX_CTRL, /* ... write request ... */);
        /* poll for completion: */
        val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
        /* ... */
    } else {
        BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
        BNX2_WR(bp, BNX2_CTX_DATA, val);
    }
    spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
    struct bnx2 *bp = netdev_priv(dev);

    switch (/* info->cmd */) {
    /* ... write-indirect command: */
        bnx2_reg_wr_ind(bp, io->offset, io->data);
    /* ... read-indirect command: */
        io->data = bnx2_reg_rd_ind(bp, io->offset);
    /* ... context-write command: */
        bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
    /* ... */
    }
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
    struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
    struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
    int sb_id;

    if (bp->flags & BNX2_FLAG_USING_MSIX) {
        sb_id = bp->irq_nvecs;
        /* ... */
    } else {
        /* ... */
    }
    /* ... */
    cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
    /* ... */
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
    struct bnx2 *bp = netdev_priv(dev);
    struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

    /* ... */
    if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
        return /* no iSCSI resources */;

    bp->cnic_data = data;
    rcu_assign_pointer(bp->cnic_ops, ops);
    /* ... */
    bnx2_setup_cnic_irq_info(bp);

    return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
    struct bnx2 *bp = netdev_priv(dev);
    struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
    struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

    mutex_lock(&bp->cnic_lock);
    /* ... */
    RCU_INIT_POINTER(bp->cnic_ops, NULL);
    mutex_unlock(&bp->cnic_lock);
    /* ... */
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
    struct bnx2 *bp = netdev_priv(dev);
    struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

    /* ... */
    cp->chip_id = bp->chip_id;
    cp->pdev = bp->pdev;
    cp->io_base = bp->regview;
    /* ... */
}
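Aside: the PCICFG window pair above is a classic two-step indirection, write the address register, then access the data register, and that is exactly why both steps sit under bp->indirect_lock. A minimal user-space model of the pattern (all names here are illustrative, not driver API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of a windowed register file: one address register
 * selects which backing word the data register accesses. */
struct win_regs {
    pthread_mutex_t lock;     /* plays the role of bp->indirect_lock */
    uint32_t window_addr;     /* like BNX2_PCICFG_REG_WINDOW_ADDRESS */
    uint32_t backing[16];     /* device registers behind the window */
};

static uint32_t reg_rd_ind(struct win_regs *w, uint32_t offset)
{
    uint32_t val;

    pthread_mutex_lock(&w->lock);       /* both steps must be atomic */
    w->window_addr = offset;            /* step 1: select the word */
    val = w->backing[w->window_addr];   /* step 2: read through window */
    pthread_mutex_unlock(&w->lock);
    return val;
}

static void reg_wr_ind(struct win_regs *w, uint32_t offset, uint32_t val)
{
    pthread_mutex_lock(&w->lock);
    w->window_addr = offset;
    w->backing[w->window_addr] = val;
    pthread_mutex_unlock(&w->lock);
}

int main(void)
{
    struct win_regs w = { .lock = PTHREAD_MUTEX_INITIALIZER };

    reg_wr_ind(&w, 3, 0xdeadbeef);
    printf("0x%08x\n", reg_rd_ind(&w, 3));   /* prints 0xdeadbeef */
    return 0;
}

Without the lock, a second caller could move window_addr between the two steps and the read or write would land on the wrong register.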
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
    struct cnic_ops *c_ops;
    struct cnic_ctl_info info;

    mutex_lock(&bp->cnic_lock);
    c_ops = rcu_dereference_protected(bp->cnic_ops,
                                      lockdep_is_held(&bp->cnic_lock));
    if (c_ops) {
        info.cmd = CNIC_CTL_STOP_CMD;
        c_ops->cnic_ctl(bp->cnic_data, &info);
    }
    mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
    struct cnic_ops *c_ops;
    struct cnic_ctl_info info;

    mutex_lock(&bp->cnic_lock);
    c_ops = rcu_dereference_protected(bp->cnic_ops,
                                      lockdep_is_held(&bp->cnic_lock));
    if (c_ops) {
        if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
            struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
            /* ... */
        }
        info.cmd = CNIC_CTL_START_CMD;
        c_ops->cnic_ctl(bp->cnic_data, &info);
    }
    mutex_unlock(&bp->cnic_lock);
}

#else   /* CNIC not configured: empty stubs */

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
        BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
        /* ... udelay ... */
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) |
        BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
        BNX2_EMAC_MDIO_COMM_START_BUSY;
    BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        /* ... udelay ... */
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
            /* ... */
            val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
            break;
        }
    }
    /* ... set *val and ret from val1 (or time out) ... */

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
        BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
        BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) | val |
        BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
        BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
    BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY))
            break;
        /* ... udelay ... */
    }
    /* ... ret reflects timeout ... */

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
        BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
    }

    return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
    int i;
    struct bnx2_napi *bnapi;

    for (i = 0; i < bp->irq_nvecs; i++) {
        bnapi = &bp->bnx2_napi[i];
        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
            BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
    }
    BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
    int i;
    struct bnx2_napi *bnapi;

    for (i = 0; i < bp->irq_nvecs; i++) {
        bnapi = &bp->bnx2_napi[i];

        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
            BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
            BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
            bnapi->last_status_idx);

        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
            BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
            bnapi->last_status_idx);
    }
    BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
    int i;

    atomic_inc(&bp->intr_sem);
    if (!netif_running(bp->dev))
        return;

    bnx2_disable_int(bp);
    for (i = 0; i < bp->irq_nvecs; i++)
        synchronize_irq(bp->irq_tbl[i].vector);
}
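Both PHY accessors above share one shape: compose a command word from the PHY address and register number in the high bits, write it to EMAC_MDIO_COMM, then poll that same register until the hardware clears its start/busy bit or a retry budget runs out. A hedged stand-alone simulation of that poll loop (the bit layout here is invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MDIO_COMM_START_BUSY (1u << 29)   /* illustrative bit position */

static uint32_t mdio_comm;                /* models the COMM register */
static uint16_t phy_regs[32];             /* simulated PHY register file */

/* Simulated hardware: completes a pending transaction by clearing
 * the busy bit and returning the register data in the low 16 bits. */
static void device_step(void)
{
    if (mdio_comm & MDIO_COMM_START_BUSY) {
        uint32_t reg = (mdio_comm >> 16) & 0x1f;
        mdio_comm = phy_regs[reg];
    }
}

static bool mdio_read(uint32_t phy_addr, uint32_t reg, uint32_t *val)
{
    /* Compose the command word: PHY address and register number in
     * the high bits, as bnx2_read_phy() does. */
    mdio_comm = (phy_addr << 21) | (reg << 16) | MDIO_COMM_START_BUSY;

    for (int i = 0; i < 50; i++) {   /* bounded poll, like the driver */
        device_step();               /* real hardware: udelay() instead */
        uint32_t v = mdio_comm;
        if (!(v & MDIO_COMM_START_BUSY)) {
            *val = v & 0xffff;       /* data lives in the low 16 bits */
            return true;
        }
    }
    return false;                    /* timed out */
}

int main(void)
{
    uint32_t val;

    phy_regs[1] = 0x796d;            /* pretend status-register contents */
    if (mdio_read(1, 1, &val))
        printf("phy reg 1 = 0x%04x\n", val);
    return 0;
}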
static void
bnx2_napi_disable(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++)
        napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++)
        napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
    if (stop_cnic)
        bnx2_cnic_stop(bp);
    if (netif_running(bp->dev)) {
        bnx2_napi_disable(bp);
        netif_tx_disable(bp->dev);
    }
    bnx2_disable_int_sync(bp);
    netif_carrier_off(bp->dev);     /* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
    if (atomic_dec_and_test(&bp->intr_sem)) {
        if (netif_running(bp->dev)) {
            netif_tx_wake_all_queues(bp->dev);
            spin_lock_bh(&bp->phy_lock);
            if (bp->link_up)
                netif_carrier_on(bp->dev);
            spin_unlock_bh(&bp->phy_lock);
            bnx2_napi_enable(bp);
            bnx2_enable_int(bp);
            if (start_cnic)
                bnx2_cnic_start(bp);
        }
    }
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->num_tx_rings; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

        if (txr->tx_desc_ring) {
            dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
                              txr->tx_desc_ring,
                              txr->tx_desc_mapping);
            txr->tx_desc_ring = NULL;
        }
        /* ... free the software tx_buf ring ... */
    }
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->num_rx_rings; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        int j;

        for (j = 0; j < bp->rx_max_ring; j++) {
            if (rxr->rx_desc_ring[j])
                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                  rxr->rx_desc_ring[j],
                                  rxr->rx_desc_mapping[j]);
            /* ... */
        }
        /* ... vfree the software rings ... */
        for (j = 0; j < bp->rx_max_pg_ring; j++) {
            if (rxr->rx_pg_desc_ring[j])
                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                  rxr->rx_pg_desc_ring[j],
                                  rxr->rx_pg_desc_mapping[j]);
            /* ... */
        }
    }
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->num_tx_rings; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

        /* ... allocate the software tx_buf ring ... */
        txr->tx_desc_ring =
            dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
                               &txr->tx_desc_mapping, GFP_KERNEL);
        if (txr->tx_desc_ring == NULL)
            return -ENOMEM;
    }
    return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->num_rx_rings; i++) {
        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        int j;

        rxr->rx_buf_ring =
            vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (rxr->rx_buf_ring == NULL)
            return -ENOMEM;

        for (j = 0; j < bp->rx_max_ring; j++) {
            rxr->rx_desc_ring[j] =
                dma_alloc_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                   &rxr->rx_desc_mapping[j],
                                   GFP_KERNEL);
            /* ... */
        }

        if (bp->rx_pg_ring_size) {
            rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
                                      bp->rx_max_pg_ring);
            if (rxr->rx_pg_ring == NULL)
                return -ENOMEM;

            for (j = 0; j < bp->rx_max_pg_ring; j++) {
                rxr->rx_pg_desc_ring[j] =
                    dma_alloc_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                       &rxr->rx_pg_desc_mapping[j],
                                       GFP_KERNEL);
                /* ... */
            }
        }
    }
    return 0;
}
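Note how bnx2_disable_int_sync() and bnx2_netif_start() pair up through bp->intr_sem: every stop path increments the counter, and the start path only re-arms NAPI and interrupts when its atomic_dec_and_test() brings the count back to zero, so nested stop/start brackets compose safely. A small model of that gate:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int intr_sem;         /* plays the role of bp->intr_sem */

static void netif_stop(void)
{
    atomic_fetch_add(&intr_sem, 1); /* quiesce side: always increments */
    /* ... disable interrupts, NAPI, TX queues ... */
}

static void netif_start(void)
{
    /* Only the outermost start (count falling to zero) re-enables. */
    if (atomic_fetch_sub(&intr_sem, 1) - 1 == 0)
        printf("re-armed interrupts and NAPI\n");
    else
        printf("still quiesced (nested stop pending)\n");
}

int main(void)
{
    netif_stop();
    netif_stop();    /* nested quiesce, e.g. a reset inside a timeout */
    netif_start();   /* still quiesced */
    netif_start();   /* re-armed */
    return 0;
}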
static void
bnx2_free_mem(struct bnx2 *bp)
{
    int i;
    struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

    bnx2_free_tx_mem(bp);
    bnx2_free_rx_mem(bp);

    for (i = 0; i < bp->ctx_pages; i++) {
        if (bp->ctx_blk[i]) {
            dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
                              bp->ctx_blk[i],
                              bp->ctx_blk_mapping[i]);
            bp->ctx_blk[i] = NULL;
        }
    }
    if (bnapi->status_blk.msi) {
        dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
                          bnapi->status_blk.msi,
                          bp->status_blk_mapping);
        bnapi->status_blk.msi = NULL;
        bp->stats_blk = NULL;
    }
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
    int i, status_blk_size, err;
    struct bnx2_napi *bnapi;
    void *status_blk;

    /* Combined status and statistics block. */
    status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
    if (bp->flags & BNX2_FLAG_MSIX_CAP)
        status_blk_size = /* ... room for per-vector blocks ... */;

    bp->status_stats_size = status_blk_size +
                            sizeof(struct statistics_block);

    status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                     &bp->status_blk_mapping, GFP_KERNEL);
    if (status_blk == NULL)
        goto alloc_mem_err;

    bnapi = &bp->bnx2_napi[0];
    bnapi->status_blk.msi = status_blk;
    /* ... host-coalescing command/int numbers for vector 0 ... */
    if (bp->flags & BNX2_FLAG_MSIX_CAP) {
        for (i = 1; i < bp->irq_nvecs; i++) {
            bnapi = &bp->bnx2_napi[i];
            /* ... point each vector at its slice of status_blk ... */
        }
    }

    bp->stats_blk = status_blk + status_blk_size;

    bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
        bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
        if (bp->ctx_pages == 0)
            bp->ctx_pages = 1;
        for (i = 0; i < bp->ctx_pages; i++) {
            bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                    BNX2_PAGE_SIZE,
                                    &bp->ctx_blk_mapping[i],
                                    GFP_KERNEL);
            if (bp->ctx_blk[i] == NULL)
                goto alloc_mem_err;
        }
    }

    err = bnx2_alloc_rx_mem(bp);
    if (err)
        goto alloc_mem_err;

    err = bnx2_alloc_tx_mem(bp);
    if (err)
        goto alloc_mem_err;

    return 0;

alloc_mem_err:
    bnx2_free_mem(bp);
    return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
    u32 fw_link_status = 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return;

    if (bp->link_up) {
        u32 bmsr;

        switch (bp->line_speed) {
        case SPEED_10:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = /* 10 half */;
            else
                fw_link_status = /* 10 full */;
            break;
        case SPEED_100:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = /* 100 half */;
            else
                fw_link_status = /* 100 full */;
            break;
        case SPEED_1000:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = /* 1000 half */;
            else
                fw_link_status = /* 1000 full */;
            break;
        case SPEED_2500:
            if (bp->duplex == DUPLEX_HALF)
                fw_link_status = /* 2500 half */;
            else
                fw_link_status = /* 2500 full */;
            break;
        }
        /* ... link-up and autoneg status bits: */
        if (bp->autoneg) {
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

            if (/* autoneg incomplete */ ||
                bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                /* ... report parallel detection ... */;
        }
    } else
        fw_link_status = /* link down */;

    bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
    return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
        ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
         "Copper");
}
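bnx2_alloc_mem() makes a single coherent DMA allocation and carves it up: status block(s) at the front, then the statistics block at status_blk + status_blk_size, with the bus address offset the same way. A sketch of that one-allocation carve-up (struct contents and sizes here are illustrative placeholders):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct status_block { uint32_t attn_bits; /* ... */ };
struct stats_block  { uint64_t rx_octets; /* ... */ };

int main(void)
{
    /* One allocation, carved like bp->status_blk / bp->stats_blk. */
    size_t status_blk_size = sizeof(struct status_block);
    size_t total = status_blk_size + sizeof(struct stats_block);
    void *blk = calloc(1, total);       /* dma_zalloc_coherent() stand-in */
    if (!blk)
        return 1;

    struct status_block *status = blk;
    struct stats_block *stats =
        (void *)((char *)blk + status_blk_size);

    /* The DMA (bus) address gets the same offset applied:
     * stats_blk_mapping = status_blk_mapping + status_blk_size. */
    printf("status=%p stats=%p\n", (void *)status, (void *)stats);
    free(blk);
    return 0;
}

One allocation keeps the two hardware-visible blocks in one coherent mapping, so teardown is a single dma_free_coherent(), as bnx2_free_mem() shows.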
"full" : "half"); bnx2_report_link() 993 if (bp->flow_ctrl) { bnx2_report_link() 994 if (bp->flow_ctrl & FLOW_CTRL_RX) { bnx2_report_link() 996 if (bp->flow_ctrl & FLOW_CTRL_TX) bnx2_report_link() 1006 netif_carrier_off(bp->dev); bnx2_report_link() 1007 netdev_err(bp->dev, "NIC %s Link is Down\n", bnx2_report_link() 1008 bnx2_xceiver_str(bp)); bnx2_report_link() 1011 bnx2_report_fw_link(bp); bnx2_report_link() 1015 bnx2_resolve_flow_ctrl(struct bnx2 *bp) bnx2_resolve_flow_ctrl() argument 1019 bp->flow_ctrl = 0; bnx2_resolve_flow_ctrl() 1020 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != bnx2_resolve_flow_ctrl() 1023 if (bp->duplex == DUPLEX_FULL) { bnx2_resolve_flow_ctrl() 1024 bp->flow_ctrl = bp->req_flow_ctrl; bnx2_resolve_flow_ctrl() 1029 if (bp->duplex != DUPLEX_FULL) { bnx2_resolve_flow_ctrl() 1033 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_resolve_flow_ctrl() 1034 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { bnx2_resolve_flow_ctrl() 1037 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); bnx2_resolve_flow_ctrl() 1039 bp->flow_ctrl |= FLOW_CTRL_TX; bnx2_resolve_flow_ctrl() 1041 bp->flow_ctrl |= FLOW_CTRL_RX; bnx2_resolve_flow_ctrl() 1045 bnx2_read_phy(bp, bp->mii_adv, &local_adv); bnx2_resolve_flow_ctrl() 1046 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); bnx2_resolve_flow_ctrl() 1048 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_resolve_flow_ctrl() 1069 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; bnx2_resolve_flow_ctrl() 1072 bp->flow_ctrl = FLOW_CTRL_RX; bnx2_resolve_flow_ctrl() 1077 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; bnx2_resolve_flow_ctrl() 1085 bp->flow_ctrl = FLOW_CTRL_TX; bnx2_resolve_flow_ctrl() 1091 bnx2_5709s_linkup(struct bnx2 *bp) bnx2_5709s_linkup() argument 1095 bp->link_up = 1; bnx2_5709s_linkup() 1097 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); bnx2_5709s_linkup() 1098 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); bnx2_5709s_linkup() 1099 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); bnx2_5709s_linkup() 1101 if ((bp->autoneg & AUTONEG_SPEED) == 0) { bnx2_5709s_linkup() 1102 bp->line_speed = bp->req_line_speed; bnx2_5709s_linkup() 1103 bp->duplex = bp->req_duplex; bnx2_5709s_linkup() 1109 bp->line_speed = SPEED_10; bnx2_5709s_linkup() 1112 bp->line_speed = SPEED_100; bnx2_5709s_linkup() 1116 bp->line_speed = SPEED_1000; bnx2_5709s_linkup() 1119 bp->line_speed = SPEED_2500; bnx2_5709s_linkup() 1123 bp->duplex = DUPLEX_FULL; bnx2_5709s_linkup() 1125 bp->duplex = DUPLEX_HALF; bnx2_5709s_linkup() 1130 bnx2_5708s_linkup(struct bnx2 *bp) bnx2_5708s_linkup() argument 1134 bp->link_up = 1; bnx2_5708s_linkup() 1135 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); bnx2_5708s_linkup() 1138 bp->line_speed = SPEED_10; bnx2_5708s_linkup() 1141 bp->line_speed = SPEED_100; bnx2_5708s_linkup() 1144 bp->line_speed = SPEED_1000; bnx2_5708s_linkup() 1147 bp->line_speed = SPEED_2500; bnx2_5708s_linkup() 1151 bp->duplex = DUPLEX_FULL; bnx2_5708s_linkup() 1153 bp->duplex = DUPLEX_HALF; bnx2_5708s_linkup() 1159 bnx2_5706s_linkup(struct bnx2 *bp) bnx2_5706s_linkup() argument 1163 bp->link_up = 1; bnx2_5706s_linkup() 1164 bp->line_speed = SPEED_1000; bnx2_5706s_linkup() 1166 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706s_linkup() 1168 bp->duplex = DUPLEX_FULL; bnx2_5706s_linkup() 1171 bp->duplex = DUPLEX_HALF; bnx2_5706s_linkup() 1178 bnx2_read_phy(bp, bp->mii_adv, &local_adv); bnx2_5706s_linkup() 1179 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); bnx2_5706s_linkup() 1185 bp->duplex = DUPLEX_FULL; bnx2_5706s_linkup() 
static void
bnx2_copper_linkup(struct bnx2 *bp)
{
    u32 bmcr;

    bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
    if (bmcr & BMCR_ANENABLE) {
        u32 local_adv, remote_adv, common;

        bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
        bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

        common = local_adv & (remote_adv >> 2);
        if (common & ADVERTISE_1000FULL) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_FULL;
        } else if (common & ADVERTISE_1000HALF) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_HALF;
        } else {
            bnx2_read_phy(bp, bp->mii_adv, &local_adv);
            bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

            common = local_adv & remote_adv;
            if (common & ADVERTISE_100FULL) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_FULL;
            } else if (common & ADVERTISE_100HALF) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_HALF;
            } else if (common & ADVERTISE_10FULL) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_FULL;
            } else if (common & ADVERTISE_10HALF) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_HALF;
            } else {
                bp->line_speed = 0;
                bp->link_up = 0;
            }
        }
    } else {
        if (bmcr & BMCR_SPEED100)
            bp->line_speed = SPEED_100;
        else
            bp->line_speed = SPEED_10;
        if (bmcr & BMCR_FULLDPLX)
            bp->duplex = DUPLEX_FULL;
        else
            bp->duplex = DUPLEX_HALF;
    }

    if (bp->link_up) {
        u32 ext_status;

        bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
        if (/* crossover bit set */)
            bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
    }
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
    u32 val, rx_cid_addr = GET_CID_ADDR(cid);

    /* ... base L2 context-type value ... */
    if (bp->flow_ctrl & FLOW_CTRL_TX)
        val |= /* per-ring flow-control enable */;

    bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
    int i;
    u32 cid;

    for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
        /* ... RSS rings use their own CID base ... */
        bnx2_init_rx_context(bp, cid);
    }
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
    u32 val;

    BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
    if (bp->link_up && (bp->line_speed == SPEED_1000) &&
        (bp->duplex == DUPLEX_HALF)) {
        BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
    }

    val = BNX2_RD(bp, BNX2_EMAC_MODE);

    /* ... clear the port/speed mode bits, then: */
    if (bp->link_up) {
        switch (bp->line_speed) {
        case SPEED_10:
            if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
                val |= /* 10M MII mode */;
                break;
            }
            /* fall through to MII mode */
        /* ... 100/1000/2500 port-mode cases ... */
        }
    }

    if (bp->duplex == DUPLEX_HALF)
        val |= /* half-duplex mode bit */;
    BNX2_WR(bp, BNX2_EMAC_MODE, val);

    /* Enable/disable rx PAUSE. */
    bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_RX)
        bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
    BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

    /* Enable/disable tx PAUSE. */
    val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
    /* ... clear, then: */
    if (bp->flow_ctrl & FLOW_CTRL_TX)
        val |= /* tx flow enable */;
    BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

    /* Acknowledge the interrupt. */
    BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

    bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (BNX2_CHIP(bp) == BNX2_CHIP_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_GP_STATUS);
}
static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (BNX2_CHIP(bp) == BNX2_CHIP_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 1;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return 0;

    if (bp->autoneg & AUTONEG_SPEED)
        bp->advertising |= ADVERTISED_2500baseX_Full;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (!(up1 & BCM5708S_UP1_2G5)) {
        up1 |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 0;
    }

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 0;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return 0;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (up1 & BCM5708S_UP1_2G5) {
        up1 &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 1;
    }

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;
    int err;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
            /* ... set forced 2.5G in MISC1 ... */
            bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
        }

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
        err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (!err)
            bmcr |= BCM5708S_BMCR_FORCE_2500;
    } else {
        return;
    }

    if (err)
        return;

    if (bp->autoneg & AUTONEG_SPEED) {
        bmcr &= ~BMCR_ANENABLE;
        if (bp->req_duplex == DUPLEX_FULL)
            bmcr |= BMCR_FULLDPLX;
    }
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;
    int err;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
            /* ... clear the forced-speed bit ... */
            bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
        }

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
        err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (!err)
            bmcr &= ~BCM5708S_BMCR_FORCE_2500;
    } else {
        return;
    }

    if (err)
        return;

    if (bp->autoneg & AUTONEG_SPEED)
        bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
    u32 val;

    bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
    bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
    if (start)
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
    else
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
    u32 bmsr;
    u8 link_up;

    if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
        bp->link_up = 1;
        return 0;
    }

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return 0;

    link_up = bp->link_up;

    bnx2_enable_bmsr1(bp);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_disable_bmsr1(bp);

    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
        u32 val, an_dbg;

        if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
            bnx2_5706s_force_link_dn(bp, 0);
            bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
        }
        val = BNX2_RD(bp, BNX2_EMAC_STATUS);

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        /* ... reconcile EMAC and AN-debug link indications ... */
    }

    if (/* link status bit set */) {
        bp->link_up = 1;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
                bnx2_5706s_linkup(bp);
            else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
                bnx2_5708s_linkup(bp);
            else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                bnx2_5709s_linkup(bp);
        } else {
            bnx2_copper_linkup(bp);
        }
        bnx2_resolve_flow_ctrl(bp);
    } else {
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (bp->autoneg & AUTONEG_SPEED))
            bnx2_disable_forced_2g5(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
            u32 bmcr;

            bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
            bmcr |= BMCR_ANENABLE;
            bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

            bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
        }
        bp->link_up = 0;
    }

    if (bp->link_up != link_up) {
        bnx2_report_link(bp);
    }

    bnx2_set_mac_link(bp);

    return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
    int i;
    u32 reg;

    bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

    for (i = 0; i < /* reset wait limit */; i++) {
        /* ... udelay ... */
        bnx2_read_phy(bp, bp->mii_bmcr, &reg);
        if (!(reg & BMCR_RESET))
            break;
    }
    /* ... -EBUSY if the reset bit never cleared ... */
    return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
    u32 adv = 0;

    if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
        (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
            adv = ADVERTISE_1000XPAUSE;
        else
            adv = ADVERTISE_PAUSE_CAP;
    } else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
            adv = ADVERTISE_1000XPSE_ASYM;
        else
            adv = ADVERTISE_PAUSE_ASYM;
    } else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
            adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
            adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
    }
    return adv;
}
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 speed_arg = 0, pause_adv;

    pause_adv = bnx2_phy_get_pause_adv(bp);

    if (bp->autoneg & AUTONEG_SPEED) {
        /* ... autoneg enable flag, then one bit per advertised mode: */
        if (bp->advertising & ADVERTISED_10baseT_Half)
            speed_arg |= /* 10 half */;
        if (bp->advertising & ADVERTISED_10baseT_Full)
            speed_arg |= /* 10 full */;
        if (bp->advertising & ADVERTISED_100baseT_Half)
            speed_arg |= /* 100 half */;
        if (bp->advertising & ADVERTISED_100baseT_Full)
            speed_arg |= /* 100 full */;
        if (bp->advertising & ADVERTISED_1000baseT_Full)
            speed_arg |= /* 1G full */;
        if (bp->advertising & ADVERTISED_2500baseX_Full)
            speed_arg |= /* 2.5G full */;
    } else {
        if (bp->req_line_speed == SPEED_2500)
            speed_arg = /* 2.5G full */;
        else if (bp->req_line_speed == SPEED_1000)
            speed_arg = /* 1G full */;
        else if (bp->req_line_speed == SPEED_100) {
            if (bp->req_duplex == DUPLEX_FULL)
                speed_arg = /* 100 full */;
            else
                speed_arg = /* 100 half */;
        } else if (bp->req_line_speed == SPEED_10) {
            if (bp->req_duplex == DUPLEX_FULL)
                speed_arg = /* 10 full */;
            else
                speed_arg = /* 10 half */;
        }
    }

    /* ... fold pause_adv and port into speed_arg ... */

    bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

    spin_unlock_bh(&bp->phy_lock);
    bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
    spin_lock_bh(&bp->phy_lock);

    return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 adv, bmcr;
    u32 new_adv = 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return bnx2_setup_remote_phy(bp, port);

    if (!(bp->autoneg & AUTONEG_SPEED)) {
        u32 new_bmcr;
        int force_link_down = 0;

        if (bp->req_line_speed == SPEED_2500) {
            if (!bnx2_test_and_enable_2g5(bp))
                force_link_down = 1;
        } else if (bp->req_line_speed == SPEED_1000) {
            if (bnx2_test_and_disable_2g5(bp))
                force_link_down = 1;
        }
        bnx2_read_phy(bp, bp->mii_adv, &adv);
        adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        new_bmcr = bmcr & ~BMCR_ANENABLE;
        new_bmcr |= BMCR_SPEED1000;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
            if (bp->req_line_speed == SPEED_2500)
                bnx2_enable_forced_2g5(bp);
            else if (bp->req_line_speed == SPEED_1000) {
                bnx2_disable_forced_2g5(bp);
                new_bmcr &= ~0x2000;
            }
        } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
            if (bp->req_line_speed == SPEED_2500)
                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
            else
                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->req_duplex == DUPLEX_FULL) {
            adv |= ADVERTISE_1000XFULL;
            new_bmcr |= BMCR_FULLDPLX;
        } else {
            adv |= ADVERTISE_1000XHALF;
            new_bmcr &= ~BMCR_FULLDPLX;
        }
        if ((new_bmcr != bmcr) || (force_link_down)) {
            /* Force a link down visible on the other side. */
            if (bp->link_up) {
                bnx2_write_phy(bp, bp->mii_adv, adv &
                               ~(ADVERTISE_1000XFULL |
                                 ADVERTISE_1000XHALF));
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                               BMCR_ANRESTART | BMCR_ANENABLE);

                bp->link_up = 0;
                netif_carrier_off(bp->dev);
                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                bnx2_report_link(bp);
            }
            bnx2_write_phy(bp, bp->mii_adv, adv);
            bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
        } else {
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    bnx2_test_and_enable_2g5(bp);

    if (bp->advertising & ADVERTISED_1000baseT_Full)
        new_adv |= ADVERTISE_1000XFULL;

    new_adv |= bnx2_phy_get_pause_adv(bp);

    bnx2_read_phy(bp, bp->mii_adv, &adv);
    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    bp->serdes_an_pending = 0;
    if (/* advertisement or autoneg state changed */) {
        /* Force a link down visible on the other side. */
        if (bp->link_up) {
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            /* ... msleep ... */
            spin_lock_bh(&bp->phy_lock);
        }

        bnx2_write_phy(bp, bp->mii_adv, new_adv);
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                       BMCR_ANENABLE);
        /* Speed up link-up time when the link partner
         * does not autonegotiate. */
        bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
        bp->serdes_an_pending = 1;
        mod_timer(&bp->timer, jiffies + bp->current_interval);
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }

    return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
    (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
        (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :	\
        (ADVERTISED_1000baseT_Full)
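Note that bnx2_setup_remote_phy() never touches the PHY directly: it folds the requested autoneg/forced settings into a bitmask, writes it to the shared-memory mailbox (BNX2_DRV_MB_ARG0), and kicks the firmware with a SET_LINK command, dropping phy_lock across the sleeping bnx2_fw_sync() call. A sketch of the advertisement-to-bitmask fold; the SET_LINK_* flag values are hypothetical stand-ins, only the ADVERTISED_* bit positions match ethtool:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the firmware mailbox flags. */
#define SET_LINK_SPEED_10HALF   (1u << 0)
#define SET_LINK_SPEED_10FULL   (1u << 1)
#define SET_LINK_SPEED_100HALF  (1u << 2)
#define SET_LINK_SPEED_100FULL  (1u << 3)
#define SET_LINK_SPEED_1GFULL   (1u << 4)
#define SET_LINK_ENABLE_AUTONEG (1u << 15)

#define ADVERTISED_10baseT_Half   (1u << 0)   /* ethtool bit positions */
#define ADVERTISED_10baseT_Full   (1u << 1)
#define ADVERTISED_100baseT_Half  (1u << 2)
#define ADVERTISED_100baseT_Full  (1u << 3)
#define ADVERTISED_1000baseT_Full (1u << 5)

static uint32_t build_speed_arg(uint32_t advertising)
{
    uint32_t speed_arg = SET_LINK_ENABLE_AUTONEG;

    /* One mailbox bit per advertised mode, as in the listing above. */
    if (advertising & ADVERTISED_10baseT_Half)
        speed_arg |= SET_LINK_SPEED_10HALF;
    if (advertising & ADVERTISED_10baseT_Full)
        speed_arg |= SET_LINK_SPEED_10FULL;
    if (advertising & ADVERTISED_100baseT_Half)
        speed_arg |= SET_LINK_SPEED_100HALF;
    if (advertising & ADVERTISED_100baseT_Full)
        speed_arg |= SET_LINK_SPEED_100FULL;
    if (advertising & ADVERTISED_1000baseT_Full)
        speed_arg |= SET_LINK_SPEED_1GFULL;
    return speed_arg;
}

int main(void)
{
    printf("0x%08x\n",
           build_speed_arg(ADVERTISED_100baseT_Full |
                           ADVERTISED_1000baseT_Full));
    return 0;
}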
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
    u32 link;

    if (bp->phy_port == PORT_TP)
        link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
    else
        link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

    if (/* autoneg flag set in link word */) {
        bp->req_line_speed = 0;
        bp->autoneg |= AUTONEG_SPEED;
        bp->advertising = ADVERTISED_Autoneg;
        if (/* 10 half */)
            bp->advertising |= ADVERTISED_10baseT_Half;
        if (/* 10 full */)
            bp->advertising |= ADVERTISED_10baseT_Full;
        if (/* 100 half */)
            bp->advertising |= ADVERTISED_100baseT_Half;
        if (/* 100 full */)
            bp->advertising |= ADVERTISED_100baseT_Full;
        if (/* 1G full */)
            bp->advertising |= ADVERTISED_1000baseT_Full;
        if (/* 2.5G full */)
            bp->advertising |= ADVERTISED_2500baseX_Full;
    } else {
        bp->autoneg = 0;
        bp->advertising = 0;
        bp->req_duplex = DUPLEX_FULL;
        if (/* forced 10 */) {
            bp->req_line_speed = SPEED_10;
            if (/* half */)
                bp->req_duplex = DUPLEX_HALF;
        } else if (/* forced 100 */) {
            bp->req_line_speed = SPEED_100;
            if (/* half */)
                bp->req_duplex = DUPLEX_HALF;
        } else if (/* forced 1G */)
            bp->req_line_speed = SPEED_1000;
        else if (/* forced 2.5G */)
            bp->req_line_speed = SPEED_2500;
    }
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
        bnx2_set_default_remote_link(bp);
        return;
    }

    bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
    bp->req_line_speed = 0;
    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        u32 reg;

        bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
        if (/* hardware config forces 1G */) {
            bp->autoneg = 0;
            bp->req_line_speed = bp->line_speed = SPEED_1000;
            bp->req_duplex = DUPLEX_FULL;
        }
    } else
        bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
    u32 msg;
    u32 addr;

    spin_lock(&bp->indirect_lock);
    msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
    addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
    spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
    u32 msg;
    u8 link_up = bp->link_up;
    u8 old_port;

    msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

    if (/* heartbeat expired flag */)
        bnx2_send_heart_beat(bp);

    /* ... clear the heartbeat flag in msg ... */

    if (/* link-down indication */)
        bp->link_up = 0;
    else {
        bp->link_up = 1;
        bp->duplex = DUPLEX_FULL;
        switch (/* speed field of msg */) {
        case /* 10 half */:
            bp->duplex = DUPLEX_HALF;
            /* fall through */
        case /* 10 full */:
            bp->line_speed = SPEED_10;
            break;
        case /* 100 half */:
            bp->duplex = DUPLEX_HALF;
            /* fall through */
        case /* 100 full */:
            bp->line_speed = SPEED_100;
            break;
        case /* 1000 half */:
            bp->duplex = DUPLEX_HALF;
            /* fall through */
        case /* 1000 full */:
            bp->line_speed = SPEED_1000;
            break;
        case /* 2500 half */:
            bp->duplex = DUPLEX_HALF;
            /* fall through */
        case /* 2500 full */:
            bp->line_speed = SPEED_2500;
            break;
        default:
            bp->line_speed = 0;
            break;
        }

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
            if (bp->duplex == DUPLEX_FULL)
                bp->flow_ctrl = bp->req_flow_ctrl;
        } else {
            if (/* tx flow-control bit in msg */)
                bp->flow_ctrl |= FLOW_CTRL_TX;
            if (/* rx flow-control bit in msg */)
                bp->flow_ctrl |= FLOW_CTRL_RX;
        }

        old_port = bp->phy_port;
        if (/* serdes link bit in msg */)
            bp->phy_port = PORT_FIBRE;
        else
            bp->phy_port = PORT_TP;

        if (old_port != bp->phy_port)
            bnx2_set_default_link(bp);
    }

    if (bp->link_up != link_up)
        bnx2_report_link(bp);

    bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
    u32 evt_code;

    evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
    switch (evt_code) {
    case /* link event */:
        bnx2_remote_phy_event(bp);
        break;
    case /* timer expiration */:
    default:
        bnx2_send_heart_beat(bp);
        break;
    }
    return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 bmcr, adv_reg, new_adv = 0;
    u32 new_bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
    /* ... mask adv_reg down to speed and pause bits ... */

    new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

    if (bp->autoneg & AUTONEG_SPEED) {
        u32 adv1000_reg;
        u32 new_adv1000 = 0;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
        /* ... mask adv1000_reg ... */

        new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
        if (/* advertisement or autoneg state changed */) {
            bnx2_write_phy(bp, bp->mii_adv, new_adv);
            bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                           BMCR_ANENABLE);
        } else if (bp->link_up) {
            /* Flow control may have changed from auto to forced
             * or vice-versa. */
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    /* Forced speed: advertise nothing new. */
    if (adv_reg != new_adv)
        bnx2_write_phy(bp, bp->mii_adv, new_adv);

    new_bmcr = 0;
    if (bp->req_line_speed == SPEED_100) {
        new_bmcr |= BMCR_SPEED100;
    }
    if (bp->req_duplex == DUPLEX_FULL) {
        new_bmcr |= BMCR_FULLDPLX;
    }
    if (new_bmcr != bmcr) {
        u32 bmsr;

        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

        if (/* link currently up */) {
            /* Force link down. */
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            /* ... msleep ... */
            spin_lock_bh(&bp->phy_lock);

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        }

        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

        if (/* link still up */) {
            bp->line_speed = bp->req_line_speed;
            bp->duplex = bp->req_duplex;
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }
    return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    if (bp->loopback == MAC_LOOPBACK)
        return 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        return bnx2_setup_serdes_phy(bp, port);
    } else {
        return bnx2_setup_copper_phy(bp);
    }
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
    u32 val;

    bp->mii_bmcr = MII_BMCR + 0x10;
    bp->mii_bmsr = MII_BMSR + 0x10;
    bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
    bp->mii_adv = MII_ADVERTISE + 0x10;
    bp->mii_lpa = MII_LPA + 0x10;
    bp->mii_up1 = MII_BNX2_OVER1G_UP1;

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
    bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
    if (reset_phy)
        bnx2_reset_phy(bp);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

    bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
    /* ... select fiber mode, disable auto-detect ... */
    bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
    bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
        val |= BCM5708S_UP1_2G5;
    else
        val &= ~BCM5708S_UP1_2G5;
    bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
    bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
    /* ... enable BAM next-page exchange ... */
    bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

    /* ... enable CL73 BAM ... */
    bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
    u32 val;

    if (reset_phy)
        bnx2_reset_phy(bp);

    bp->mii_up1 = BCM5708S_UP1;

    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
    bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
    /* ... fiber mode, auto-detect enable ... */
    bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
    /* ... parallel-detect enable ... */
    bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
        bnx2_read_phy(bp, BCM5708S_UP1, &val);
        val |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, BCM5708S_UP1, val);
    }

    if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
        (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
        (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
        /* increase tx signal amplitude */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                       BCM5708S_BLK_ADDR_TX_MISC);
        bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
        /* ... adjust driver VCM ... */
        bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
    }

    val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
          /* ... TX control mask ... */;

    if (val) {
        u32 is_backplane;

        is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
        if (/* backplane PHY */) {
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_TX_MISC);
            bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_DIG);
        }
    }
    return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
    if (reset_phy)
        bnx2_reset_phy(bp);

    bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

    if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
        BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

    if (bp->dev->mtu > 1500) {
        u32 val;

        /* Set extended packet length bit. */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
    } else {
        u32 val;

        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
    }

    return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
    u32 val;

    if (reset_phy)
        bnx2_reset_phy(bp);

    if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
        bnx2_write_phy(bp, 0x18, 0x0c00);
        bnx2_write_phy(bp, 0x17, 0x000a);
        bnx2_write_phy(bp, 0x15, 0x310b);
        bnx2_write_phy(bp, 0x17, 0x201f);
        bnx2_write_phy(bp, 0x15, 0x9506);
        bnx2_write_phy(bp, 0x17, 0x401f);
        bnx2_write_phy(bp, 0x15, 0x14e2);
        bnx2_write_phy(bp, 0x18, 0x0400);
    }

    if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                       /* ... early-DAC expansion register ... */);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        /* ... clear the early-DAC bit ... */
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
    }

    if (bp->dev->mtu > 1500) {
        /* Set extended packet length bit. */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | 0x4000);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val | 0x1);
    } else {
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val & ~0x1);
    }

    /* ethernet@wirespeed */
    bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
    bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
    /* ... set write and wirespeed bits ... */

    /* auto-mdix */
    if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
        val |= /* auto-MDIX enable */;

    bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
    return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 val;
    int rc = 0;

    bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
    bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

    bp->mii_bmcr = MII_BMCR;
    bp->mii_bmsr = MII_BMSR;
    bp->mii_bmsr1 = MII_BMSR;
    bp->mii_adv = MII_ADVERTISE;
    bp->mii_lpa = MII_LPA;

    BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        goto setup_phy;

    bnx2_read_phy(bp, MII_PHYSID1, &val);
    bp->phy_id = val << 16;
    bnx2_read_phy(bp, MII_PHYSID2, &val);
    bp->phy_id |= val & 0xffff;

    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
            rc = bnx2_init_5706s_phy(bp, reset_phy);
        else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
            rc = bnx2_init_5708s_phy(bp, reset_phy);
        else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
            rc = bnx2_init_5709s_phy(bp, reset_phy);
    } else {
        rc = bnx2_init_copper_phy(bp, reset_phy);
    }

setup_phy:
    if (!rc)
        rc = bnx2_setup_phy(bp, bp->phy_port);

    return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
    u32 mac_mode;

    mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
    /* ... select MAC loopback, force link ... */
    BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
    u32 mac_mode;
    int rc, i;

    spin_lock_bh(&bp->phy_lock);
    rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                        BMCR_SPEED1000);
    spin_unlock_bh(&bp->phy_lock);
    if (rc)
        return rc;

    for (i = 0; i < 10; i++) {
        if (bnx2_test_link(bp) == 0)
            break;
        /* ... msleep ... */
    }

    mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
    /* ... clear loopback/port/duplex mode bits, select GMII ... */
    BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}

static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
    struct net_device *dev = bp->dev;
    u32 mcp_p0, mcp_p1;

    netdev_err(dev, "<--- start MCP states dump --->\n");
    if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
        mcp_p0 = BNX2_MCP_STATE_P0;
        mcp_p1 = BNX2_MCP_STATE_P1;
    } else {
        /* ... 5706/5708 addresses ... */
    }
    netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
               bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
    netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
    netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
               bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
    netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
               bnx2_shmem_rd(bp, BNX2_DRV_MB),
               bnx2_shmem_rd(bp, BNX2_FW_MB),
               bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
    pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
    netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
               bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
               bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
    pr_cont(" condition[%08x]\n",
            bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
    DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
    DP_SHMEM_LINE(bp, 0x3cc);
    DP_SHMEM_LINE(bp, 0x3dc);
    DP_SHMEM_LINE(bp, 0x3ec);
    netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
    netdev_err(dev, "<--- end MCP states dump --->\n");
}

static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
    int i;
    u32 val;

    bp->fw_wr_seq++;
    msg_data |= bp->fw_wr_seq;
    bp->fw_last_msg = msg_data;

    bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

    if (!ack)
        return 0;

    /* Wait for an acknowledgement. */
    for (i = 0; i < /* timeout / poll interval */; i++) {
        /* ... msleep ... */
        val = bnx2_shmem_rd(bp, BNX2_FW_MB);

        if (/* firmware echoed our sequence */)
            break;
    }

    /* If we timed out, inform the firmware that this is the case. */
    if (/* sequence still not acked */) {
        /* ... switch the message code to FW_TIMEOUT ... */
        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
        if (!silent)
            bnx2_dump_mcp_state(bp);

        return -EBUSY;
    }

    /* ... -EIO if the firmware reported failure status ... */
    return 0;
}
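bnx2_fw_sync() is a sequence-number handshake: bump fw_wr_seq, OR it into the message, write the DRV_MB mailbox, then poll FW_MB until the firmware echoes the same sequence bits back or a timeout fires (at which point the MCP state dump above is the diagnostic). A condensed model of that ack loop, with the mask value chosen for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRV_MSG_SEQ 0x0000ffffu   /* sequence lives in the low bits */

static uint32_t drv_mb, fw_mb;    /* stand-ins for the shmem mailboxes */
static uint32_t fw_wr_seq;

static void firmware_step(void)   /* simulate the bootcode acking */
{
    fw_mb = drv_mb & DRV_MSG_SEQ;
}

static bool fw_sync(uint32_t msg_data)
{
    msg_data |= ++fw_wr_seq & DRV_MSG_SEQ;
    drv_mb = msg_data;                       /* bnx2_shmem_wr(DRV_MB) */

    for (int i = 0; i < 100; i++) {          /* bounded wait */
        firmware_step();                     /* real code: msleep(10) */
        if ((fw_mb & DRV_MSG_SEQ) == (msg_data & DRV_MSG_SEQ))
            return true;                     /* firmware echoed our seq */
    }
    return false;                            /* would dump MCP state */
}

int main(void)
{
    printf(fw_sync(0x00010000) ? "acked\n" : "timeout\n");
    return 0;
}

The per-message sequence number is what lets the driver distinguish a stale ack for an earlier command from the ack it is actually waiting on.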
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
    int i, ret = 0;
    u32 val;

    /* ... enable the context memory and wait for MEM_INIT to clear: */
    BNX2_WR(bp, BNX2_CTX_COMMAND, val);
    for (i = 0; i < 10; i++) {
        val = BNX2_RD(bp, BNX2_CTX_COMMAND);
        if (/* init done */)
            break;
        /* ... udelay ... */
    }
    /* ... -EBUSY on timeout ... */

    for (i = 0; i < bp->ctx_pages; i++) {
        int j;

        if (bp->ctx_blk[i])
            memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
        else
            return -ENOMEM;

        BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                (bp->ctx_blk_mapping[i] & 0xffffffff) |
                /* valid bit */);
        BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                (u64) bp->ctx_blk_mapping[i] >> 32);
        BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                /* write-request bit */);
        for (j = 0; j < 10; j++) {
            val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
            if (/* write request consumed */)
                break;
            /* ... udelay ... */
        }
        /* ... ret = -EBUSY on timeout ... */
    }
    return ret;
}

static void
bnx2_init_context(struct bnx2 *bp)
{
    u32 vcid = 96;

    while (vcid) {
        u32 vcid_addr, pcid_addr, offset;
        int i;

        vcid--;

        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
            /* ... remap the context ID for the A0 erratum ... */
        } else {
            vcid_addr = GET_CID_ADDR(vcid);
            pcid_addr = vcid_addr;
        }

        for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
            vcid_addr += (i << PHY_CTX_SHIFT);
            pcid_addr += (i << PHY_CTX_SHIFT);

            BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
            BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

            /* Zero out the context. */
            for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
        }
    }
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
    u16 *good_mbuf;
    u32 good_mbuf_cnt;
    u32 val;

    good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
    if (good_mbuf == NULL)
        return -ENOMEM;

    BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
            BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

    good_mbuf_cnt = 0;

    /* Allocate a bunch of mbufs and save the good ones in an array. */
    val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
    while (/* free buffers remain */) {
        bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                        /* alloc request */);

        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

        /* ... remember the good buffer numbers ... */

        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
    }

    /* Free the good ones back to the pool, discarding the bad ones. */
    while (good_mbuf_cnt) {
        good_mbuf_cnt--;

        val = good_mbuf[good_mbuf_cnt];
        val = (val << 9) | val | 1;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
    }
    kfree(good_mbuf);
    return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
    u32 val;

    val = (mac_addr[0] << 8) | mac_addr[1];

    BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

    val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
          (mac_addr[4] << 8) | mac_addr[5];

    BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}

static int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                   u16 index, gfp_t gfp)
{
    dma_addr_t mapping;
    struct page *page = alloc_page(gfp);

    if (!page)
        return -ENOMEM;
    mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
                           PCI_DMA_FROMDEVICE);
    if (dma_mapping_error(&bp->pdev->dev, mapping)) {
        __free_page(page);
        return -EIO;
    }

    /* ... store page + mapping in the sw ring, fill the bd ... */
    return 0;
}

static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
    struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
    struct page *page = rx_pg->page;

    if (!page)
        return;

    dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
                   PAGE_SIZE, PCI_DMA_FROMDEVICE);

    __free_page(page);
    rx_pg->page = NULL;
}

static int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                   u16 index, gfp_t gfp)
{
    u8 *data;
    dma_addr_t mapping;

    data = kmalloc(bp->rx_buf_size, gfp);
    if (!data)
        return -ENOMEM;

    mapping = dma_map_single(&bp->pdev->dev,
                             /* frame header within data */,
                             bp->rx_buf_use_size,
                             PCI_DMA_FROMDEVICE);
    if (dma_mapping_error(&bp->pdev->dev, mapping)) {
        kfree(data);
        return -EIO;
    }

    /* ... store data + mapping in the sw ring, fill the bd ... */

    rxr->rx_prod_bseq += bp->rx_buf_use_size;

    return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
    /* ... compare the attention bit against its ack copy: */
    if (/* state changed */) {
        if (/* bit now set */)
            BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
        else
            BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
        return 1;
    }
    return 0;
}

static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
    spin_lock(&bp->phy_lock);

    if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
        bnx2_set_link(bp);
    if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
        bnx2_set_remote_link(bp);

    spin_unlock(&bp->phy_lock);
}
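Each successful bnx2_alloc_rx_data() advances two things the hardware watches: the producer index (which ring slot is next) and rx_prod_bseq, a running byte-sequence total bumped by the buffer size; bnx2_rx_int() later publishes both through the doorbell registers. A toy producer showing the pairing (ring size and buffer size are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256                 /* illustrative ring size */

struct rx_ring {
    uint16_t prod;                    /* producer index (slot) */
    uint32_t prod_bseq;               /* running byte count */
};

static void post_rx_buffer(struct rx_ring *r, uint32_t buf_use_size)
{
    r->prod = (uint16_t)((r->prod + 1) % RING_SIZE);
    /* as in bnx2: rxr->rx_prod_bseq += bp->rx_buf_use_size */
    r->prod_bseq += buf_use_size;
}

int main(void)
{
    struct rx_ring r = { 0, 0 };

    for (int i = 0; i < 3; i++)
        post_rx_buffer(&r, 1522);
    /* The driver then writes prod to the bidx doorbell and prod_bseq
     * to the bseq doorbell (BNX2_WR16 / BNX2_WR in bnx2_rx_int()). */
    printf("prod=%u bseq=%u\n", r.prod, r.prod_bseq);
    return 0;
}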
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
    struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
    u16 hw_cons, sw_cons, sw_ring_cons;
    int tx_pkt = 0, index;
    struct netdev_queue *txq;

    index = (bnapi - bp->bnx2_napi);
    txq = netdev_get_tx_queue(bp->dev, index);

    /* ... walk completed descriptors from sw_cons to hw_cons: */
    while (sw_cons != hw_cons) {
        struct bnx2_sw_tx_bd *tx_buf;
        struct sk_buff *skb;
        int i, last;

        /* ... fetch tx_buf/skb for this slot ... */

        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* ... then each fragment page: */
        for (i = 0; i < last; i++) {
            /* ... advance sw_cons ... */
            dma_unmap_page(&bp->pdev->dev,
                           /* frag mapping */,
                           /* frag size */,
                           PCI_DMA_TODEVICE);
        }

        /* ... free the skb, count the packet, re-read hw_cons ... */
    }

    /* ... publish the new consumer index ... */

    if (unlikely(netif_tx_queue_stopped(txq)) &&
        (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
        __netif_tx_lock(txq, smp_processor_id());
        if ((netif_tx_queue_stopped(txq)) &&
            (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }

    return tx_pkt;
}

static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
    /* ... return the pages of a partially consumed skb to the
     * page ring so the hardware can refill them ... */
}

static void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                   u8 *data, u16 cons, u16 prod)
{
    /* Recycle the consumed buffer into the producer slot. */
    dma_sync_single_for_device(&bp->pdev->dev,
            /* cons mapping */, /* header length */,
            PCI_DMA_FROMDEVICE);

    rxr->rx_prod_bseq += bp->rx_buf_use_size;

    /* ... copy data pointer, mapping, and bd address cons -> prod ... */
}

static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
    int err;
    u16 prod = ring_idx & 0xffff;
    struct sk_buff *skb;

    err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
    if (unlikely(err)) {
        bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
        if (hdr_len) {
            int pages;
            /* ... */
            bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
        }
        return NULL;
    }

    dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                     PCI_DMA_FROMDEVICE);
    /* ... build the skb around "data" ... */

    if (hdr_len == 0) {
        skb_put(skb, len);
        return skb;
    }

    /* Jumbo path: attach page-ring pages as fragments, allocating
     * replacements as we go: */
    /* for each tail page: */
        err = bnx2_alloc_rx_page(bp, rxr, /* page index */, GFP_ATOMIC);
        if (unlikely(err)) {
            bnx2_reuse_rx_skb_pages(bp, rxr, skb, /* remaining */);
            return NULL;
        }
        dma_unmap_page(&bp->pdev->dev, /* old mapping */,
                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
        /* ... add the page as a frag ... */

    return skb;
}

static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
    struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
    u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
    int rx_pkt = 0;

    /* ... read hw_cons, walk the completion ring: */
    while (sw_cons != hw_cons) {
        unsigned int len, hdr_len;
        struct sk_buff *skb;
        u8 *data;
        dma_addr_t dma_addr;

        /* ... fetch the frame header for this buffer ... */
        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                /* header length */, PCI_DMA_FROMDEVICE);

        hdr_len = 0;
        if (/* split frame */) {
            /* ... */
        } else if (len > bp->rx_jumbo_thresh) {
            hdr_len = bp->rx_jumbo_thresh;
        }

        if (/* hardware flagged errors */) {
            bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                               sw_ring_prod);
            /* ... also recycle any tail pages ... */
            goto next_rx;
        }

        if (len <= bp->rx_copy_thresh) {
            skb = netdev_alloc_skb(bp->dev, len + 6);
            if (skb == NULL) {
                bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                   sw_ring_prod);
                goto next_rx;
            }

            /* ... copy the small frame, then recycle the buffer: */
            bnx2_reuse_rx_data(bp, rxr, data,
                               sw_ring_cons, sw_ring_prod);
        } else {
            skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
                              (sw_ring_cons << 16) | sw_ring_prod);
            if (skb == NULL)
                goto next_rx;
        }
        if (/* VLAN tag present */ &&
            !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
            /* ... put the tag on the skb ... */;

        skb->protocol = eth_type_trans(skb, bp->dev);

        if (len > (bp->dev->mtu + ETH_HLEN) &&
            /* ... not a VLAN-tagged frame ... */) {
            /* ... drop oversized frame ... */
            goto next_rx;
        }

        if ((bp->dev->features & NETIF_F_RXCSUM) &&
            /* ... checksum status good ... */)
            skb->ip_summed = CHECKSUM_UNNECESSARY;

        if ((bp->dev->features & NETIF_F_RXHASH) &&
            /* ... RSS hash valid ... */)
            /* ... set the hash ... */;

        skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
        /* ... hand skb to GRO, rx_pkt++, honor budget ... */

next_rx:
        /* ... advance cons/prod indices ... */
    }

    /* Publish the new producer state to the hardware: */
    if (/* page ring in use */)
        BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

    BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

    BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

    return rx_pkt;
}

static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
    struct bnx2_napi *bnapi = dev_instance;
    struct bnx2 *bp = bnapi->bp;

    BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
            BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
            BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

    /* Return here if interrupt is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    napi_schedule(&bnapi->napi);

    return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
    struct bnx2_napi *bnapi = dev_instance;
    struct bnx2 *bp = bnapi->bp;

    /* Return here if interrupt is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    napi_schedule(&bnapi->napi);

    return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
    struct bnx2_napi *bnapi = dev_instance;
    struct bnx2 *bp = bnapi->bp;

    /* With INTx, the interrupt can arrive before the status block
     * write; reading a register flushes it.  A shared line with no
     * new status index and INTA deasserted is not ours: */
    if (/* status index unchanged */ &&
        (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
         /* INTA deasserted */))
        return IRQ_NONE;

    BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
            BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
            BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

    /* Read back to deassert IRQ immediately and avoid spurious
     * interrupts. */
    BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

    /* Return here if interrupt is shared and is disabled. */
    if (unlikely(atomic_read(&bp->intr_sem) != 0))
        return IRQ_HANDLED;

    /* ... record last_status_idx and schedule NAPI ... */

    return IRQ_HANDLED;
}
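bnx2_rx_int() applies a copy-break: frames no longer than rx_copy_thresh are copied into a freshly allocated skb so the original DMA buffer can be recycled in place (bnx2_reuse_rx_data), while larger frames are unmapped and handed up wholesale. A minimal model of that decision (threshold value illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_COPY_THRESH 128   /* bnx2 uses a similar small cutoff */

/* Returns a buffer for the stack and reports whether the DMA buffer
 * was recycled, like the rx_copy_thresh branch in bnx2_rx_int(). */
static void *receive(uint8_t *dma_buf, size_t len, int *recycled)
{
    if (len <= RX_COPY_THRESH) {
        void *skb = malloc(len);    /* netdev_alloc_skb() stand-in */
        if (skb) {
            memcpy(skb, dma_buf, len); /* small frame: copy out ... */
            *recycled = 1;             /* ... and recycle the buffer */
            return skb;
        }
    }
    *recycled = 0;                  /* large frame: unmap and pass up */
    return dma_buf;
}

int main(void)
{
    uint8_t buf[2048] = "hello";
    int recycled;
    void *skb = receive(buf, 64, &recycled);

    printf("recycled=%d\n", recycled);   /* 1: copy-break path taken */
    if (skb != buf)
        free(skb);
    return 0;
}

The win is that small packets never take a buffer out of the ring, so the ring stays full without a new allocation and DMA mapping per packet.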
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
    struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
    u32 msi_ctrl;

    if (/* work pending */) {
        msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
        if (/* MSI disabled */)
            return;

        if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
            /* Bounce MSI enable to replay a possibly lost message. */
            BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                    /* ~enable */);
            BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
            bnx2_msi(bp->irq_tbl[0].vector, bnapi);
        }
    }

    bp->idle_chk_status_idx = bnapi->last_status_idx;
}

static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
    struct cnic_ops *c_ops;

    /* ... skip if no CNIC instance is attached ... */

    rcu_read_lock();
    c_ops = rcu_dereference(bp->cnic_ops);
    if (c_ops)
        bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                              bnapi->status_blk.msi);
    rcu_read_unlock();
}

static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
    /* ... compare attention bits against their ack copies: */
    if (/* attention event pending */) {

        bnx2_phy_int(bp, bnapi);

        /* This is needed to take care of transient status during
         * link toggling. */
        BNX2_WR(bp, BNX2_HC_COMMAND,
                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
        BNX2_RD(bp, BNX2_HC_COMMAND);
    }
}

static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
    /* ... if the TX consumer moved: */
        bnx2_tx_int(bp, bnapi, 0);

    /* ... if the RX consumer moved: */
        work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

    return work_done;
}

static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
    struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
    struct bnx2 *bp = bnapi->bp;
    int work_done = 0;

    while (1) {
        work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
        if (unlikely(work_done >= budget))
            break;

        /* ... record last_status_idx; must be read before checking
         * for more work ... */
        if (/* no more work */) {
            napi_complete(napi);
            BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                    BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                    bnapi->last_status_idx);
            break;
        }
    }
    return work_done;
}

static int bnx2_poll(struct napi_struct *napi, int budget)
{
    struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
    struct bnx2 *bp = bnapi->bp;
    int work_done = 0;

    while (1) {
        bnx2_poll_link(bp, bnapi);

        work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

        bnx2_poll_cnic(bp, bnapi);

        /* bnapi->last_status_idx is used below to tell the hw how
         * much work has been processed, so we must read it before
         * checking for more work. */

        if (unlikely(work_done >= budget))
            break;

        if (/* no more work */) {
            napi_complete(napi);
            if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        bnapi->last_status_idx);
                break;
            }
            BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                    BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                    BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                    bnapi->last_status_idx);

            BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                    BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                    bnapi->last_status_idx);
            break;
        }
    }

    return work_done;
}

static void
bnx2_set_rx_mode(struct net_device *dev)
{
    struct bnx2 *bp = netdev_priv(dev);
    u32 rx_mode, sort_mode;
    struct netdev_hw_addr *ha;
    int i;

    if (!netif_running(dev))
        return;

    spin_lock_bh(&bp->phy_lock);

    rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                              BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
    /* ... base sort mode ... */
    if (/* VLAN stripping off */ &&
        (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
        rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
    if (/* promiscuous */) {
        rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
        /* ... */
    } else if (/* all-multicast */) {
        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
            BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                    0xffffffff);
        }
        /* ... */
    } else {
        /* Accept one or more multicast(s): hash each address into
         * the filter registers. */
        u32 mc_filter[NUM_MC_HASH_REGISTERS];

        /* ... compute mc_filter[] from the mc list ... */

        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
            BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                    mc_filter[i]);
        }
        /* ... */
    }

    if (/* too many unicast addresses */) {
        rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
        /* ... */
    } else if (/* not promiscuous */) {
        /* Add all entries into the match filter list. */
        i = 0;
        netdev_for_each_uc_addr(ha, dev) {
            bnx2_set_mac_addr(bp, ha->addr,
                              i + /* first unicast slot */);
            /* ... enable the slot in sort_mode ... */
            i++;
        }
    }

    if (rx_mode != bp->rx_mode) {
        bp->rx_mode = rx_mode;
        BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
    }

    BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
    BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
    BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

    spin_unlock_bh(&bp->phy_lock);
}

static void
bnx2_release_firmware(struct bnx2 *bp)
{
    if (bp->rv2p_firmware) {
        release_firmware(bp->mips_firmware);
        release_firmware(bp->rv2p_firmware);
        bp->rv2p_firmware = NULL;
    }
}
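bnx2_poll() and bnx2_poll_msix() follow the standard NAPI contract: keep calling the work function until either the budget is exhausted (stay on the poll list) or no work remains (complete and re-enable the interrupt via INT_ACK_CMD). A compact model of that loop:

#include <stdio.h>

static int pending = 300;            /* packets waiting in the ring */

static int poll_work(int done, int budget)
{
    while (done < budget && pending > 0) {   /* consume up to budget */
        pending--;
        done++;
    }
    return done;
}

static int poll(int budget)          /* the bnx2_poll() shape */
{
    int work_done = 0;

    while (1) {
        work_done = poll_work(work_done, budget);
        if (work_done >= budget)
            return budget;           /* budget spent: stay polling */
        if (pending == 0) {
            /* napi_complete() + re-enable IRQ via the ack register */
            return work_done;
        }
    }
}

int main(void)
{
    printf("%d\n", poll(64));        /* 64: budget exhausted */
    printf("%d\n", poll(64));        /* 64 again, ring still busy */
    return 0;
}

Returning exactly the budget tells the NAPI core the ring is still busy, so the softirq keeps polling instead of re-enabling the interrupt.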
bnx2_request_uncached_firmware() 3718 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || bnx2_request_uncached_firmware() 3719 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || bnx2_request_uncached_firmware() 3720 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || bnx2_request_uncached_firmware() 3721 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || bnx2_request_uncached_firmware() 3722 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { bnx2_request_uncached_firmware() 3727 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || bnx2_request_uncached_firmware() 3728 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || bnx2_request_uncached_firmware() 3729 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { bnx2_request_uncached_firmware() 3738 release_firmware(bp->rv2p_firmware); bnx2_request_uncached_firmware() 3739 bp->rv2p_firmware = NULL; bnx2_request_uncached_firmware() 3741 release_firmware(bp->mips_firmware); bnx2_request_uncached_firmware() 3745 static int bnx2_request_firmware(struct bnx2 *bp) bnx2_request_firmware() argument 3747 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp); bnx2_request_firmware() 3763 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, load_rv2p_fw() argument 3774 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); load_rv2p_fw() 3785 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); load_rv2p_fw() 3787 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); load_rv2p_fw() 3791 BNX2_WR(bp, addr, val); load_rv2p_fw() 3794 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); load_rv2p_fw() 3801 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code); load_rv2p_fw() 3804 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code); load_rv2p_fw() 3807 BNX2_WR(bp, addr, val); load_rv2p_fw() 3813 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); load_rv2p_fw() 3816 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); load_rv2p_fw() 3823 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, load_cpu_fw() argument 3832 val = bnx2_reg_rd_ind(bp, cpu_reg->mode); load_cpu_fw() 3834 bnx2_reg_wr_ind(bp, cpu_reg->mode, val); load_cpu_fw() 3835 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); load_cpu_fw() 3841 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw() 3848 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw() 3855 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw() 3862 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw() 3869 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw() 3876 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw() 3880 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0); load_cpu_fw() 3883 bnx2_reg_wr_ind(bp, cpu_reg->pc, val); load_cpu_fw() 3886 val = bnx2_reg_rd_ind(bp, cpu_reg->mode); load_cpu_fw() 3888 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); load_cpu_fw() 3889 bnx2_reg_wr_ind(bp, cpu_reg->mode, val); load_cpu_fw() 3895 bnx2_init_cpus(struct bnx2 *bp) bnx2_init_cpus() argument 3898 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; bnx2_init_cpus() 3900 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; bnx2_init_cpus() 3904 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); bnx2_init_cpus() 3905 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); bnx2_init_cpus() 3908 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); bnx2_init_cpus() 3913 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); bnx2_init_cpus() 3918 
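check_mips_fw_entry() and check_fw_section() above validate each firmware blob before any of it is loaded; the heart of such a check is that every {offset, len} pair taken from the header stays inside the blob and is properly aligned. A hedged reconstruction of that style of check (field names are illustrative; align must be a power of two).

#include <stdint.h>
#include <stddef.h>

struct fw_section {
        uint32_t offset;        /* byte offset of the section in the blob */
        uint32_t len;           /* section length in bytes */
};

static int fw_section_ok(const struct fw_section *s, size_t blob_size,
                         uint32_t align)
{
        if (s->len == 0 || (s->len & (align - 1)))
                return 0;                       /* empty or misaligned */
        if (s->offset > blob_size || s->len > blob_size - s->offset)
                return 0;                       /* overflow-safe bounds check */
        return 1;
}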
rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); bnx2_init_cpus() 3923 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); bnx2_init_cpus() 3928 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); bnx2_init_cpus() 3935 bnx2_setup_wol(struct bnx2 *bp) bnx2_setup_wol() argument 3940 if (bp->wol) { bnx2_setup_wol() 3944 autoneg = bp->autoneg; bnx2_setup_wol() 3945 advertising = bp->advertising; bnx2_setup_wol() 3947 if (bp->phy_port == PORT_TP) { bnx2_setup_wol() 3948 bp->autoneg = AUTONEG_SPEED; bnx2_setup_wol() 3949 bp->advertising = ADVERTISED_10baseT_Half | bnx2_setup_wol() 3956 spin_lock_bh(&bp->phy_lock); bnx2_setup_wol() 3957 bnx2_setup_phy(bp, bp->phy_port); bnx2_setup_wol() 3958 spin_unlock_bh(&bp->phy_lock); bnx2_setup_wol() 3960 bp->autoneg = autoneg; bnx2_setup_wol() 3961 bp->advertising = advertising; bnx2_setup_wol() 3963 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_setup_wol() 3965 val = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_setup_wol() 3972 if (bp->phy_port == PORT_TP) { bnx2_setup_wol() 3976 if (bp->line_speed == SPEED_2500) bnx2_setup_wol() 3980 BNX2_WR(bp, BNX2_EMAC_MODE, val); bnx2_setup_wol() 3984 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), bnx2_setup_wol() 3987 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); bnx2_setup_wol() 3990 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); bnx2_setup_wol() 3991 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); bnx2_setup_wol() 3992 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); bnx2_setup_wol() 3995 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, bnx2_setup_wol() 4000 val = BNX2_RD(bp, BNX2_RPM_CONFIG); bnx2_setup_wol() 4002 BNX2_WR(bp, BNX2_RPM_CONFIG, val); bnx2_setup_wol() 4009 if (!(bp->flags & BNX2_FLAG_NO_WOL)) { bnx2_setup_wol() 4013 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { bnx2_setup_wol() 4014 bnx2_fw_sync(bp, wol_msg, 1, 0); bnx2_setup_wol() 4020 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); bnx2_setup_wol() 4021 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, bnx2_setup_wol() 4023 bnx2_fw_sync(bp, wol_msg, 1, 0); bnx2_setup_wol() 4024 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); bnx2_setup_wol() 4030 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) bnx2_set_power_state() argument 4036 pci_enable_wake(bp->pdev, PCI_D0, false); bnx2_set_power_state() 4037 pci_set_power_state(bp->pdev, PCI_D0); bnx2_set_power_state() 4039 val = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_set_power_state() 4042 BNX2_WR(bp, BNX2_EMAC_MODE, val); bnx2_set_power_state() 4044 val = BNX2_RD(bp, BNX2_RPM_CONFIG); bnx2_set_power_state() 4046 BNX2_WR(bp, BNX2_RPM_CONFIG, val); bnx2_set_power_state() 4050 bnx2_setup_wol(bp); bnx2_set_power_state() 4051 pci_wake_from_d3(bp->pdev, bp->wol); bnx2_set_power_state() 4052 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_set_power_state() 4053 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { bnx2_set_power_state() 4055 if (bp->wol) bnx2_set_power_state() 4056 pci_set_power_state(bp->pdev, PCI_D3hot); bnx2_set_power_state() 4060 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_set_power_state() 4067 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_set_power_state() 4070 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); bnx2_set_power_state() 4072 pci_set_power_state(bp->pdev, PCI_D3hot); bnx2_set_power_state() 4086 bnx2_acquire_nvram_lock(struct bnx2 *bp) bnx2_acquire_nvram_lock() argument 4092 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); bnx2_acquire_nvram_lock() 4094 val = BNX2_RD(bp, BNX2_NVM_SW_ARB); bnx2_acquire_nvram_lock() 4108 bnx2_release_nvram_lock(struct 
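bnx2_acquire_nvram_lock() above is a bounded poll: request the arbitration bit, then re-read until the arbiter grants it or the retry budget expires. Modeled in userspace with a fake register standing in for BNX2_NVM_SW_ARB; the instant-grant write_reg() is a deliberate simplification.

#include <stdint.h>
#include <errno.h>

static uint32_t fake_arb;               /* stands in for BNX2_NVM_SW_ARB */
static void write_reg(uint32_t v) { fake_arb |= v; }    /* arbiter grants at once */
static uint32_t read_reg(void) { return fake_arb; }

static int acquire_hw_lock(uint32_t req_bit, int max_tries)
{
        int i;

        write_reg(req_bit);                     /* post the request */
        for (i = 0; i < max_tries; i++) {
                if (read_reg() & req_bit)
                        return 0;               /* arbiter granted the lock */
                /* real driver: short delay between polls */
        }
        return -EBUSY;                          /* bounded wait expired */
}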
bnx2 *bp) bnx2_release_nvram_lock() argument 4114 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); bnx2_release_nvram_lock() 4117 val = BNX2_RD(bp, BNX2_NVM_SW_ARB); bnx2_release_nvram_lock() 4132 bnx2_enable_nvram_write(struct bnx2 *bp) bnx2_enable_nvram_write() argument 4136 val = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_enable_nvram_write() 4137 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); bnx2_enable_nvram_write() 4139 if (bp->flash_info->flags & BNX2_NV_WREN) { bnx2_enable_nvram_write() 4142 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_enable_nvram_write() 4143 BNX2_WR(bp, BNX2_NVM_COMMAND, bnx2_enable_nvram_write() 4149 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_enable_nvram_write() 4161 bnx2_disable_nvram_write(struct bnx2 *bp) bnx2_disable_nvram_write() argument 4165 val = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_disable_nvram_write() 4166 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); bnx2_disable_nvram_write() 4171 bnx2_enable_nvram_access(struct bnx2 *bp) bnx2_enable_nvram_access() argument 4175 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); bnx2_enable_nvram_access() 4177 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, bnx2_enable_nvram_access() 4182 bnx2_disable_nvram_access(struct bnx2 *bp) bnx2_disable_nvram_access() argument 4186 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); bnx2_disable_nvram_access() 4188 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, bnx2_disable_nvram_access() 4194 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) bnx2_nvram_erase_page() argument 4199 if (bp->flash_info->flags & BNX2_NV_BUFFERED) bnx2_nvram_erase_page() 4208 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_erase_page() 4211 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_erase_page() 4214 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_erase_page() 4222 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_nvram_erase_page() 4234 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) bnx2_nvram_read_dword() argument 4243 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { bnx2_nvram_read_dword() 4244 offset = ((offset / bp->flash_info->page_size) << bnx2_nvram_read_dword() 4245 bp->flash_info->page_bits) + bnx2_nvram_read_dword() 4246 (offset % bp->flash_info->page_size); bnx2_nvram_read_dword() 4250 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_read_dword() 4253 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_read_dword() 4256 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_read_dword() 4264 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_nvram_read_dword() 4266 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ)); bnx2_nvram_read_dword() 4279 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) bnx2_nvram_write_dword() argument 4289 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { bnx2_nvram_write_dword() 4290 offset = ((offset / bp->flash_info->page_size) << bnx2_nvram_write_dword() 4291 bp->flash_info->page_bits) + bnx2_nvram_write_dword() 4292 (offset % bp->flash_info->page_size); bnx2_nvram_write_dword() 4296 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_write_dword() 4301 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); bnx2_nvram_write_dword() 4304 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_write_dword() 4307 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_write_dword() 4313 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) bnx2_nvram_write_dword() 4323 bnx2_init_nvram(struct bnx2 *bp) 
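The BNX2_NV_TRANSLATE arithmetic in bnx2_nvram_read_dword()/bnx2_nvram_write_dword() above converts a linear byte offset into the page-number-plus-byte-in-page form that buffered flash parts expect. It is a pure function, so it can be lifted out and tested directly; the 264-byte/9-bit geometry in main() is one plausible part, used here only as an example.

#include <assert.h>
#include <stdint.h>

static uint32_t nvram_translate(uint32_t offset, uint32_t page_size,
                                uint32_t page_bits)
{
        return ((offset / page_size) << page_bits) + (offset % page_size);
}

int main(void)
{
        /* e.g. a buffered part with 264-byte pages addressed via 9 page bits */
        assert(nvram_translate(263, 264, 9) == 263);
        assert(nvram_translate(264, 264, 9) == (1u << 9));
        assert(nvram_translate(265, 264, 9) == (1u << 9) + 1);
        return 0;
}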
bnx2_init_nvram() argument 4329 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_nvram() 4330 bp->flash_info = &flash_5709; bnx2_init_nvram() 4335 val = BNX2_RD(bp, BNX2_NVM_CFG1); bnx2_init_nvram() 4346 bp->flash_info = flash; bnx2_init_nvram() 4364 bp->flash_info = flash; bnx2_init_nvram() 4367 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_init_nvram() 4371 bnx2_enable_nvram_access(bp); bnx2_init_nvram() 4374 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1); bnx2_init_nvram() 4375 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2); bnx2_init_nvram() 4376 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3); bnx2_init_nvram() 4377 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1); bnx2_init_nvram() 4380 bnx2_disable_nvram_access(bp); bnx2_init_nvram() 4381 bnx2_release_nvram_lock(bp); bnx2_init_nvram() 4389 bp->flash_info = NULL; bnx2_init_nvram() 4395 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2); bnx2_init_nvram() 4398 bp->flash_size = val; bnx2_init_nvram() 4400 bp->flash_size = bp->flash_info->total_size; bnx2_init_nvram() 4406 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, bnx2_nvram_read() argument 4416 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_nvram_read() 4420 bnx2_enable_nvram_access(bp); bnx2_nvram_read() 4444 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read() 4469 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read() 4482 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); bnx2_nvram_read() 4490 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); bnx2_nvram_read() 4502 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read() 4508 bnx2_disable_nvram_access(bp); bnx2_nvram_read() 4510 bnx2_release_nvram_lock(bp); bnx2_nvram_read() 4516 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, bnx2_nvram_write() argument 4534 if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) bnx2_nvram_write() 4541 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) bnx2_nvram_write() 4559 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write() 4575 page_start -= (page_start % bp->flash_info->page_size); bnx2_nvram_write() 4577 page_end = page_start + bp->flash_info->page_size; bnx2_nvram_write() 4585 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_nvram_write() 4589 bnx2_enable_nvram_access(bp); bnx2_nvram_write() 4592 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write() 4597 for (j = 0; j < bp->flash_info->page_size; j += 4) { bnx2_nvram_write() 4598 if (j == (bp->flash_info->page_size - 4)) { bnx2_nvram_write() 4601 rc = bnx2_nvram_read_dword(bp, bnx2_nvram_write() 4614 if ((rc = bnx2_enable_nvram_write(bp)) != 0) bnx2_nvram_write() 4620 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write() 4622 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) bnx2_nvram_write() 4626 bnx2_enable_nvram_write(bp); bnx2_nvram_write() 4631 rc = bnx2_nvram_write_dword(bp, addr, bnx2_nvram_write() 4644 ((bp->flash_info->flags & BNX2_NV_BUFFERED) && bnx2_nvram_write() 4649 rc = bnx2_nvram_write_dword(bp, addr, buf, bnx2_nvram_write() 4661 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write() 4668 rc = bnx2_nvram_write_dword(bp, addr, bnx2_nvram_write() 4679 bnx2_disable_nvram_write(bp); bnx2_nvram_write() 4682 bnx2_disable_nvram_access(bp); bnx2_nvram_write() 4683 bnx2_release_nvram_lock(bp); bnx2_nvram_write() 4696 bnx2_init_fw_cap(struct bnx2 *bp) bnx2_init_fw_cap() argument 4700 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; bnx2_init_fw_cap() 4701 
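bnx2_nvram_read() above only ever issues aligned 4-byte reads; unaligned or odd-length requests are widened to whole dwords and the surplus bytes trimmed on copy-out. The widening arithmetic, modeled (mirrors the offset32/len32 handling, with illustrative names).

#include <stdint.h>
#include <stdio.h>

struct span {
        uint32_t start;         /* aligned read start */
        uint32_t len;           /* aligned read length (multiple of 4) */
        uint32_t skip;          /* bytes to drop at the front on copy-out */
};

static struct span widen_to_dwords(uint32_t offset, uint32_t len)
{
        struct span s;

        s.start = offset & ~3u;                 /* round start down */
        s.skip  = offset & 3u;                  /* leading bytes to discard */
        s.len   = (s.skip + len + 3u) & ~3u;    /* round total up */
        return s;
}

int main(void)
{
        struct span s = widen_to_dwords(6, 5);  /* caller wants bytes 6..10 */
        printf("read %u bytes at %u, skip %u\n", s.len, s.start, s.skip);
        /* -> read 8 bytes at 4, skip 2 */
        return 0;
}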
bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap() 4703 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) bnx2_init_fw_cap() 4704 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap() 4706 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); bnx2_init_fw_cap() 4711 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap() 4715 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_init_fw_cap() 4719 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; bnx2_init_fw_cap() 4721 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); bnx2_init_fw_cap() 4723 bp->phy_port = PORT_FIBRE; bnx2_init_fw_cap() 4725 bp->phy_port = PORT_TP; bnx2_init_fw_cap() 4731 if (netif_running(bp->dev) && sig) bnx2_init_fw_cap() 4732 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); bnx2_init_fw_cap() 4736 bnx2_setup_msix_tbl(struct bnx2 *bp) bnx2_setup_msix_tbl() argument 4738 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); bnx2_setup_msix_tbl() 4740 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); bnx2_setup_msix_tbl() 4741 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); bnx2_setup_msix_tbl() 4745 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) bnx2_reset_chip() argument 4753 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || bnx2_reset_chip() 4754 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { bnx2_reset_chip() 4755 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, bnx2_reset_chip() 4760 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); bnx2_reset_chip() 4763 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_reset_chip() 4765 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); bnx2_reset_chip() 4766 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_reset_chip() 4770 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); bnx2_reset_chip() 4777 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); bnx2_reset_chip() 4781 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE, bnx2_reset_chip() 4786 val = BNX2_RD(bp, BNX2_MISC_ID); bnx2_reset_chip() 4788 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_reset_chip() 4789 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); bnx2_reset_chip() 4790 BNX2_RD(bp, BNX2_MISC_COMMAND); bnx2_reset_chip() 4796 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); bnx2_reset_chip() 4804 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); bnx2_reset_chip() 4810 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_reset_chip() 4811 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) bnx2_reset_chip() 4816 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG); bnx2_reset_chip() 4831 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0); bnx2_reset_chip() 4838 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0); bnx2_reset_chip() 4842 spin_lock_bh(&bp->phy_lock); bnx2_reset_chip() 4843 old_port = bp->phy_port; bnx2_reset_chip() 4844 bnx2_init_fw_cap(bp); bnx2_reset_chip() 4845 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && bnx2_reset_chip() 4846 old_port != bp->phy_port) bnx2_reset_chip() 4847 bnx2_set_default_remote_link(bp); bnx2_reset_chip() 4848 spin_unlock_bh(&bp->phy_lock); bnx2_reset_chip() 4850 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_reset_chip() 4853 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); bnx2_reset_chip() 4856 rc = bnx2_alloc_bad_rbuf(bp); bnx2_reset_chip() 4859 if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_reset_chip() 4860 bnx2_setup_msix_tbl(bp); bnx2_reset_chip() 4862 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL, bnx2_reset_chip() 4870 bnx2_init_chip(struct bnx2 *bp) bnx2_init_chip() argument 4876 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); bnx2_init_chip() 4889 if ((bp->flags & BNX2_FLAG_PCIX) && 
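bnx2_fw_sync(), called around the reset above with the ..._WAIT0/_WAIT1 codes, is a driver-to-bootcode handshake through shared memory: post a message tagged with a rolling sequence number, then poll until the firmware echoes that sequence back. A loose userspace model under those assumptions; the synchronous bootcode_ack() stands in for what the real firmware does asynchronously.

#include <stdint.h>

static uint32_t drv_mb, fw_mb;          /* model the two shmem mailbox words */
static void bootcode_ack(void) { fw_mb = drv_mb; }      /* fake firmware side */

static int fw_sync_model(uint32_t msg, int max_polls)
{
        static uint32_t seq;
        int i;

        seq = (seq + 1) & 0xffff;       /* rolling sequence in the low bits */
        drv_mb = msg | seq;             /* post message + sequence */
        bootcode_ack();                 /* on hw the bootcode does this async */
        for (i = 0; i < max_polls; i++)
                if ((fw_mb & 0xffff) == seq)
                        return 0;       /* firmware echoed our sequence */
        return -1;                      /* no ack: treat firmware as dead */
}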
(bp->bus_speed_mhz == 133)) bnx2_init_chip() 4892 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) && bnx2_init_chip() 4893 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) && bnx2_init_chip() 4894 !(bp->flags & BNX2_FLAG_PCIX)) bnx2_init_chip() 4897 BNX2_WR(bp, BNX2_DMA_CONFIG, val); bnx2_init_chip() 4899 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_chip() 4900 val = BNX2_RD(bp, BNX2_TDMA_CONFIG); bnx2_init_chip() 4902 BNX2_WR(bp, BNX2_TDMA_CONFIG, val); bnx2_init_chip() 4905 if (bp->flags & BNX2_FLAG_PCIX) { bnx2_init_chip() 4908 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, bnx2_init_chip() 4910 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, bnx2_init_chip() 4914 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, bnx2_init_chip() 4921 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip() 4922 rc = bnx2_init_5709_context(bp); bnx2_init_chip() 4926 bnx2_init_context(bp); bnx2_init_chip() 4928 if ((rc = bnx2_init_cpus(bp)) != 0) bnx2_init_chip() 4931 bnx2_init_nvram(bp); bnx2_init_chip() 4933 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_init_chip() 4935 val = BNX2_RD(bp, BNX2_MQ_CONFIG); bnx2_init_chip() 4938 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip() 4940 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) bnx2_init_chip() 4944 BNX2_WR(bp, BNX2_MQ_CONFIG, val); bnx2_init_chip() 4947 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); bnx2_init_chip() 4948 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val); bnx2_init_chip() 4951 BNX2_WR(bp, BNX2_RV2P_CONFIG, val); bnx2_init_chip() 4954 val = BNX2_RD(bp, BNX2_TBDR_CONFIG); bnx2_init_chip() 4957 BNX2_WR(bp, BNX2_TBDR_CONFIG, val); bnx2_init_chip() 4959 val = bp->mac_addr[0] + bnx2_init_chip() 4960 (bp->mac_addr[1] << 8) + bnx2_init_chip() 4961 (bp->mac_addr[2] << 16) + bnx2_init_chip() 4962 bp->mac_addr[3] + bnx2_init_chip() 4963 (bp->mac_addr[4] << 8) + bnx2_init_chip() 4964 (bp->mac_addr[5] << 16); bnx2_init_chip() 4965 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); bnx2_init_chip() 4968 mtu = bp->dev->mtu; bnx2_init_chip() 4972 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); bnx2_init_chip() 4977 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); bnx2_init_chip() 4978 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); bnx2_init_chip() 4979 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu)); bnx2_init_chip() 4981 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size); bnx2_init_chip() 4983 bp->bnx2_napi[i].last_status_idx = 0; bnx2_init_chip() 4985 bp->idle_chk_status_idx = 0xffff; bnx2_init_chip() 4988 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); bnx2_init_chip() 4990 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L, bnx2_init_chip() 4991 (u64) bp->status_blk_mapping & 0xffffffff); bnx2_init_chip() 4992 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); bnx2_init_chip() 4994 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L, bnx2_init_chip() 4995 (u64) bp->stats_blk_mapping & 0xffffffff); bnx2_init_chip() 4996 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H, bnx2_init_chip() 4997 (u64) bp->stats_blk_mapping >> 32); bnx2_init_chip() 4999 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, bnx2_init_chip() 5000 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); bnx2_init_chip() 5002 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, bnx2_init_chip() 5003 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); bnx2_init_chip() 5005 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP, bnx2_init_chip() 5006 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); bnx2_init_chip() 5008 BNX2_WR(bp, BNX2_HC_TX_TICKS, 
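The BNX2_EMAC_BACKOFF_SEED value above is simply a fold of the six MAC address bytes, so per-port seeds differ and backoff timers decorrelate. Recomputed as a standalone helper:

#include <stdint.h>

static uint32_t emac_backoff_seed(const uint8_t mac[6])
{
        return  mac[0] + ((uint32_t)mac[1] << 8) + ((uint32_t)mac[2] << 16) +
                mac[3] + ((uint32_t)mac[4] << 8) + ((uint32_t)mac[5] << 16);
}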
(bp->tx_ticks_int << 16) | bp->tx_ticks); bnx2_init_chip() 5010 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); bnx2_init_chip() 5012 BNX2_WR(bp, BNX2_HC_COM_TICKS, bnx2_init_chip() 5013 (bp->com_ticks_int << 16) | bp->com_ticks); bnx2_init_chip() 5015 BNX2_WR(bp, BNX2_HC_CMD_TICKS, bnx2_init_chip() 5016 (bp->cmd_ticks_int << 16) | bp->cmd_ticks); bnx2_init_chip() 5018 if (bp->flags & BNX2_FLAG_BROKEN_STATS) bnx2_init_chip() 5019 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0); bnx2_init_chip() 5021 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); bnx2_init_chip() 5022 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ bnx2_init_chip() 5024 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) bnx2_init_chip() 5031 if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_init_chip() 5032 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, bnx2_init_chip() 5038 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) bnx2_init_chip() 5041 BNX2_WR(bp, BNX2_HC_CONFIG, val); bnx2_init_chip() 5043 if (bp->rx_ticks < 25) bnx2_init_chip() 5044 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); bnx2_init_chip() 5046 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0); bnx2_init_chip() 5048 for (i = 1; i < bp->irq_nvecs; i++) { bnx2_init_chip() 5052 BNX2_WR(bp, base, bnx2_init_chip() 5057 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, bnx2_init_chip() 5058 (bp->tx_quick_cons_trip_int << 16) | bnx2_init_chip() 5059 bp->tx_quick_cons_trip); bnx2_init_chip() 5061 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF, bnx2_init_chip() 5062 (bp->tx_ticks_int << 16) | bp->tx_ticks); bnx2_init_chip() 5064 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, bnx2_init_chip() 5065 (bp->rx_quick_cons_trip_int << 16) | bnx2_init_chip() 5066 bp->rx_quick_cons_trip); bnx2_init_chip() 5068 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF, bnx2_init_chip() 5069 (bp->rx_ticks_int << 16) | bp->rx_ticks); bnx2_init_chip() 5073 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); bnx2_init_chip() 5075 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); bnx2_init_chip() 5078 bnx2_set_rx_mode(bp->dev); bnx2_init_chip() 5080 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip() 5081 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_init_chip() 5083 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); bnx2_init_chip() 5085 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, bnx2_init_chip() 5088 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); bnx2_init_chip() 5089 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS); bnx2_init_chip() 5093 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_init_chip() 5099 bnx2_clear_ring_states(struct bnx2 *bp) bnx2_clear_ring_states() argument 5107 bnapi = &bp->bnx2_napi[i]; bnx2_clear_ring_states() 5122 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) bnx2_init_tx_context() argument 5127 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_tx_context() 5139 bnx2_ctx_wr(bp, cid_addr, offset0, val); bnx2_init_tx_context() 5142 bnx2_ctx_wr(bp, cid_addr, offset1, val); bnx2_init_tx_context() 5145 bnx2_ctx_wr(bp, cid_addr, offset2, val); bnx2_init_tx_context() 5148 bnx2_ctx_wr(bp, cid_addr, offset3, val); bnx2_init_tx_context() 5152 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) bnx2_init_tx_ring() argument 5159 bnapi = &bp->bnx2_napi[ring_num]; bnx2_init_tx_ring() 5167 bp->tx_wake_thresh = bp->tx_ring_size / 2; bnx2_init_tx_ring() 5180 bnx2_init_tx_context(bp, cid, txr); bnx2_init_tx_ring() 5208 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) bnx2_init_rx_ring() argument 5213 struct bnx2_napi 
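Every host-coalescing register written above packs the during-interrupt value into the high 16 bits and the normal value into the low 16 bits. The packing as a one-liner, with a usage note:

#include <stdint.h>

static uint32_t hc_pack(uint16_t during_irq, uint16_t normal)
{
        return ((uint32_t)during_irq << 16) | normal;
}
/* e.g. BNX2_HC_TX_TICKS takes hc_pack(bp->tx_ticks_int, bp->tx_ticks) */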
*bnapi = &bp->bnx2_napi[ring_num]; bnx2_init_rx_ring() 5224 bp->rx_buf_use_size, bp->rx_max_ring); bnx2_init_rx_ring() 5226 bnx2_init_rx_context(bp, cid); bnx2_init_rx_ring() 5228 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_rx_ring() 5229 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5); bnx2_init_rx_ring() 5230 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); bnx2_init_rx_ring() 5233 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); bnx2_init_rx_ring() 5234 if (bp->rx_pg_ring_size) { bnx2_init_rx_ring() 5237 PAGE_SIZE, bp->rx_max_pg_ring); bnx2_init_rx_ring() 5238 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; bnx2_init_rx_ring() 5239 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); bnx2_init_rx_ring() 5240 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, bnx2_init_rx_ring() 5244 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); bnx2_init_rx_ring() 5247 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); bnx2_init_rx_ring() 5249 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_rx_ring() 5250 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); bnx2_init_rx_ring() 5254 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); bnx2_init_rx_ring() 5257 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); bnx2_init_rx_ring() 5260 for (i = 0; i < bp->rx_pg_ring_size; i++) { bnx2_init_rx_ring() 5261 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { bnx2_init_rx_ring() 5262 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", bnx2_init_rx_ring() 5263 ring_num, i, bp->rx_pg_ring_size); bnx2_init_rx_ring() 5272 for (i = 0; i < bp->rx_ring_size; i++) { bnx2_init_rx_ring() 5273 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) { bnx2_init_rx_ring() 5274 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", bnx2_init_rx_ring() 5275 ring_num, i, bp->rx_ring_size); bnx2_init_rx_ring() 5287 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); bnx2_init_rx_ring() 5288 BNX2_WR16(bp, rxr->rx_bidx_addr, prod); bnx2_init_rx_ring() 5290 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); bnx2_init_rx_ring() 5294 bnx2_init_all_rings(struct bnx2 *bp) bnx2_init_all_rings() argument 5299 bnx2_clear_ring_states(bp); bnx2_init_all_rings() 5301 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0); bnx2_init_all_rings() 5302 for (i = 0; i < bp->num_tx_rings; i++) bnx2_init_all_rings() 5303 bnx2_init_tx_ring(bp, i); bnx2_init_all_rings() 5305 if (bp->num_tx_rings > 1) bnx2_init_all_rings() 5306 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | bnx2_init_all_rings() 5309 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); bnx2_init_all_rings() 5310 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); bnx2_init_all_rings() 5312 for (i = 0; i < bp->num_rx_rings; i++) bnx2_init_all_rings() 5313 bnx2_init_rx_ring(bp, i); bnx2_init_all_rings() 5315 if (bp->num_rx_rings > 1) { bnx2_init_all_rings() 5321 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; bnx2_init_all_rings() 5323 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); bnx2_init_all_rings() 5324 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | bnx2_init_all_rings() 5335 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val); bnx2_init_all_rings() 5360 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) bnx2_set_rx_ring_size() argument 5365 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; bnx2_set_rx_ring_size() 5370 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; bnx2_set_rx_ring_size() 5371 bp->rx_pg_ring_size = 0; bnx2_set_rx_ring_size() 5372 bp->rx_max_pg_ring = 0; bnx2_set_rx_ring_size() 5373 
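The RSS setup in bnx2_init_all_rings() above spreads indirection-table entries round-robin across the rx rings (ring 0 is kept out of RSS, hence num_rx_rings - 1) and flushes one 32-bit word per eight entries. A model assuming the 4-bit-per-entry packing implied by that eight-entry flush and the (i >> 3) word index:

#include <stdint.h>
#include <stdio.h>

/* entries must be a multiple of 8 so every word gets flushed */
static void fill_rss_tbl(uint32_t *words, int entries, int num_rx_rings)
{
        uint32_t tbl_32 = 0;
        int i;

        for (i = 0; i < entries; i++) {
                int shift = (i % 8) * 4;        /* nibble position in word */
                tbl_32 |= (uint32_t)(i % (num_rx_rings - 1)) << shift;
                if ((i % 8) == 7) {             /* word full: flush it */
                        words[i >> 3] = tbl_32;
                        tbl_32 = 0;
                }
        }
}

int main(void)
{
        uint32_t w[4];

        fill_rss_tbl(w, 32, 5);                 /* 4 RSS rings: 0,1,2,3,... */
        printf("%08x\n", w[0]);                 /* -> 32103210 */
        return 0;
}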
bp->rx_max_pg_ring_idx = 0; bnx2_set_rx_ring_size() 5374 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) { bnx2_set_rx_ring_size() 5375 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; bnx2_set_rx_ring_size() 5381 bp->rx_pg_ring_size = jumbo_size; bnx2_set_rx_ring_size() 5382 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, bnx2_set_rx_ring_size() 5384 bp->rx_max_pg_ring_idx = bnx2_set_rx_ring_size() 5385 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1; bnx2_set_rx_ring_size() 5387 bp->rx_copy_thresh = 0; bnx2_set_rx_ring_size() 5390 bp->rx_buf_use_size = rx_size; bnx2_set_rx_ring_size() 5392 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + bnx2_set_rx_ring_size() 5394 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bnx2_set_rx_ring_size() 5395 bp->rx_ring_size = size; bnx2_set_rx_ring_size() 5396 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); bnx2_set_rx_ring_size() 5397 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1; bnx2_set_rx_ring_size() 5401 bnx2_free_tx_skbs(struct bnx2 *bp) bnx2_free_tx_skbs() argument 5405 for (i = 0; i < bp->num_tx_rings; i++) { bnx2_free_tx_skbs() 5406 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_tx_skbs() 5423 dma_unmap_single(&bp->pdev->dev, bnx2_free_tx_skbs() 5434 dma_unmap_page(&bp->pdev->dev, bnx2_free_tx_skbs() 5441 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); bnx2_free_tx_skbs() 5446 bnx2_free_rx_skbs(struct bnx2 *bp) bnx2_free_rx_skbs() argument 5450 for (i = 0; i < bp->num_rx_rings; i++) { bnx2_free_rx_skbs() 5451 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_rx_skbs() 5458 for (j = 0; j < bp->rx_max_ring_idx; j++) { bnx2_free_rx_skbs() 5465 dma_unmap_single(&bp->pdev->dev, bnx2_free_rx_skbs() 5467 bp->rx_buf_use_size, bnx2_free_rx_skbs() 5474 for (j = 0; j < bp->rx_max_pg_ring_idx; j++) bnx2_free_rx_skbs() 5475 bnx2_free_rx_page(bp, rxr, j); bnx2_free_rx_skbs() 5480 bnx2_free_skbs(struct bnx2 *bp) bnx2_free_skbs() argument 5482 bnx2_free_tx_skbs(bp); bnx2_free_skbs() 5483 bnx2_free_rx_skbs(bp); bnx2_free_skbs() 5487 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) bnx2_reset_nic() argument 5491 rc = bnx2_reset_chip(bp, reset_code); bnx2_reset_nic() 5492 bnx2_free_skbs(bp); bnx2_reset_nic() 5496 if ((rc = bnx2_init_chip(bp)) != 0) bnx2_reset_nic() 5499 bnx2_init_all_rings(bp); bnx2_reset_nic() 5504 bnx2_init_nic(struct bnx2 *bp, int reset_phy) bnx2_init_nic() argument 5508 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) bnx2_init_nic() 5511 spin_lock_bh(&bp->phy_lock); bnx2_init_nic() 5512 bnx2_init_phy(bp, reset_phy); bnx2_init_nic() 5513 bnx2_set_link(bp); bnx2_init_nic() 5514 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_init_nic() 5515 bnx2_remote_phy_event(bp); bnx2_init_nic() 5516 spin_unlock_bh(&bp->phy_lock); bnx2_init_nic() 5521 bnx2_shutdown_chip(struct bnx2 *bp) bnx2_shutdown_chip() argument 5525 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_shutdown_chip() 5527 else if (bp->wol) bnx2_shutdown_chip() 5532 return bnx2_reset_chip(bp, reset_code); bnx2_shutdown_chip() 5536 bnx2_test_registers(struct bnx2 *bp) bnx2_test_registers() argument 5657 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_registers() 5671 save_val = readl(bp->regview + offset); bnx2_test_registers() 5673 writel(0, bp->regview + offset); bnx2_test_registers() 5675 val = readl(bp->regview + offset); bnx2_test_registers() 5684 writel(0xffffffff, bp->regview + offset); bnx2_test_registers() 5686 val = readl(bp->regview + offset); bnx2_test_registers() 
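bnx2_set_rx_ring_size() above sizes the head buffer from the MTU plus Ethernet header, the BNX2_RX_OFFSET pad, and 8 bytes of slack, and for jumbo MTUs spills the rest onto the page ring; the mtu - 40 leaves roughly an IP+TCP header's worth in the head buffer. A simplified model of the arithmetic that ignores the skb_shared_info overhead folded into rx_space:

#include <stdint.h>

struct rx_geometry {
        uint32_t buf_size;      /* head buffer payload bytes */
        uint32_t pages;         /* jumbo page-ring entries per packet */
};

static struct rx_geometry size_rx(uint32_t mtu, uint32_t eth_hlen,
                                  uint32_t rx_offset, uint32_t page_size)
{
        struct rx_geometry g;
        uint32_t rx_size = mtu + eth_hlen + rx_offset + 8;

        if (rx_size > page_size) {              /* jumbo: split across pages */
                uint32_t tail = mtu - 40;       /* body beyond head buffer */
                g.pages = (tail + page_size - 1) / page_size;
        } else {
                g.pages = 0;                    /* fits in a single buffer */
        }
        g.buf_size = rx_size;
        return g;
}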
5695 writel(save_val, bp->regview + offset); bnx2_test_registers() 5699 writel(save_val, bp->regview + offset); bnx2_test_registers() 5707 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) bnx2_do_mem_test() argument 5718 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); bnx2_do_mem_test() 5720 if (bnx2_reg_rd_ind(bp, start + offset) != bnx2_do_mem_test() 5730 bnx2_test_memory(struct bnx2 *bp) bnx2_test_memory() argument 5756 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_memory() 5762 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, bnx2_test_memory() 5775 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) bnx2_run_loopback() argument 5787 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; bnx2_run_loopback() 5796 bp->loopback = MAC_LOOPBACK; bnx2_run_loopback() 5797 bnx2_set_mac_loopback(bp); bnx2_run_loopback() 5800 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_run_loopback() 5803 bp->loopback = PHY_LOOPBACK; bnx2_run_loopback() 5804 bnx2_set_phy_loopback(bp); bnx2_run_loopback() 5809 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4); bnx2_run_loopback() 5810 skb = netdev_alloc_skb(bp->dev, pkt_size); bnx2_run_loopback() 5814 memcpy(packet, bp->dev->dev_addr, ETH_ALEN); bnx2_run_loopback() 5819 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, bnx2_run_loopback() 5821 if (dma_mapping_error(&bp->pdev->dev, map)) { bnx2_run_loopback() 5826 BNX2_WR(bp, BNX2_HC_COMMAND, bnx2_run_loopback() 5827 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); bnx2_run_loopback() 5829 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_run_loopback() 5847 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); bnx2_run_loopback() 5848 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); bnx2_run_loopback() 5852 BNX2_WR(bp, BNX2_HC_COMMAND, bnx2_run_loopback() 5853 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); bnx2_run_loopback() 5855 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_run_loopback() 5859 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); bnx2_run_loopback() 5876 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2_run_loopback() 5878 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); bnx2_run_loopback() 5903 bp->loopback = 0; bnx2_run_loopback() 5913 bnx2_test_loopback(struct bnx2 *bp) bnx2_test_loopback() argument 5917 if (!netif_running(bp->dev)) bnx2_test_loopback() 5920 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_test_loopback() 5921 spin_lock_bh(&bp->phy_lock); bnx2_test_loopback() 5922 bnx2_init_phy(bp, 1); bnx2_test_loopback() 5923 spin_unlock_bh(&bp->phy_lock); bnx2_test_loopback() 5924 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) bnx2_test_loopback() 5926 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) bnx2_test_loopback() 5935 bnx2_test_nvram(struct bnx2 *bp) bnx2_test_nvram() argument 5942 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) bnx2_test_nvram() 5951 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) bnx2_test_nvram() 5970 bnx2_test_link(struct bnx2 *bp) bnx2_test_link() argument 5974 if (!netif_running(bp->dev)) bnx2_test_link() 5977 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_test_link() 5978 if (bp->link_up) bnx2_test_link() 5982 spin_lock_bh(&bp->phy_lock); bnx2_test_link() 5983 bnx2_enable_bmsr1(bp); bnx2_test_link() 5984 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_test_link() 5985 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_test_link() 5986 bnx2_disable_bmsr1(bp); bnx2_test_link() 5987 spin_unlock_bh(&bp->phy_lock); bnx2_test_link() 5996 bnx2_test_intr(struct bnx2 *bp) bnx2_test_intr() argument 6001 if (!netif_running(bp->dev)) 
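bnx2_test_registers() above runs the classic mask walk: save the register, write all-zeros and all-ones, and require that read/write bits follow the writes while read-only bits keep their saved value. The pass/fail predicate, extracted as a pure function:

#include <stdint.h>

static int reg_behaves(uint32_t read_after_0, uint32_t read_after_1s,
                       uint32_t saved, uint32_t rw_mask, uint32_t ro_mask)
{
        if (read_after_0 & rw_mask)
                return 0;       /* writable bits failed to clear */
        if ((read_after_0 & ro_mask) != (saved & ro_mask))
                return 0;       /* read-only bits moved on the 0 write */
        if ((read_after_1s & rw_mask) != rw_mask)
                return 0;       /* writable bits failed to set */
        if ((read_after_1s & ro_mask) != (saved & ro_mask))
                return 0;       /* read-only bits moved on the 1s write */
        return 1;
}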
bnx2_test_intr() 6004 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; bnx2_test_intr() 6007 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); bnx2_test_intr() 6008 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_test_intr() 6011 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != bnx2_test_intr() 6027 bnx2_5706_serdes_has_link(struct bnx2 *bp) bnx2_5706_serdes_has_link() argument 6031 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) bnx2_5706_serdes_has_link() 6034 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); bnx2_5706_serdes_has_link() 6035 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); bnx2_5706_serdes_has_link() 6040 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_5706_serdes_has_link() 6041 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_5706_serdes_has_link() 6042 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_5706_serdes_has_link() 6047 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1); bnx2_5706_serdes_has_link() 6048 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); bnx2_5706_serdes_has_link() 6049 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); bnx2_5706_serdes_has_link() 6058 bnx2_5706_serdes_timer(struct bnx2 *bp) bnx2_5706_serdes_timer() argument 6062 spin_lock(&bp->phy_lock); bnx2_5706_serdes_timer() 6063 if (bp->serdes_an_pending) { bnx2_5706_serdes_timer() 6064 bp->serdes_an_pending--; bnx2_5706_serdes_timer() 6066 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { bnx2_5706_serdes_timer() 6069 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5706_serdes_timer() 6071 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706_serdes_timer() 6074 if (bnx2_5706_serdes_has_link(bp)) { bnx2_5706_serdes_timer() 6077 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_5706_serdes_timer() 6078 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_5706_serdes_timer() 6082 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && bnx2_5706_serdes_timer() 6083 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { bnx2_5706_serdes_timer() 6086 bnx2_write_phy(bp, 0x17, 0x0f01); bnx2_5706_serdes_timer() 6087 bnx2_read_phy(bp, 0x15, &phy2); bnx2_5706_serdes_timer() 6091 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706_serdes_timer() 6093 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_5706_serdes_timer() 6095 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_5706_serdes_timer() 6098 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5706_serdes_timer() 6103 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_5706_serdes_timer() 6104 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); bnx2_5706_serdes_timer() 6105 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); bnx2_5706_serdes_timer() 6107 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) { bnx2_5706_serdes_timer() 6108 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) { bnx2_5706_serdes_timer() 6109 bnx2_5706s_force_link_dn(bp, 1); bnx2_5706_serdes_timer() 6110 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; bnx2_5706_serdes_timer() 6112 bnx2_set_link(bp); bnx2_5706_serdes_timer() 6113 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC)) bnx2_5706_serdes_timer() 6114 bnx2_set_link(bp); bnx2_5706_serdes_timer() 6116 spin_unlock(&bp->phy_lock); bnx2_5706_serdes_timer() 6120 bnx2_5708_serdes_timer(struct bnx2 *bp) bnx2_5708_serdes_timer() argument 6122 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_5708_serdes_timer() 6125 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) { bnx2_5708_serdes_timer() 6126 bp->serdes_an_pending = 0; 
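bnx2_test_intr() above never calls the ISR directly: it snapshots the status-block index, asks the host coalescing block to fire immediately, and passes only if the index advances within the poll window. The same observe, poke, recheck shape in miniature:

static volatile int status_idx;                 /* bumped by the (fake) ISR */
static void fake_isr(void) { status_idx++; }    /* hw raises this via COAL_NOW */

static int intr_self_test(int polls)
{
        int before = status_idx, i;

        fake_isr();             /* real driver: BNX2_HC_COMMAND_COAL_NOW */
        for (i = 0; i < polls; i++)
                if (status_idx != before)
                        return 0;       /* interrupt path is alive */
        return -1;                      /* nothing moved: report failure */
}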
bnx2_5708_serdes_timer() 6130 spin_lock(&bp->phy_lock); bnx2_5708_serdes_timer() 6131 if (bp->serdes_an_pending) bnx2_5708_serdes_timer() 6132 bp->serdes_an_pending--; bnx2_5708_serdes_timer() 6133 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { bnx2_5708_serdes_timer() 6136 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5708_serdes_timer() 6138 bnx2_enable_forced_2g5(bp); bnx2_5708_serdes_timer() 6139 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT; bnx2_5708_serdes_timer() 6141 bnx2_disable_forced_2g5(bp); bnx2_5708_serdes_timer() 6142 bp->serdes_an_pending = 2; bnx2_5708_serdes_timer() 6143 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5708_serdes_timer() 6147 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5708_serdes_timer() 6149 spin_unlock(&bp->phy_lock); bnx2_5708_serdes_timer() 6155 struct bnx2 *bp = (struct bnx2 *) data; bnx2_timer() local 6157 if (!netif_running(bp->dev)) bnx2_timer() 6160 if (atomic_read(&bp->intr_sem) != 0) bnx2_timer() 6163 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == bnx2_timer() 6165 bnx2_chk_missed_msi(bp); bnx2_timer() 6167 bnx2_send_heart_beat(bp); bnx2_timer() 6169 bp->stats_blk->stat_FwRxDrop = bnx2_timer() 6170 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT); bnx2_timer() 6173 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) bnx2_timer() 6174 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | bnx2_timer() 6177 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_timer() 6178 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_timer() 6179 bnx2_5706_serdes_timer(bp); bnx2_timer() 6181 bnx2_5708_serdes_timer(bp); bnx2_timer() 6185 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_timer() 6189 bnx2_request_irq(struct bnx2 *bp) bnx2_request_irq() argument 6195 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX) bnx2_request_irq() 6200 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_request_irq() 6201 irq = &bp->irq_tbl[i]; bnx2_request_irq() 6203 &bp->bnx2_napi[i]); bnx2_request_irq() 6212 __bnx2_free_irq(struct bnx2 *bp) __bnx2_free_irq() argument 6217 for (i = 0; i < bp->irq_nvecs; i++) { __bnx2_free_irq() 6218 irq = &bp->irq_tbl[i]; __bnx2_free_irq() 6220 free_irq(irq->vector, &bp->bnx2_napi[i]); __bnx2_free_irq() 6226 bnx2_free_irq(struct bnx2 *bp) bnx2_free_irq() argument 6229 __bnx2_free_irq(bp); bnx2_free_irq() 6230 if (bp->flags & BNX2_FLAG_USING_MSI) bnx2_free_irq() 6231 pci_disable_msi(bp->pdev); bnx2_free_irq() 6232 else if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_free_irq() 6233 pci_disable_msix(bp->pdev); bnx2_free_irq() 6235 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI); bnx2_free_irq() 6239 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) bnx2_enable_msix() argument 6243 struct net_device *dev = bp->dev; bnx2_enable_msix() 6244 const int len = sizeof(bp->irq_tbl[0].name); bnx2_enable_msix() 6246 bnx2_setup_msix_tbl(bp); bnx2_enable_msix() 6247 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); bnx2_enable_msix() 6248 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); bnx2_enable_msix() 6249 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); bnx2_enable_msix() 6253 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL); bnx2_enable_msix() 6264 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, bnx2_enable_msix() 6273 bp->irq_nvecs = msix_vecs; bnx2_enable_msix() 6274 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; bnx2_enable_msix() 6276 bp->irq_tbl[i].vector = msix_ent[i].vector; bnx2_enable_msix() 6277 snprintf(bp->irq_tbl[i].name, len, "%s-%d", 
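bnx2_request_irq() above walks the irq_tbl[] filled in by bnx2_setup_int_mode(); only legacy INTx requests a shared line, since MSI/MSI-X vectors are exclusive. A sketch of table-driven registration; the unwind-on-failure here is a generic hardening choice (the real driver unwinds in its caller via bnx2_free_irq()), and register_vector()/unregister_vector() are stand-ins:

struct irq_slot {
        int vector;
        int (*handler)(void *ctx);
        void *ctx;
};

/* stand-ins for request_irq()/free_irq() */
static int register_vector(int vec, int (*h)(void *), void *ctx, int shared)
{ (void)vec; (void)h; (void)ctx; (void)shared; return 0; }
static void unregister_vector(int vec) { (void)vec; }

static int request_all(struct irq_slot *tbl, int n, int shared)
{
        int i, rc;

        for (i = 0; i < n; i++) {
                rc = register_vector(tbl[i].vector, tbl[i].handler,
                                     tbl[i].ctx, shared);
                if (rc) {
                        while (--i >= 0)        /* unwind the ones that stuck */
                                unregister_vector(tbl[i].vector);
                        return rc;
                }
        }
        return 0;
}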
dev->name, i); bnx2_enable_msix() 6278 bp->irq_tbl[i].handler = bnx2_msi_1shot; bnx2_enable_msix() 6283 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) bnx2_setup_int_mode() argument 6288 if (!bp->num_req_rx_rings) bnx2_setup_int_mode() 6289 msix_vecs = max(cpus + 1, bp->num_req_tx_rings); bnx2_setup_int_mode() 6290 else if (!bp->num_req_tx_rings) bnx2_setup_int_mode() 6291 msix_vecs = max(cpus, bp->num_req_rx_rings); bnx2_setup_int_mode() 6293 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings); bnx2_setup_int_mode() 6297 bp->irq_tbl[0].handler = bnx2_interrupt; bnx2_setup_int_mode() 6298 strcpy(bp->irq_tbl[0].name, bp->dev->name); bnx2_setup_int_mode() 6299 bp->irq_nvecs = 1; bnx2_setup_int_mode() 6300 bp->irq_tbl[0].vector = bp->pdev->irq; bnx2_setup_int_mode() 6302 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) bnx2_setup_int_mode() 6303 bnx2_enable_msix(bp, msix_vecs); bnx2_setup_int_mode() 6305 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && bnx2_setup_int_mode() 6306 !(bp->flags & BNX2_FLAG_USING_MSIX)) { bnx2_setup_int_mode() 6307 if (pci_enable_msi(bp->pdev) == 0) { bnx2_setup_int_mode() 6308 bp->flags |= BNX2_FLAG_USING_MSI; bnx2_setup_int_mode() 6309 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_setup_int_mode() 6310 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; bnx2_setup_int_mode() 6311 bp->irq_tbl[0].handler = bnx2_msi_1shot; bnx2_setup_int_mode() 6313 bp->irq_tbl[0].handler = bnx2_msi; bnx2_setup_int_mode() 6315 bp->irq_tbl[0].vector = bp->pdev->irq; bnx2_setup_int_mode() 6319 if (!bp->num_req_tx_rings) bnx2_setup_int_mode() 6320 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); bnx2_setup_int_mode() 6322 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings); bnx2_setup_int_mode() 6324 if (!bp->num_req_rx_rings) bnx2_setup_int_mode() 6325 bp->num_rx_rings = bp->irq_nvecs; bnx2_setup_int_mode() 6327 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings); bnx2_setup_int_mode() 6329 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings); bnx2_setup_int_mode() 6331 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings); bnx2_setup_int_mode() 6338 struct bnx2 *bp = netdev_priv(dev); bnx2_open() local 6341 rc = bnx2_request_firmware(bp); bnx2_open() 6347 bnx2_disable_int(bp); bnx2_open() 6349 rc = bnx2_setup_int_mode(bp, disable_msi); bnx2_open() 6352 bnx2_init_napi(bp); bnx2_open() 6353 bnx2_napi_enable(bp); bnx2_open() 6354 rc = bnx2_alloc_mem(bp); bnx2_open() 6358 rc = bnx2_request_irq(bp); bnx2_open() 6362 rc = bnx2_init_nic(bp, 1); bnx2_open() 6366 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_open() 6368 atomic_set(&bp->intr_sem, 0); bnx2_open() 6370 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block)); bnx2_open() 6372 bnx2_enable_int(bp); bnx2_open() 6374 if (bp->flags & BNX2_FLAG_USING_MSI) { bnx2_open() 6378 if (bnx2_test_intr(bp) != 0) { bnx2_open() 6379 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. 
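pci_enable_msix_range(pdev, entries, min, max), used in bnx2_enable_msix() above, returns either a vector count in [min, max] or a negative errno; it never grants fewer than min. The fallback decision therefore collapses to a single test:

static int pick_nvecs(int granted)
{
        /* granted: return value of pci_enable_msix_range(pdev, ent, min, max) */
        if (granted < 0)
                return 1;               /* no MSI-X: single INTx/MSI vector */
        return granted;                 /* contract guarantees granted >= min */
}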
Please report this failure to the PCI maintainer and include system chipset information.\n"); bnx2_open() 6381 bnx2_disable_int(bp); bnx2_open() 6382 bnx2_free_irq(bp); bnx2_open() 6384 bnx2_setup_int_mode(bp, 1); bnx2_open() 6386 rc = bnx2_init_nic(bp, 0); bnx2_open() 6389 rc = bnx2_request_irq(bp); bnx2_open() 6392 del_timer_sync(&bp->timer); bnx2_open() 6395 bnx2_enable_int(bp); bnx2_open() 6398 if (bp->flags & BNX2_FLAG_USING_MSI) bnx2_open() 6400 else if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_open() 6408 bnx2_napi_disable(bp); bnx2_open() 6409 bnx2_free_skbs(bp); bnx2_open() 6410 bnx2_free_irq(bp); bnx2_open() 6411 bnx2_free_mem(bp); bnx2_open() 6412 bnx2_del_napi(bp); bnx2_open() 6413 bnx2_release_firmware(bp); bnx2_open() 6420 struct bnx2 *bp = container_of(work, struct bnx2, reset_task); bnx2_reset_task() local 6425 if (!netif_running(bp->dev)) { bnx2_reset_task() 6430 bnx2_netif_stop(bp, true); bnx2_reset_task() 6432 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd); bnx2_reset_task() 6435 pci_restore_state(bp->pdev); bnx2_reset_task() 6436 pci_save_state(bp->pdev); bnx2_reset_task() 6438 rc = bnx2_init_nic(bp, 1); bnx2_reset_task() 6440 netdev_err(bp->dev, "failed to reset NIC, closing\n"); bnx2_reset_task() 6441 bnx2_napi_enable(bp); bnx2_reset_task() 6442 dev_close(bp->dev); bnx2_reset_task() 6447 atomic_set(&bp->intr_sem, 1); bnx2_reset_task() 6448 bnx2_netif_start(bp, true); bnx2_reset_task() 6455 bnx2_dump_ftq(struct bnx2 *bp) bnx2_dump_ftq() argument 6459 struct net_device *dev = bp->dev; bnx2_dump_ftq() 6483 bnx2_reg_rd_ind(bp, ftq_arr[i].off)); bnx2_dump_ftq() 6488 reg, bnx2_reg_rd_ind(bp, reg), bnx2_dump_ftq() 6489 bnx2_reg_rd_ind(bp, reg + 4), bnx2_dump_ftq() 6490 bnx2_reg_rd_ind(bp, reg + 8), bnx2_dump_ftq() 6491 bnx2_reg_rd_ind(bp, reg + 0x1c), bnx2_dump_ftq() 6492 bnx2_reg_rd_ind(bp, reg + 0x1c), bnx2_dump_ftq() 6493 bnx2_reg_rd_ind(bp, reg + 0x20)); bnx2_dump_ftq() 6498 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); bnx2_dump_ftq() 6503 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i); bnx2_dump_ftq() 6504 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE, bnx2_dump_ftq() 6506 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); bnx2_dump_ftq() 6507 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) & bnx2_dump_ftq() 6511 cid = BNX2_RD(bp, BNX2_TBDC_CID); bnx2_dump_ftq() 6512 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX); bnx2_dump_ftq() 6513 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE); bnx2_dump_ftq() 6522 bnx2_dump_state(struct bnx2 *bp) bnx2_dump_state() argument 6524 struct net_device *dev = bp->dev; bnx2_dump_state() 6527 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); bnx2_dump_state() 6529 atomic_read(&bp->intr_sem), val1); bnx2_dump_state() 6530 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); bnx2_dump_state() 6531 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); bnx2_dump_state() 6534 BNX2_RD(bp, BNX2_EMAC_TX_STATUS), bnx2_dump_state() 6535 BNX2_RD(bp, BNX2_EMAC_RX_STATUS)); bnx2_dump_state() 6537 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); bnx2_dump_state() 6539 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); bnx2_dump_state() 6540 if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_dump_state() 6542 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); bnx2_dump_state() 6548 struct bnx2 *bp = netdev_priv(dev); bnx2_tx_timeout() local 6550 bnx2_dump_ftq(bp); bnx2_tx_timeout() 6551 bnx2_dump_state(bp); bnx2_tx_timeout() 6552 bnx2_dump_mcp_state(bp); bnx2_tx_timeout() 6555 schedule_work(&bp->reset_task); bnx2_tx_timeout() 6565 struct bnx2 *bp = netdev_priv(dev); 
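The bnx2_open() path above degrades gracefully: bring the NIC up with MSI, generate a test interrupt, and if nothing arrives tear the setup down and repeat the bring-up with INTx forced. As a mode-fallback loop, assuming a hypothetical try_bring_up() that wraps the init, verify, and teardown steps:

enum int_mode { MODE_MSI, MODE_INTX };

/* stand-in: pretend the MSI test interrupt never arrives */
static int try_bring_up(enum int_mode m)
{
        return (m == MODE_INTX) ? 0 : -1;
}

static int open_with_fallback(void)
{
        if (try_bring_up(MODE_MSI) == 0)
                return 0;               /* preferred mode verified */
        /* MSI was silent; teardown happened inside try_bring_up() */
        return try_bring_up(MODE_INTX); /* redo bring-up with INTx forced */
}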
bnx2_start_xmit() local 6578 bnapi = &bp->bnx2_napi[i]; bnx2_start_xmit() 6582 if (unlikely(bnx2_tx_avail(bp, txr) < bnx2_start_xmit() 6637 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); bnx2_start_xmit() 6638 if (dma_mapping_error(&bp->pdev->dev, mapping)) { bnx2_start_xmit() 6666 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len, bnx2_start_xmit() 6668 if (dma_mapping_error(&bp->pdev->dev, mapping)) bnx2_start_xmit() 6689 BNX2_WR16(bp, txr->tx_bidx_addr, prod); bnx2_start_xmit() 6690 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); bnx2_start_xmit() 6696 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { bnx2_start_xmit() 6705 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) bnx2_start_xmit() 6719 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), bnx2_start_xmit() 6727 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), bnx2_start_xmit() 6740 struct bnx2 *bp = netdev_priv(dev); bnx2_close() local 6742 bnx2_disable_int_sync(bp); bnx2_close() 6743 bnx2_napi_disable(bp); bnx2_close() 6745 del_timer_sync(&bp->timer); bnx2_close() 6746 bnx2_shutdown_chip(bp); bnx2_close() 6747 bnx2_free_irq(bp); bnx2_close() 6748 bnx2_free_skbs(bp); bnx2_close() 6749 bnx2_free_mem(bp); bnx2_close() 6750 bnx2_del_napi(bp); bnx2_close() 6751 bp->link_up = 0; bnx2_close() 6752 netif_carrier_off(bp->dev); bnx2_close() 6757 bnx2_save_stats(struct bnx2 *bp) bnx2_save_stats() argument 6759 u32 *hw_stats = (u32 *) bp->stats_blk; bnx2_save_stats() 6760 u32 *temp_stats = (u32 *) bp->temp_stats_blk; bnx2_save_stats() 6784 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ 6785 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) 6788 (unsigned long) (bp->stats_blk->ctr + \ 6789 bp->temp_stats_blk->ctr) 6794 struct bnx2 *bp = netdev_priv(dev); bnx2_get_stats64() local 6796 if (bp->stats_blk == NULL) bnx2_get_stats64() 6843 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || bnx2_get_stats64() 6844 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) bnx2_get_stats64() 6869 struct bnx2 *bp = netdev_priv(dev); bnx2_get_settings() local 6873 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_get_settings() 6876 } else if (bp->phy_port == PORT_FIBRE) bnx2_get_settings() 6884 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) bnx2_get_settings() 6898 spin_lock_bh(&bp->phy_lock); bnx2_get_settings() 6899 cmd->port = bp->phy_port; bnx2_get_settings() 6900 cmd->advertising = bp->advertising; bnx2_get_settings() 6902 if (bp->autoneg & AUTONEG_SPEED) { bnx2_get_settings() 6909 ethtool_cmd_speed_set(cmd, bp->line_speed); bnx2_get_settings() 6910 cmd->duplex = bp->duplex; bnx2_get_settings() 6911 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) { bnx2_get_settings() 6912 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX) bnx2_get_settings() 6922 spin_unlock_bh(&bp->phy_lock); bnx2_get_settings() 6925 cmd->phy_address = bp->phy_addr; bnx2_get_settings() 6933 struct bnx2 *bp = netdev_priv(dev); bnx2_set_settings() local 6934 u8 autoneg = bp->autoneg; bnx2_set_settings() 6935 u8 req_duplex = bp->req_duplex; bnx2_set_settings() 6936 u16 req_line_speed = bp->req_line_speed; bnx2_set_settings() 6937 u32 advertising = bp->advertising; bnx2_set_settings() 6940 spin_lock_bh(&bp->phy_lock); bnx2_set_settings() 6945 if (cmd->port != bp->phy_port && bnx2_set_settings() 6946 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) bnx2_set_settings() 6952 if (!netif_running(dev) && cmd->port != bp->phy_port) bnx2_set_settings() 6979 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_set_settings() 6990 bp->autoneg = autoneg; 
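The queue stop/wake logic at the tail of bnx2_start_xmit() above guards against both ring overflow and wake/stop thrash: stop when a worst-case packet (MAX_SKB_FRAGS descriptors) might not fit, then immediately re-check, because the completion path may have freed entries in between, and wake only above the higher tx_wake_thresh. The hysteresis, modeled:

#include <stdbool.h>

struct txq {
        int avail;              /* free descriptors right now */
        bool stopped;
        int max_frags;          /* worst-case descriptors per packet */
        int wake_thresh;        /* must exceed this to (re)open the queue */
};

static void tx_maybe_stop(struct txq *q)
{
        if (q->avail > q->max_frags)
                return;                 /* plenty of room: nothing to do */
        q->stopped = true;              /* close the queue... */
        if (q->avail > q->wake_thresh)
                q->stopped = false;     /* ...unless completions raced us */
}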
bnx2_set_settings() 6991 bp->advertising = advertising; bnx2_set_settings() 6992 bp->req_line_speed = req_line_speed; bnx2_set_settings() 6993 bp->req_duplex = req_duplex; bnx2_set_settings() 7000 err = bnx2_setup_phy(bp, cmd->port); bnx2_set_settings() 7003 spin_unlock_bh(&bp->phy_lock); bnx2_set_settings() 7011 struct bnx2 *bp = netdev_priv(dev); bnx2_get_drvinfo() local 7015 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); bnx2_get_drvinfo() 7016 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); bnx2_get_drvinfo() 7032 struct bnx2 *bp = netdev_priv(dev); bnx2_get_regs() local 7062 if (!netif_running(bp->dev)) bnx2_get_regs() 7069 *p++ = BNX2_RD(bp, offset); bnx2_get_regs() 7082 struct bnx2 *bp = netdev_priv(dev); bnx2_get_wol() local 7084 if (bp->flags & BNX2_FLAG_NO_WOL) { bnx2_get_wol() 7090 if (bp->wol) bnx2_get_wol() 7101 struct bnx2 *bp = netdev_priv(dev); bnx2_set_wol() local 7107 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_set_wol() 7110 bp->wol = 1; bnx2_set_wol() 7113 bp->wol = 0; bnx2_set_wol() 7116 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); bnx2_set_wol() 7124 struct bnx2 *bp = netdev_priv(dev); bnx2_nway_reset() local 7130 if (!(bp->autoneg & AUTONEG_SPEED)) { bnx2_nway_reset() 7134 spin_lock_bh(&bp->phy_lock); bnx2_nway_reset() 7136 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_nway_reset() 7139 rc = bnx2_setup_remote_phy(bp, bp->phy_port); bnx2_nway_reset() 7140 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset() 7145 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_nway_reset() 7146 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); bnx2_nway_reset() 7147 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset() 7151 spin_lock_bh(&bp->phy_lock); bnx2_nway_reset() 7153 bp->current_interval = BNX2_SERDES_AN_TIMEOUT; bnx2_nway_reset() 7154 bp->serdes_an_pending = 1; bnx2_nway_reset() 7155 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_nway_reset() 7158 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_nway_reset() 7160 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); bnx2_nway_reset() 7162 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset() 7170 struct bnx2 *bp = netdev_priv(dev); bnx2_get_link() local 7172 return bp->link_up; bnx2_get_link() 7178 struct bnx2 *bp = netdev_priv(dev); bnx2_get_eeprom_len() local 7180 if (bp->flash_info == NULL) bnx2_get_eeprom_len() 7183 return (int) bp->flash_size; bnx2_get_eeprom_len() 7190 struct bnx2 *bp = netdev_priv(dev); bnx2_get_eeprom() local 7195 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); bnx2_get_eeprom() 7204 struct bnx2 *bp = netdev_priv(dev); bnx2_set_eeprom() local 7209 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); bnx2_set_eeprom() 7217 struct bnx2 *bp = netdev_priv(dev); bnx2_get_coalesce() local 7221 coal->rx_coalesce_usecs = bp->rx_ticks; bnx2_get_coalesce() 7222 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; bnx2_get_coalesce() 7223 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; bnx2_get_coalesce() 7224 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; bnx2_get_coalesce() 7226 coal->tx_coalesce_usecs = bp->tx_ticks; bnx2_get_coalesce() 7227 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; bnx2_get_coalesce() 7228 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; bnx2_get_coalesce() 7229 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; bnx2_get_coalesce() 7231 coal->stats_block_coalesce_usecs = bp->stats_ticks; bnx2_get_coalesce() 7239 struct bnx2 *bp = netdev_priv(dev); 
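bnx2_get_wol()/bnx2_set_wol() above reduce wake-on-LAN to one capability, magic packet, and reject anything else up front; BNX2_FLAG_NO_WOL additionally vetoes even that. The validation, modeled (the bit value is illustrative, not the ethtool constant):

#include <errno.h>

#define WAKE_MAGIC_ONLY 0x1     /* illustrative: the only supported mode */

static int wol_validate(unsigned int wolopts, int no_wol_quirk, int *wol_out)
{
        if (wolopts & ~WAKE_MAGIC_ONLY)
                return -EINVAL;         /* chip only does magic packets */
        if ((wolopts & WAKE_MAGIC_ONLY) && no_wol_quirk)
                return -EINVAL;         /* fw/board cannot wake at all */
        *wol_out = !!(wolopts & WAKE_MAGIC_ONLY);
        return 0;
}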
bnx2_set_coalesce() local 7241 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; bnx2_set_coalesce() 7242 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; bnx2_set_coalesce() 7244 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; bnx2_set_coalesce() 7245 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; bnx2_set_coalesce() 7247 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; bnx2_set_coalesce() 7248 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; bnx2_set_coalesce() 7250 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; bnx2_set_coalesce() 7251 if (bp->rx_quick_cons_trip_int > 0xff) bnx2_set_coalesce() 7252 bp->rx_quick_cons_trip_int = 0xff; bnx2_set_coalesce() 7254 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; bnx2_set_coalesce() 7255 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; bnx2_set_coalesce() 7257 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; bnx2_set_coalesce() 7258 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; bnx2_set_coalesce() 7260 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; bnx2_set_coalesce() 7261 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; bnx2_set_coalesce() 7263 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; bnx2_set_coalesce() 7264 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = bnx2_set_coalesce() 7267 bp->stats_ticks = coal->stats_block_coalesce_usecs; bnx2_set_coalesce() 7268 if (bp->flags & BNX2_FLAG_BROKEN_STATS) { bnx2_set_coalesce() 7269 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) bnx2_set_coalesce() 7270 bp->stats_ticks = USEC_PER_SEC; bnx2_set_coalesce() 7272 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) bnx2_set_coalesce() 7273 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_set_coalesce() 7274 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_set_coalesce() 7276 if (netif_running(bp->dev)) { bnx2_set_coalesce() 7277 bnx2_netif_stop(bp, true); bnx2_set_coalesce() 7278 bnx2_init_nic(bp, 0); bnx2_set_coalesce() 7279 bnx2_netif_start(bp, true); bnx2_set_coalesce() 7288 struct bnx2 *bp = netdev_priv(dev); bnx2_get_ringparam() local 7293 ering->rx_pending = bp->rx_ring_size; bnx2_get_ringparam() 7294 ering->rx_jumbo_pending = bp->rx_pg_ring_size; bnx2_get_ringparam() 7297 ering->tx_pending = bp->tx_ring_size; bnx2_get_ringparam() 7301 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq) bnx2_change_ring_size() argument 7303 if (netif_running(bp->dev)) { bnx2_change_ring_size() 7305 bnx2_save_stats(bp); bnx2_change_ring_size() 7307 bnx2_netif_stop(bp, true); bnx2_change_ring_size() 7308 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_change_ring_size() 7310 bnx2_free_irq(bp); bnx2_change_ring_size() 7311 bnx2_del_napi(bp); bnx2_change_ring_size() 7313 __bnx2_free_irq(bp); bnx2_change_ring_size() 7315 bnx2_free_skbs(bp); bnx2_change_ring_size() 7316 bnx2_free_mem(bp); bnx2_change_ring_size() 7319 bnx2_set_rx_ring_size(bp, rx); bnx2_change_ring_size() 7320 bp->tx_ring_size = tx; bnx2_change_ring_size() 7322 if (netif_running(bp->dev)) { bnx2_change_ring_size() 7326 rc = bnx2_setup_int_mode(bp, disable_msi); bnx2_change_ring_size() 7327 bnx2_init_napi(bp); bnx2_change_ring_size() 7331 rc = bnx2_alloc_mem(bp); bnx2_change_ring_size() 7334 rc = bnx2_request_irq(bp); bnx2_change_ring_size() 7337 rc = bnx2_init_nic(bp, 0); bnx2_change_ring_size() 7340 bnx2_napi_enable(bp); bnx2_change_ring_size() 7341 dev_close(bp->dev); bnx2_change_ring_size() 7345 
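bnx2_set_coalesce() above bounds every user value to its hardware field: 10-bit tick counts (0x3ff) and 8-bit frame counts (0xff). Note the order of operations: the cast to u16 happens before the clamp, so an oversized value such as 0x10000 truncates to 0 rather than saturating. The same behavior as a helper:

#include <stdint.h>

static uint16_t clamp_field(unsigned int v, uint16_t max)
{
        uint16_t t = (uint16_t)v;       /* driver casts to u16 first... */
        return t > max ? max : t;       /* ...then clamps to the field width */
}
/* usage: rx_ticks = clamp_field(coal->rx_coalesce_usecs, 0x3ff); */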
mutex_lock(&bp->cnic_lock); bnx2_change_ring_size() 7347 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) bnx2_change_ring_size() 7348 bnx2_setup_cnic_irq_info(bp); bnx2_change_ring_size() 7349 mutex_unlock(&bp->cnic_lock); bnx2_change_ring_size() 7351 bnx2_netif_start(bp, true); bnx2_change_ring_size() 7359 struct bnx2 *bp = netdev_priv(dev); bnx2_set_ringparam() local 7368 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending, bnx2_set_ringparam() 7376 struct bnx2 *bp = netdev_priv(dev); bnx2_get_pauseparam() local 7378 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); bnx2_get_pauseparam() 7379 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); bnx2_get_pauseparam() 7380 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); bnx2_get_pauseparam() 7386 struct bnx2 *bp = netdev_priv(dev); bnx2_set_pauseparam() local 7388 bp->req_flow_ctrl = 0; bnx2_set_pauseparam() 7390 bp->req_flow_ctrl |= FLOW_CTRL_RX; bnx2_set_pauseparam() 7392 bp->req_flow_ctrl |= FLOW_CTRL_TX; bnx2_set_pauseparam() 7395 bp->autoneg |= AUTONEG_FLOW_CTRL; bnx2_set_pauseparam() 7398 bp->autoneg &= ~AUTONEG_FLOW_CTRL; bnx2_set_pauseparam() 7402 spin_lock_bh(&bp->phy_lock); bnx2_set_pauseparam() 7403 bnx2_setup_phy(bp, bp->phy_port); bnx2_set_pauseparam() 7404 spin_unlock_bh(&bp->phy_lock); bnx2_set_pauseparam() 7564 struct bnx2 *bp = netdev_priv(dev); bnx2_self_test() local 7570 bnx2_netif_stop(bp, true); bnx2_self_test() 7571 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); bnx2_self_test() 7572 bnx2_free_skbs(bp); bnx2_self_test() 7574 if (bnx2_test_registers(bp) != 0) { bnx2_self_test() 7578 if (bnx2_test_memory(bp) != 0) { bnx2_self_test() 7582 if ((buf[2] = bnx2_test_loopback(bp)) != 0) bnx2_self_test() 7585 if (!netif_running(bp->dev)) bnx2_self_test() 7586 bnx2_shutdown_chip(bp); bnx2_self_test() 7588 bnx2_init_nic(bp, 1); bnx2_self_test() 7589 bnx2_netif_start(bp, true); bnx2_self_test() 7594 if (bp->link_up) bnx2_self_test() 7600 if (bnx2_test_nvram(bp) != 0) { bnx2_self_test() 7604 if (bnx2_test_intr(bp) != 0) { bnx2_self_test() 7609 if (bnx2_test_link(bp) != 0) { bnx2_self_test() 7635 struct bnx2 *bp = netdev_priv(dev); bnx2_get_ethtool_stats() local 7637 u32 *hw_stats = (u32 *) bp->stats_blk; bnx2_get_ethtool_stats() 7638 u32 *temp_stats = (u32 *) bp->temp_stats_blk; bnx2_get_ethtool_stats() 7646 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_get_ethtool_stats() 7647 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) || bnx2_get_ethtool_stats() 7648 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) || bnx2_get_ethtool_stats() 7649 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) bnx2_get_ethtool_stats() 7681 struct bnx2 *bp = netdev_priv(dev); bnx2_set_phys_id() local 7685 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_set_phys_id() 7686 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); bnx2_set_phys_id() 7690 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | bnx2_set_phys_id() 7699 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); bnx2_set_phys_id() 7703 BNX2_WR(bp, BNX2_EMAC_LED, 0); bnx2_set_phys_id() 7704 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); bnx2_set_phys_id() 7714 struct bnx2 *bp = netdev_priv(dev); bnx2_set_features() local 7723 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && bnx2_set_features() 7725 bnx2_netif_stop(bp, false); bnx2_set_features() 7728 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); bnx2_set_features() 7729 bnx2_netif_start(bp, false); bnx2_set_features() 7739 struct bnx2 *bp = netdev_priv(dev); bnx2_get_channels() local 7743 if 
((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { bnx2_get_channels() 7752 channels->rx_count = bp->num_rx_rings; bnx2_get_channels() 7753 channels->tx_count = bp->num_tx_rings; bnx2_get_channels() 7761 struct bnx2 *bp = netdev_priv(dev); bnx2_set_channels() local 7766 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { bnx2_set_channels() 7774 bp->num_req_rx_rings = channels->rx_count; bnx2_set_channels() 7775 bp->num_req_tx_rings = channels->tx_count; bnx2_set_channels() 7778 rc = bnx2_change_ring_size(bp, bp->rx_ring_size, bnx2_set_channels() 7779 bp->tx_ring_size, true); bnx2_set_channels() 7817 struct bnx2 *bp = netdev_priv(dev); bnx2_ioctl() local 7822 data->phy_id = bp->phy_addr; bnx2_ioctl() 7828 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_ioctl() 7834 spin_lock_bh(&bp->phy_lock); bnx2_ioctl() 7835 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); bnx2_ioctl() 7836 spin_unlock_bh(&bp->phy_lock); bnx2_ioctl() 7844 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_ioctl() 7850 spin_lock_bh(&bp->phy_lock); bnx2_ioctl() 7851 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); bnx2_ioctl() 7852 spin_unlock_bh(&bp->phy_lock); bnx2_ioctl() 7868 struct bnx2 *bp = netdev_priv(dev); bnx2_change_mac_addr() local 7875 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_change_mac_addr() 7884 struct bnx2 *bp = netdev_priv(dev); bnx2_change_mtu() local 7891 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, bnx2_change_mtu() 7899 struct bnx2 *bp = netdev_priv(dev); poll_bnx2() local 7902 for (i = 0; i < bp->irq_nvecs; i++) { poll_bnx2() 7903 struct bnx2_irq *irq = &bp->irq_tbl[i]; poll_bnx2() 7906 irq->handler(irq->vector, &bp->bnx2_napi[i]); poll_bnx2() 7913 bnx2_get_5709_media(struct bnx2 *bp) bnx2_get_5709_media() argument 7915 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); bnx2_get_5709_media() 7922 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media() 7931 if (bp->func == 0) { bnx2_get_5709_media() 7936 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media() 7944 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media() 7951 bnx2_get_pci_speed(struct bnx2 *bp) bnx2_get_pci_speed() argument 7955 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS); bnx2_get_pci_speed() 7959 bp->flags |= BNX2_FLAG_PCIX; bnx2_get_pci_speed() 7961 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); bnx2_get_pci_speed() 7966 bp->bus_speed_mhz = 133; bnx2_get_pci_speed() 7970 bp->bus_speed_mhz = 100; bnx2_get_pci_speed() 7975 bp->bus_speed_mhz = 66; bnx2_get_pci_speed() 7980 bp->bus_speed_mhz = 50; bnx2_get_pci_speed() 7986 bp->bus_speed_mhz = 33; bnx2_get_pci_speed() 7992 bp->bus_speed_mhz = 66; bnx2_get_pci_speed() 7994 bp->bus_speed_mhz = 33; bnx2_get_pci_speed() 7998 bp->flags |= BNX2_FLAG_PCI_32BIT; bnx2_get_pci_speed() 8003 bnx2_read_vpd_fw_ver(struct bnx2 *bp) bnx2_read_vpd_fw_ver() argument 8017 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, bnx2_read_vpd_fw_ver() 8063 memcpy(bp->fw_version, &data[j], len); bnx2_read_vpd_fw_ver() 8064 bp->fw_version[len] = ' '; bnx2_read_vpd_fw_ver() 8073 struct bnx2 *bp; bnx2_init_board() local 8080 bp = netdev_priv(dev); bnx2_init_board() 8082 bp->flags = 0; bnx2_init_board() 8083 bp->phy_flags = 0; bnx2_init_board() 8085 bp->temp_stats_blk = bnx2_init_board() 8088 if (bp->temp_stats_blk == NULL) { bnx2_init_board() 8115 bp->pm_cap = pdev->pm_cap; bnx2_init_board() 8116 if (bp->pm_cap == 0) { bnx2_init_board() 8123 bp->dev = dev; bnx2_init_board() 8124 bp->pdev = pdev; 
bnx2_init_board() 8126 spin_lock_init(&bp->phy_lock); bnx2_init_board() 8127 spin_lock_init(&bp->indirect_lock); bnx2_init_board() 8129 mutex_init(&bp->cnic_lock); bnx2_init_board() 8131 INIT_WORK(&bp->reset_task, bnx2_reset_task); bnx2_init_board() 8133 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID + bnx2_init_board() 8135 if (!bp->regview) { bnx2_init_board() 8145 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, bnx2_init_board() 8149 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID); bnx2_init_board() 8151 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_board() 8157 bp->flags |= BNX2_FLAG_PCIE; bnx2_init_board() 8158 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) bnx2_init_board() 8159 bp->flags |= BNX2_FLAG_JUMBO_BROKEN; bnx2_init_board() 8164 bp->flags |= BNX2_FLAG_AER_ENABLED; bnx2_init_board() 8167 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); bnx2_init_board() 8168 if (bp->pcix_cap == 0) { bnx2_init_board() 8174 bp->flags |= BNX2_FLAG_BROKEN_STATS; bnx2_init_board() 8177 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && bnx2_init_board() 8178 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { bnx2_init_board() 8180 bp->flags |= BNX2_FLAG_MSIX_CAP; bnx2_init_board() 8183 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && bnx2_init_board() 8184 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { bnx2_init_board() 8186 bp->flags |= BNX2_FLAG_MSI_CAP; bnx2_init_board() 8190 if (BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_init_board() 8209 if (!(bp->flags & BNX2_FLAG_PCIE)) bnx2_init_board() 8210 bnx2_get_pci_speed(bp); bnx2_init_board() 8213 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_board() 8214 reg = BNX2_RD(bp, PCI_COMMAND); bnx2_init_board() 8216 BNX2_WR(bp, PCI_COMMAND, reg); bnx2_init_board() 8217 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && bnx2_init_board() 8218 !(bp->flags & BNX2_FLAG_PCIX)) { bnx2_init_board() 8225 bnx2_init_nvram(bp); bnx2_init_board() 8227 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); bnx2_init_board() 8229 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID) bnx2_init_board() 8230 bp->func = 1; bnx2_init_board() 8234 u32 off = bp->func << 2; bnx2_init_board() 8236 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); bnx2_init_board() 8238 bp->shmem_base = HOST_VIEW_SHMEM_BASE; bnx2_init_board() 8243 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE); bnx2_init_board() 8252 bnx2_read_vpd_fw_ver(bp); bnx2_init_board() 8254 j = strlen(bp->fw_version); bnx2_init_board() 8255 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV); bnx2_init_board() 8260 bp->fw_version[j++] = 'b'; bnx2_init_board() 8261 bp->fw_version[j++] = 'c'; bnx2_init_board() 8262 bp->fw_version[j++] = ' '; bnx2_init_board() 8267 bp->fw_version[j++] = (num / k) + '0'; bnx2_init_board() 8272 bp->fw_version[j++] = '.'; bnx2_init_board() 8274 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); bnx2_init_board() 8276 bp->wol = 1; bnx2_init_board() 8279 bp->flags |= BNX2_FLAG_ASF_ENABLE; bnx2_init_board() 8282 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_init_board() 8288 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_init_board() 8292 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR); bnx2_init_board() 8295 bp->fw_version[j++] = ' '; bnx2_init_board() 8297 reg = bnx2_reg_rd_ind(bp, addr + i * 4); bnx2_init_board() 8299 memcpy(&bp->fw_version[j], &reg, 4); bnx2_init_board() 8304 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER); bnx2_init_board() 8305 bp->mac_addr[0] = (u8) (reg >> 8); bnx2_init_board() 8306 bp->mac_addr[1] = (u8) reg; bnx2_init_board() 8308 reg =
bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER); bnx2_init_board() 8309 bp->mac_addr[2] = (u8) (reg >> 24); bnx2_init_board() 8310 bp->mac_addr[3] = (u8) (reg >> 16); bnx2_init_board() 8311 bp->mac_addr[4] = (u8) (reg >> 8); bnx2_init_board() 8312 bp->mac_addr[5] = (u8) reg; bnx2_init_board() 8314 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT; bnx2_init_board() 8315 bnx2_set_rx_ring_size(bp, 255); bnx2_init_board() 8317 bp->tx_quick_cons_trip_int = 2; bnx2_init_board() 8318 bp->tx_quick_cons_trip = 20; bnx2_init_board() 8319 bp->tx_ticks_int = 18; bnx2_init_board() 8320 bp->tx_ticks = 80; bnx2_init_board() 8322 bp->rx_quick_cons_trip_int = 2; bnx2_init_board() 8323 bp->rx_quick_cons_trip = 12; bnx2_init_board() 8324 bp->rx_ticks_int = 18; bnx2_init_board() 8325 bp->rx_ticks = 18; bnx2_init_board() 8327 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_init_board() 8329 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_init_board() 8331 bp->phy_addr = 1; bnx2_init_board() 8334 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_board() 8335 bnx2_get_5709_media(bp); bnx2_init_board() 8336 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT) bnx2_init_board() 8337 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_init_board() 8339 bp->phy_port = PORT_TP; bnx2_init_board() 8340 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_init_board() 8341 bp->phy_port = PORT_FIBRE; bnx2_init_board() 8342 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); bnx2_init_board() 8344 bp->flags |= BNX2_FLAG_NO_WOL; bnx2_init_board() 8345 bp->wol = 0; bnx2_init_board() 8347 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) { bnx2_init_board() 8354 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL; bnx2_init_board() 8356 bp->phy_addr = 2; bnx2_init_board() 8358 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; bnx2_init_board() 8360 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 || bnx2_init_board() 8361 BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_init_board() 8362 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; bnx2_init_board() 8363 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && bnx2_init_board() 8364 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax || bnx2_init_board() 8365 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx)) bnx2_init_board() 8366 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; bnx2_init_board() 8368 bnx2_init_fw_cap(bp); bnx2_init_board() 8370 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || bnx2_init_board() 8371 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || bnx2_init_board() 8372 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) || bnx2_init_board() 8373 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { bnx2_init_board() 8374 bp->flags |= BNX2_FLAG_NO_WOL; bnx2_init_board() 8375 bp->wol = 0; bnx2_init_board() 8378 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_init_board() 8379 device_set_wakeup_capable(&bp->pdev->dev, false); bnx2_init_board() 8381 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); bnx2_init_board() 8383 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_board() 8384 bp->tx_quick_cons_trip_int = bnx2_init_board() 8385 bp->tx_quick_cons_trip; bnx2_init_board() 8386 bp->tx_ticks_int = bp->tx_ticks; bnx2_init_board() 8387 bp->rx_quick_cons_trip_int = bnx2_init_board() 8388 bp->rx_quick_cons_trip; bnx2_init_board() 8389 bp->rx_ticks_int = bp->rx_ticks; bnx2_init_board() 8390 bp->comp_prod_trip_int = bp->comp_prod_trip; bnx2_init_board() 8391 bp->com_ticks_int = bp->com_ticks; bnx2_init_board() 8392 bp->cmd_ticks_int = bp->cmd_ticks; bnx2_init_board() 8405 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) { 
bnx2_init_board() 8421 bnx2_set_default_link(bp); bnx2_init_board() 8422 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; bnx2_init_board() 8424 init_timer(&bp->timer); bnx2_init_board() 8425 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); bnx2_init_board() 8426 bp->timer.data = (unsigned long) bp; bnx2_init_board() 8427 bp->timer.function = bnx2_timer; bnx2_init_board() 8430 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) bnx2_init_board() 8431 bp->cnic_eth_dev.max_iscsi_conn = bnx2_init_board() 8432 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & bnx2_init_board() 8434 bp->cnic_probe = bnx2_cnic_probe; bnx2_init_board() 8441 if (bp->flags & BNX2_FLAG_AER_ENABLED) { bnx2_init_board() 8443 bp->flags &= ~BNX2_FLAG_AER_ENABLED; bnx2_init_board() 8446 pci_iounmap(pdev, bp->regview); bnx2_init_board() 8447 bp->regview = NULL; bnx2_init_board() 8460 bnx2_bus_string(struct bnx2 *bp, char *str) bnx2_bus_string() argument 8464 if (bp->flags & BNX2_FLAG_PCIE) { bnx2_bus_string() 8468 if (bp->flags & BNX2_FLAG_PCIX) bnx2_bus_string() 8470 if (bp->flags & BNX2_FLAG_PCI_32BIT) bnx2_bus_string() 8474 s += sprintf(s, " %dMHz", bp->bus_speed_mhz); bnx2_bus_string() 8480 bnx2_del_napi(struct bnx2 *bp) bnx2_del_napi() argument 8484 for (i = 0; i < bp->irq_nvecs; i++) bnx2_del_napi() 8485 netif_napi_del(&bp->bnx2_napi[i].napi); bnx2_del_napi() 8489 bnx2_init_napi(struct bnx2 *bp) bnx2_init_napi() argument 8493 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_init_napi() 8494 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_init_napi() 8502 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64); bnx2_init_napi() 8503 bnapi->bp = bp; bnx2_init_napi() 8529 struct bnx2 *bp; bnx2_init_one() local 8537 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); bnx2_init_one() 8549 bp = netdev_priv(dev); bnx2_init_one() 8553 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); bnx2_init_one() 8559 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_one() 8567 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) bnx2_init_one() 8577 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A', bnx2_init_one() 8578 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4), bnx2_init_one() 8579 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), bnx2_init_one() 8585 pci_iounmap(pdev, bp->regview); bnx2_init_one() 8597 struct bnx2 *bp = netdev_priv(dev); bnx2_remove_one() local 8601 del_timer_sync(&bp->timer); bnx2_remove_one() 8602 cancel_work_sync(&bp->reset_task); bnx2_remove_one() 8604 pci_iounmap(bp->pdev, bp->regview); bnx2_remove_one() 8606 kfree(bp->temp_stats_blk); bnx2_remove_one() 8608 if (bp->flags & BNX2_FLAG_AER_ENABLED) { bnx2_remove_one() 8610 bp->flags &= ~BNX2_FLAG_AER_ENABLED; bnx2_remove_one() 8613 bnx2_release_firmware(bp); bnx2_remove_one() 8627 struct bnx2 *bp = netdev_priv(dev); bnx2_suspend() local 8630 cancel_work_sync(&bp->reset_task); bnx2_suspend() 8631 bnx2_netif_stop(bp, true); bnx2_suspend() 8633 del_timer_sync(&bp->timer); bnx2_suspend() 8634 bnx2_shutdown_chip(bp); bnx2_suspend() 8635 __bnx2_free_irq(bp); bnx2_suspend() 8636 bnx2_free_skbs(bp); bnx2_suspend() 8638 bnx2_setup_wol(bp); bnx2_suspend() 8647 struct bnx2 *bp = netdev_priv(dev); bnx2_resume() local 8652 bnx2_set_power_state(bp, PCI_D0); bnx2_resume() 8654 bnx2_request_irq(bp); bnx2_resume() 8655 bnx2_init_nic(bp, 1); bnx2_resume() 8656 bnx2_netif_start(bp, true); bnx2_resume() 8680 struct bnx2 *bp = netdev_priv(dev); bnx2_io_error_detected() local 8691 bnx2_netif_stop(bp, true); bnx2_io_error_detected() 8692 del_timer_sync(&bp->timer); bnx2_io_error_detected() 8693 
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_io_error_detected() 8712 struct bnx2 *bp = netdev_priv(dev); bnx2_io_slot_reset() local 8726 err = bnx2_init_nic(bp, 1); bnx2_io_slot_reset() 8733 bnx2_napi_enable(bp); bnx2_io_slot_reset() 8738 if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) bnx2_io_slot_reset() 8761 struct bnx2 *bp = netdev_priv(dev); bnx2_io_resume() local 8765 bnx2_netif_start(bp, true); bnx2_io_resume() 8774 struct bnx2 *bp; bnx2_shutdown() local 8779 bp = netdev_priv(dev); bnx2_shutdown() 8780 if (!bp) bnx2_shutdown() 8785 dev_close(bp->dev); bnx2_shutdown() 8788 bnx2_set_power_state(bp, PCI_D3hot); bnx2_shutdown()
|
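The bnx2_set_coalesce() lines at the top of this listing all follow one pattern: each ethtool coalescing parameter is cast to u16 and then capped to the width of the matching host-coalescing hardware field, 10 bits (0x3ff) for tick timers and 8 bits (0xff) for frame-count trip points. A minimal freestanding sketch of that clamping idiom, assuming nothing from the driver beyond those field widths (clamp_ticks and clamp_frames are illustrative names, not driver functions):

#include <stdint.h>
#include <stdio.h>

static uint16_t clamp_ticks(uint32_t usecs)
{
	uint16_t v = (uint16_t) usecs;

	return v > 0x3ff ? 0x3ff : v;	/* 10-bit timer field */
}

static uint16_t clamp_frames(uint32_t frames)
{
	uint16_t v = (uint16_t) frames;

	return v > 0xff ? 0xff : v;	/* 8-bit quick-trip field */
}

int main(void)
{
	/* 5000 usecs and 300 frames exceed the fields: prints "1023 255" */
	printf("%u %u\n", clamp_ticks(5000), clamp_frames(300));
	return 0;
}

Note the order: the driver truncates to u16 first and clamps second, so a value like 0x10000 wraps to 0 rather than saturating; the sketch preserves that behavior.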
H A D | b44.c | 166 static inline unsigned long br32(const struct b44 *bp, unsigned long reg) br32() argument 168 return ssb_read32(bp->sdev, reg); br32() 171 static inline void bw32(const struct b44 *bp, bw32() argument 174 ssb_write32(bp->sdev, reg, val); bw32() 177 static int b44_wait_bit(struct b44 *bp, unsigned long reg, b44_wait_bit() argument 183 u32 val = br32(bp, reg); b44_wait_bit() 193 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", b44_wait_bit() 201 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) __b44_cam_read() argument 205 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | __b44_cam_read() 208 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); __b44_cam_read() 210 val = br32(bp, B44_CAM_DATA_LO); __b44_cam_read() 217 val = br32(bp, B44_CAM_DATA_HI); __b44_cam_read() 223 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) __b44_cam_write() argument 231 bw32(bp, B44_CAM_DATA_LO, val); __b44_cam_write() 235 bw32(bp, B44_CAM_DATA_HI, val); __b44_cam_write() 236 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | __b44_cam_write() 238 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); __b44_cam_write() 241 static inline void __b44_disable_ints(struct b44 *bp) __b44_disable_ints() argument 243 bw32(bp, B44_IMASK, 0); __b44_disable_ints() 246 static void b44_disable_ints(struct b44 *bp) b44_disable_ints() argument 248 __b44_disable_ints(bp); b44_disable_ints() 251 br32(bp, B44_IMASK); b44_disable_ints() 254 static void b44_enable_ints(struct b44 *bp) b44_enable_ints() argument 256 bw32(bp, B44_IMASK, bp->imask); b44_enable_ints() 259 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) __b44_readphy() argument 263 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); __b44_readphy() 264 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | __b44_readphy() 269 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); __b44_readphy() 270 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; __b44_readphy() 275 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) __b44_writephy() argument 277 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); __b44_writephy() 278 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | __b44_writephy() 284 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); __b44_writephy() 287 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) b44_readphy() argument 289 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_readphy() 292 return __b44_readphy(bp, bp->phy_addr, reg, val); b44_readphy() 295 static inline int b44_writephy(struct b44 *bp, int reg, u32 val) b44_writephy() argument 297 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_writephy() 300 return __b44_writephy(bp, bp->phy_addr, reg, val); b44_writephy() 307 struct b44 *bp = netdev_priv(dev); b44_mdio_read_mii() local 308 int rc = __b44_readphy(bp, phy_id, location, &val); b44_mdio_read_mii() 317 struct b44 *bp = netdev_priv(dev); b44_mdio_write_mii() local 318 __b44_writephy(bp, phy_id, location, val); b44_mdio_write_mii() 324 struct b44 *bp = bus->priv; b44_mdio_read_phylib() local 325 int rc = __b44_readphy(bp, phy_id, location, &val); b44_mdio_read_phylib() 334 struct b44 *bp = bus->priv; b44_mdio_write_phylib() local 335 return __b44_writephy(bp, phy_id, location, val); b44_mdio_write_phylib() 338 static int b44_phy_reset(struct b44 *bp) b44_phy_reset() argument 343 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_phy_reset() 345 err = b44_writephy(bp, MII_BMCR, BMCR_RESET); b44_phy_reset() 349 err = 
b44_readphy(bp, MII_BMCR, &val); b44_phy_reset() 352 netdev_err(bp->dev, "PHY Reset would not complete\n"); b44_phy_reset() 360 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) __b44_set_flow_ctrl() argument 364 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); __b44_set_flow_ctrl() 365 bp->flags |= pause_flags; __b44_set_flow_ctrl() 367 val = br32(bp, B44_RXCONFIG); __b44_set_flow_ctrl() 372 bw32(bp, B44_RXCONFIG, val); __b44_set_flow_ctrl() 374 val = br32(bp, B44_MAC_FLOW); __b44_set_flow_ctrl() 380 bw32(bp, B44_MAC_FLOW, val); __b44_set_flow_ctrl() 383 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) b44_set_flow_ctrl() argument 399 __b44_set_flow_ctrl(bp, pause_enab); b44_set_flow_ctrl() 404 static void b44_wap54g10_workaround(struct b44 *bp) b44_wap54g10_workaround() argument 418 err = __b44_readphy(bp, 0, MII_BMCR, &val); b44_wap54g10_workaround() 424 err = __b44_writephy(bp, 0, MII_BMCR, val); b44_wap54g10_workaround() 433 static inline void b44_wap54g10_workaround(struct b44 *bp) b44_wap54g10_workaround() argument 438 static int b44_setup_phy(struct b44 *bp) b44_setup_phy() argument 443 b44_wap54g10_workaround(bp); b44_setup_phy() 445 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_setup_phy() 447 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) b44_setup_phy() 449 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, b44_setup_phy() 452 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) b44_setup_phy() 454 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, b44_setup_phy() 458 if (!(bp->flags & B44_FLAG_FORCE_LINK)) { b44_setup_phy() 461 if (bp->flags & B44_FLAG_ADV_10HALF) b44_setup_phy() 463 if (bp->flags & B44_FLAG_ADV_10FULL) b44_setup_phy() 465 if (bp->flags & B44_FLAG_ADV_100HALF) b44_setup_phy() 467 if (bp->flags & B44_FLAG_ADV_100FULL) b44_setup_phy() 470 if (bp->flags & B44_FLAG_PAUSE_AUTO) b44_setup_phy() 473 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) b44_setup_phy() 475 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | b44_setup_phy() 481 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) b44_setup_phy() 484 if (bp->flags & B44_FLAG_100_BASE_T) b44_setup_phy() 486 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_setup_phy() 488 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) b44_setup_phy() 495 b44_set_flow_ctrl(bp, 0, 0); b44_setup_phy() 502 static void b44_stats_update(struct b44 *bp) b44_stats_update() argument 507 val = &bp->hw_stats.tx_good_octets; b44_stats_update() 508 u64_stats_update_begin(&bp->hw_stats.syncp); b44_stats_update() 511 *val++ += br32(bp, reg); b44_stats_update() 518 *val++ += br32(bp, reg); b44_stats_update() 521 u64_stats_update_end(&bp->hw_stats.syncp); b44_stats_update() 524 static void b44_link_report(struct b44 *bp) b44_link_report() argument 526 if (!netif_carrier_ok(bp->dev)) { b44_link_report() 527 netdev_info(bp->dev, "Link is down\n"); b44_link_report() 529 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", b44_link_report() 530 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, b44_link_report() 531 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); b44_link_report() 533 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", b44_link_report() 534 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", b44_link_report() 535 (bp->flags & B44_FLAG_RX_PAUSE) ? 
"on" : "off"); b44_link_report() 539 static void b44_check_phy(struct b44 *bp) b44_check_phy() argument 543 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_check_phy() 544 bp->flags |= B44_FLAG_100_BASE_T; b44_check_phy() 545 if (!netif_carrier_ok(bp->dev)) { b44_check_phy() 546 u32 val = br32(bp, B44_TX_CTRL); b44_check_phy() 547 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_check_phy() 551 bw32(bp, B44_TX_CTRL, val); b44_check_phy() 552 netif_carrier_on(bp->dev); b44_check_phy() 553 b44_link_report(bp); b44_check_phy() 558 if (!b44_readphy(bp, MII_BMSR, &bmsr) && b44_check_phy() 559 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && b44_check_phy() 562 bp->flags |= B44_FLAG_100_BASE_T; b44_check_phy() 564 bp->flags &= ~B44_FLAG_100_BASE_T; b44_check_phy() 566 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_check_phy() 568 bp->flags &= ~B44_FLAG_FULL_DUPLEX; b44_check_phy() 570 if (!netif_carrier_ok(bp->dev) && b44_check_phy() 572 u32 val = br32(bp, B44_TX_CTRL); b44_check_phy() 575 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_check_phy() 579 bw32(bp, B44_TX_CTRL, val); b44_check_phy() 581 if (!(bp->flags & B44_FLAG_FORCE_LINK) && b44_check_phy() 582 !b44_readphy(bp, MII_ADVERTISE, &local_adv) && b44_check_phy() 583 !b44_readphy(bp, MII_LPA, &remote_adv)) b44_check_phy() 584 b44_set_flow_ctrl(bp, local_adv, remote_adv); b44_check_phy() 587 netif_carrier_on(bp->dev); b44_check_phy() 588 b44_link_report(bp); b44_check_phy() 589 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { b44_check_phy() 591 netif_carrier_off(bp->dev); b44_check_phy() 592 b44_link_report(bp); b44_check_phy() 596 netdev_warn(bp->dev, "Remote fault detected in PHY\n"); b44_check_phy() 598 netdev_warn(bp->dev, "Jabber detected in PHY\n"); b44_check_phy() 604 struct b44 *bp = (struct b44 *) __opaque; b44_timer() local 606 spin_lock_irq(&bp->lock); b44_timer() 608 b44_check_phy(bp); b44_timer() 610 b44_stats_update(bp); b44_timer() 612 spin_unlock_irq(&bp->lock); b44_timer() 614 mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); b44_timer() 617 static void b44_tx(struct b44 *bp) b44_tx() argument 622 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; b44_tx() 626 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { b44_tx() 627 struct ring_info *rp = &bp->tx_buffers[cons]; b44_tx() 632 dma_unmap_single(bp->sdev->dma_dev, b44_tx() 644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); b44_tx() 645 bp->tx_cons = cons; b44_tx() 646 if (netif_queue_stopped(bp->dev) && b44_tx() 647 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) b44_tx() 648 netif_wake_queue(bp->dev); b44_tx() 650 bw32(bp, B44_GPTIMER, 0); b44_tx() 658 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) b44_alloc_rx_skb() argument 670 src_map = &bp->rx_buffers[src_idx]; b44_alloc_rx_skb() 672 map = &bp->rx_buffers[dest_idx]; b44_alloc_rx_skb() 673 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); b44_alloc_rx_skb() 677 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, b44_alloc_rx_skb() 683 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || b44_alloc_rx_skb() 686 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_alloc_rx_skb() 687 dma_unmap_single(bp->sdev->dma_dev, mapping, b44_alloc_rx_skb() 693 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, b44_alloc_rx_skb() 696 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || b44_alloc_rx_skb() 698 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_alloc_rx_skb() 699 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); b44_alloc_rx_skb() 703 
bp->force_copybreak = 1; b44_alloc_rx_skb() 721 dp = &bp->rx_ring[dest_idx]; b44_alloc_rx_skb() 723 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); b44_alloc_rx_skb() 725 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_alloc_rx_skb() 726 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, b44_alloc_rx_skb() 733 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) b44_recycle_rx() argument 742 dest_desc = &bp->rx_ring[dest_idx]; b44_recycle_rx() 743 dest_map = &bp->rx_buffers[dest_idx]; b44_recycle_rx() 744 src_desc = &bp->rx_ring[src_idx]; b44_recycle_rx() 745 src_map = &bp->rx_buffers[src_idx]; b44_recycle_rx() 753 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_recycle_rx() 754 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, b44_recycle_rx() 769 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_recycle_rx() 770 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, b44_recycle_rx() 774 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, b44_recycle_rx() 779 static int b44_rx(struct b44 *bp, int budget) b44_rx() argument 785 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; b44_rx() 787 cons = bp->rx_cons; b44_rx() 790 struct ring_info *rp = &bp->rx_buffers[cons]; b44_rx() 796 dma_sync_single_for_cpu(bp->sdev->dma_dev, map, b44_rx() 804 b44_recycle_rx(bp, cons, bp->rx_prod); b44_rx() 806 bp->dev->stats.rx_dropped++; b44_rx() 825 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { b44_rx() 827 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); b44_rx() 830 dma_unmap_single(bp->sdev->dma_dev, map, b44_rx() 838 b44_recycle_rx(bp, cons, bp->rx_prod); b44_rx() 839 copy_skb = napi_alloc_skb(&bp->napi, len); b44_rx() 850 skb->protocol = eth_type_trans(skb, bp->dev); b44_rx() 855 bp->rx_prod = (bp->rx_prod + 1) & b44_rx() 860 bp->rx_cons = cons; b44_rx() 861 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); b44_rx() 868 struct b44 *bp = container_of(napi, struct b44, napi); b44_poll() local 872 spin_lock_irqsave(&bp->lock, flags); b44_poll() 874 if (bp->istat & (ISTAT_TX | ISTAT_TO)) { b44_poll() 875 /* spin_lock(&bp->tx_lock); */ b44_poll() 876 b44_tx(bp); b44_poll() 877 /* spin_unlock(&bp->tx_lock); */ b44_poll() 879 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ b44_poll() 880 bp->istat &= ~ISTAT_RFO; b44_poll() 881 b44_disable_ints(bp); b44_poll() 882 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ b44_poll() 883 b44_init_rings(bp); b44_poll() 884 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); b44_poll() 885 netif_wake_queue(bp->dev); b44_poll() 888 spin_unlock_irqrestore(&bp->lock, flags); b44_poll() 891 if (bp->istat & ISTAT_RX) b44_poll() 892 work_done += b44_rx(bp, budget); b44_poll() 894 if (bp->istat & ISTAT_ERRORS) { b44_poll() 895 spin_lock_irqsave(&bp->lock, flags); b44_poll() 896 b44_halt(bp); b44_poll() 897 b44_init_rings(bp); b44_poll() 898 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); b44_poll() 899 netif_wake_queue(bp->dev); b44_poll() 900 spin_unlock_irqrestore(&bp->lock, flags); b44_poll() 906 b44_enable_ints(bp); b44_poll() 915 struct b44 *bp = netdev_priv(dev); b44_interrupt() local 919 spin_lock(&bp->lock); b44_interrupt() 921 istat = br32(bp, B44_ISTAT); b44_interrupt() 922 imask = br32(bp, B44_IMASK); b44_interrupt() 937 if (napi_schedule_prep(&bp->napi)) { b44_interrupt() 941 bp->istat = istat; b44_interrupt() 942 __b44_disable_ints(bp); b44_interrupt() 943 __napi_schedule(&bp->napi); b44_interrupt() 947 bw32(bp, B44_ISTAT, istat); b44_interrupt() 948 br32(bp, B44_ISTAT); b44_interrupt() 950 
spin_unlock(&bp->lock); b44_interrupt() 956 struct b44 *bp = netdev_priv(dev); b44_tx_timeout() local 960 spin_lock_irq(&bp->lock); b44_tx_timeout() 962 b44_halt(bp); b44_tx_timeout() 963 b44_init_rings(bp); b44_tx_timeout() 964 b44_init_hw(bp, B44_FULL_RESET); b44_tx_timeout() 966 spin_unlock_irq(&bp->lock); b44_tx_timeout() 968 b44_enable_ints(bp); b44_tx_timeout() 975 struct b44 *bp = netdev_priv(dev); b44_start_xmit() local 982 spin_lock_irqsave(&bp->lock, flags); b44_start_xmit() 985 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { b44_start_xmit() 991 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); b44_start_xmit() 992 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { b44_start_xmit() 996 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_start_xmit() 997 dma_unmap_single(bp->sdev->dma_dev, mapping, len, b44_start_xmit() 1004 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, b44_start_xmit() 1006 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { b44_start_xmit() 1007 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_start_xmit() 1008 dma_unmap_single(bp->sdev->dma_dev, mapping, b44_start_xmit() 1019 entry = bp->tx_prod; b44_start_xmit() 1020 bp->tx_buffers[entry].skb = skb; b44_start_xmit() 1021 bp->tx_buffers[entry].mapping = mapping; b44_start_xmit() 1028 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); b44_start_xmit() 1029 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); b44_start_xmit() 1031 if (bp->flags & B44_FLAG_TX_RING_HACK) b44_start_xmit() 1032 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, b44_start_xmit() 1033 entry * sizeof(bp->tx_ring[0]), b44_start_xmit() 1038 bp->tx_prod = entry; b44_start_xmit() 1042 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); b44_start_xmit() 1043 if (bp->flags & B44_FLAG_BUGGY_TXPTR) b44_start_xmit() 1044 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); b44_start_xmit() 1045 if (bp->flags & B44_FLAG_REORDER_BUG) b44_start_xmit() 1046 br32(bp, B44_DMATX_PTR); b44_start_xmit() 1050 if (TX_BUFFS_AVAIL(bp) < 1) b44_start_xmit() 1054 spin_unlock_irqrestore(&bp->lock, flags); b44_start_xmit() 1065 struct b44 *bp = netdev_priv(dev); b44_change_mtu() local 1078 spin_lock_irq(&bp->lock); b44_change_mtu() 1079 b44_halt(bp); b44_change_mtu() 1081 b44_init_rings(bp); b44_change_mtu() 1082 b44_init_hw(bp, B44_FULL_RESET); b44_change_mtu() 1083 spin_unlock_irq(&bp->lock); b44_change_mtu() 1085 b44_enable_ints(bp); b44_change_mtu() 1094 * end up in the driver. 
bp->lock is not held and we are not 1097 static void b44_free_rings(struct b44 *bp) b44_free_rings() argument 1103 rp = &bp->rx_buffers[i]; b44_free_rings() 1107 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, b44_free_rings() 1115 rp = &bp->tx_buffers[i]; b44_free_rings() 1119 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, b44_free_rings() 1132 static void b44_init_rings(struct b44 *bp) b44_init_rings() argument 1136 b44_free_rings(bp); b44_init_rings() 1138 memset(bp->rx_ring, 0, B44_RX_RING_BYTES); b44_init_rings() 1139 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); b44_init_rings() 1141 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_init_rings() 1142 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, b44_init_rings() 1145 if (bp->flags & B44_FLAG_TX_RING_HACK) b44_init_rings() 1146 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, b44_init_rings() 1149 for (i = 0; i < bp->rx_pending; i++) { b44_init_rings() 1150 if (b44_alloc_rx_skb(bp, -1, i) < 0) b44_init_rings() 1159 static void b44_free_consistent(struct b44 *bp) b44_free_consistent() argument 1161 kfree(bp->rx_buffers); b44_free_consistent() 1162 bp->rx_buffers = NULL; b44_free_consistent() 1163 kfree(bp->tx_buffers); b44_free_consistent() 1164 bp->tx_buffers = NULL; b44_free_consistent() 1165 if (bp->rx_ring) { b44_free_consistent() 1166 if (bp->flags & B44_FLAG_RX_RING_HACK) { b44_free_consistent() 1167 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, b44_free_consistent() 1169 kfree(bp->rx_ring); b44_free_consistent() 1171 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, b44_free_consistent() 1172 bp->rx_ring, bp->rx_ring_dma); b44_free_consistent() 1173 bp->rx_ring = NULL; b44_free_consistent() 1174 bp->flags &= ~B44_FLAG_RX_RING_HACK; b44_free_consistent() 1176 if (bp->tx_ring) { b44_free_consistent() 1177 if (bp->flags & B44_FLAG_TX_RING_HACK) { b44_free_consistent() 1178 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, b44_free_consistent() 1180 kfree(bp->tx_ring); b44_free_consistent() 1182 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, b44_free_consistent() 1183 bp->tx_ring, bp->tx_ring_dma); b44_free_consistent() 1184 bp->tx_ring = NULL; b44_free_consistent() 1185 bp->flags &= ~B44_FLAG_TX_RING_HACK; b44_free_consistent() 1193 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) b44_alloc_consistent() argument 1198 bp->rx_buffers = kzalloc(size, gfp); b44_alloc_consistent() 1199 if (!bp->rx_buffers) b44_alloc_consistent() 1203 bp->tx_buffers = kzalloc(size, gfp); b44_alloc_consistent() 1204 if (!bp->tx_buffers) b44_alloc_consistent() 1208 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, b44_alloc_consistent() 1209 &bp->rx_ring_dma, gfp); b44_alloc_consistent() 1210 if (!bp->rx_ring) { b44_alloc_consistent() 1221 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, b44_alloc_consistent() 1225 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || b44_alloc_consistent() 1231 bp->rx_ring = rx_ring; b44_alloc_consistent() 1232 bp->rx_ring_dma = rx_ring_dma; b44_alloc_consistent() 1233 bp->flags |= B44_FLAG_RX_RING_HACK; b44_alloc_consistent() 1236 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, b44_alloc_consistent() 1237 &bp->tx_ring_dma, gfp); b44_alloc_consistent() 1238 if (!bp->tx_ring) { b44_alloc_consistent() 1249 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, b44_alloc_consistent() 1253 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || b44_alloc_consistent() 1259 bp->tx_ring = tx_ring; 
b44_alloc_consistent() 1260 bp->tx_ring_dma = tx_ring_dma; b44_alloc_consistent() 1261 bp->flags |= B44_FLAG_TX_RING_HACK; b44_alloc_consistent() 1267 b44_free_consistent(bp); b44_alloc_consistent() 1271 /* bp->lock is held. */ b44_clear_stats() 1272 static void b44_clear_stats(struct b44 *bp) b44_clear_stats() argument 1276 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); b44_clear_stats() 1278 br32(bp, reg); b44_clear_stats() 1280 br32(bp, reg); b44_clear_stats() 1283 /* bp->lock is held. */ b44_chip_reset() 1284 static void b44_chip_reset(struct b44 *bp, int reset_kind) b44_chip_reset() argument 1286 struct ssb_device *sdev = bp->sdev; b44_chip_reset() 1289 was_enabled = ssb_device_is_enabled(bp->sdev); b44_chip_reset() 1291 ssb_device_enable(bp->sdev, 0); b44_chip_reset() 1295 bw32(bp, B44_RCV_LAZY, 0); b44_chip_reset() 1296 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); b44_chip_reset() 1297 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); b44_chip_reset() 1298 bw32(bp, B44_DMATX_CTRL, 0); b44_chip_reset() 1299 bp->tx_prod = bp->tx_cons = 0; b44_chip_reset() 1300 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { b44_chip_reset() 1301 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, b44_chip_reset() 1304 bw32(bp, B44_DMARX_CTRL, 0); b44_chip_reset() 1305 bp->rx_prod = bp->rx_cons = 0; b44_chip_reset() 1308 b44_clear_stats(bp); b44_chip_reset() 1319 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | b44_chip_reset() 1325 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | b44_chip_reset() 1334 br32(bp, B44_MDIO_CTRL); b44_chip_reset() 1336 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { b44_chip_reset() 1337 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); b44_chip_reset() 1338 br32(bp, B44_ENET_CTRL); b44_chip_reset() 1339 bp->flags |= B44_FLAG_EXTERNAL_PHY; b44_chip_reset() 1341 u32 val = br32(bp, B44_DEVCTRL); b44_chip_reset() 1344 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); b44_chip_reset() 1345 br32(bp, B44_DEVCTRL); b44_chip_reset() 1348 bp->flags &= ~B44_FLAG_EXTERNAL_PHY; b44_chip_reset() 1352 /* bp->lock is held. */ b44_halt() 1353 static void b44_halt(struct b44 *bp) b44_halt() argument 1355 b44_disable_ints(bp); b44_halt() 1357 b44_phy_reset(bp); b44_halt() 1359 netdev_info(bp->dev, "powering down PHY\n"); b44_halt() 1360 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); b44_halt() 1363 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_halt() 1364 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_halt() 1366 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); b44_halt() 1369 /* bp->lock is held. */ __b44_set_mac_addr() 1370 static void __b44_set_mac_addr(struct b44 *bp) __b44_set_mac_addr() argument 1372 bw32(bp, B44_CAM_CTRL, 0); __b44_set_mac_addr() 1373 if (!(bp->dev->flags & IFF_PROMISC)) { __b44_set_mac_addr() 1376 __b44_cam_write(bp, bp->dev->dev_addr, 0); __b44_set_mac_addr() 1377 val = br32(bp, B44_CAM_CTRL); __b44_set_mac_addr() 1378 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); __b44_set_mac_addr() 1384 struct b44 *bp = netdev_priv(dev); b44_set_mac_addr() local 1396 spin_lock_irq(&bp->lock); b44_set_mac_addr() 1398 val = br32(bp, B44_RXCONFIG); b44_set_mac_addr() 1400 __b44_set_mac_addr(bp); b44_set_mac_addr() 1402 spin_unlock_irq(&bp->lock); b44_set_mac_addr() 1408 * packet processing. Invoked with bp->lock held. 
1411 static void b44_init_hw(struct b44 *bp, int reset_kind) b44_init_hw() argument 1415 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_init_hw() 1417 b44_phy_reset(bp); b44_init_hw() 1418 b44_setup_phy(bp); b44_init_hw() 1422 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); b44_init_hw() 1423 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); b44_init_hw() 1426 __b44_set_rx_mode(bp->dev); b44_init_hw() 1429 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); b44_init_hw() 1430 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); b44_init_hw() 1432 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ b44_init_hw() 1434 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | b44_init_hw() 1437 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); b44_init_hw() 1438 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); b44_init_hw() 1439 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | b44_init_hw() 1441 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); b44_init_hw() 1443 bw32(bp, B44_DMARX_PTR, bp->rx_pending); b44_init_hw() 1444 bp->rx_prod = bp->rx_pending; b44_init_hw() 1446 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); b44_init_hw() 1449 val = br32(bp, B44_ENET_CTRL); b44_init_hw() 1450 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); b44_init_hw() 1452 netdev_reset_queue(bp->dev); b44_init_hw() 1457 struct b44 *bp = netdev_priv(dev); b44_open() local 1460 err = b44_alloc_consistent(bp, GFP_KERNEL); b44_open() 1464 napi_enable(&bp->napi); b44_open() 1466 b44_init_rings(bp); b44_open() 1467 b44_init_hw(bp, B44_FULL_RESET); b44_open() 1469 b44_check_phy(bp); b44_open() 1473 napi_disable(&bp->napi); b44_open() 1474 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); b44_open() 1475 b44_free_rings(bp); b44_open() 1476 b44_free_consistent(bp); b44_open() 1480 init_timer(&bp->timer); b44_open() 1481 bp->timer.expires = jiffies + HZ; b44_open() 1482 bp->timer.data = (unsigned long) bp; b44_open() 1483 bp->timer.function = b44_timer; b44_open() 1484 add_timer(&bp->timer); b44_open() 1486 b44_enable_ints(bp); b44_open() 1488 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_open() 1489 phy_start(bp->phydev); b44_open() 1509 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) bwfilter_table() argument 1515 bw32(bp, B44_FILT_ADDR, table_offset + i); bwfilter_table() 1516 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); bwfilter_table() 1549 static void b44_setup_pseudo_magicp(struct b44 *bp) b44_setup_pseudo_magicp() argument 1563 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp() 1566 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); b44_setup_pseudo_magicp() 1567 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); b44_setup_pseudo_magicp() 1572 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp() 1575 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, b44_setup_pseudo_magicp() 1577 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, b44_setup_pseudo_magicp() 1583 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp() 1586 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, b44_setup_pseudo_magicp() 1588 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, b44_setup_pseudo_magicp() 1595 bw32(bp, B44_WKUP_LEN, val); b44_setup_pseudo_magicp() 1598 val = br32(bp, B44_DEVCTRL); b44_setup_pseudo_magicp() 1599 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); b44_setup_pseudo_magicp() 1604 static void 
b44_setup_wol_pci(struct b44 *bp) b44_setup_wol_pci() argument 1608 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { b44_setup_wol_pci() 1609 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); b44_setup_wol_pci() 1610 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); b44_setup_wol_pci() 1611 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); b44_setup_wol_pci() 1615 static inline void b44_setup_wol_pci(struct b44 *bp) { } b44_setup_wol_pci() argument 1618 static void b44_setup_wol(struct b44 *bp) b44_setup_wol() argument 1622 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); b44_setup_wol() 1624 if (bp->flags & B44_FLAG_B0_ANDLATER) { b44_setup_wol() 1626 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); b44_setup_wol() 1628 val = bp->dev->dev_addr[2] << 24 | b44_setup_wol() 1629 bp->dev->dev_addr[3] << 16 | b44_setup_wol() 1630 bp->dev->dev_addr[4] << 8 | b44_setup_wol() 1631 bp->dev->dev_addr[5]; b44_setup_wol() 1632 bw32(bp, B44_ADDR_LO, val); b44_setup_wol() 1634 val = bp->dev->dev_addr[0] << 8 | b44_setup_wol() 1635 bp->dev->dev_addr[1]; b44_setup_wol() 1636 bw32(bp, B44_ADDR_HI, val); b44_setup_wol() 1638 val = br32(bp, B44_DEVCTRL); b44_setup_wol() 1639 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); b44_setup_wol() 1642 b44_setup_pseudo_magicp(bp); b44_setup_wol() 1644 b44_setup_wol_pci(bp); b44_setup_wol() 1649 struct b44 *bp = netdev_priv(dev); b44_close() local 1653 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_close() 1654 phy_stop(bp->phydev); b44_close() 1656 napi_disable(&bp->napi); b44_close() 1658 del_timer_sync(&bp->timer); b44_close() 1660 spin_lock_irq(&bp->lock); b44_close() 1662 b44_halt(bp); b44_close() 1663 b44_free_rings(bp); b44_close() 1666 spin_unlock_irq(&bp->lock); b44_close() 1670 if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_close() 1671 b44_init_hw(bp, B44_PARTIAL_RESET); b44_close() 1672 b44_setup_wol(bp); b44_close() 1675 b44_free_consistent(bp); b44_close() 1683 struct b44 *bp = netdev_priv(dev); b44_get_stats64() local 1684 struct b44_hw_stats *hwstat = &bp->hw_stats; b44_get_stats64() 1727 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) __b44_load_mcast() argument 1737 __b44_cam_write(bp, ha->addr, i++ + 1); netdev_for_each_mc_addr() 1744 struct b44 *bp = netdev_priv(dev); __b44_set_rx_mode() local 1747 val = br32(bp, B44_RXCONFIG); __b44_set_rx_mode() 1751 bw32(bp, B44_RXCONFIG, val); __b44_set_rx_mode() 1756 __b44_set_mac_addr(bp); __b44_set_rx_mode() 1762 i = __b44_load_mcast(bp, dev); __b44_set_rx_mode() 1765 __b44_cam_write(bp, zero, i); __b44_set_rx_mode() 1767 bw32(bp, B44_RXCONFIG, val); __b44_set_rx_mode() 1768 val = br32(bp, B44_CAM_CTRL); __b44_set_rx_mode() 1769 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); __b44_set_rx_mode() 1775 struct b44 *bp = netdev_priv(dev); b44_set_rx_mode() local 1777 spin_lock_irq(&bp->lock); b44_set_rx_mode() 1779 spin_unlock_irq(&bp->lock); b44_set_rx_mode() 1784 struct b44 *bp = netdev_priv(dev); b44_get_msglevel() local 1785 return bp->msg_enable; b44_get_msglevel() 1790 struct b44 *bp = netdev_priv(dev); b44_set_msglevel() local 1791 bp->msg_enable = value; b44_set_msglevel() 1796 struct b44 *bp = netdev_priv(dev); b44_get_drvinfo() local 1797 struct ssb_bus *bus = bp->sdev->bus; b44_get_drvinfo() 1817 struct b44 *bp = netdev_priv(dev); b44_nway_reset() local 1821 spin_lock_irq(&bp->lock); b44_nway_reset() 1822 b44_readphy(bp, MII_BMCR, &bmcr); b44_nway_reset() 1823 b44_readphy(bp, MII_BMCR, &bmcr); b44_nway_reset() 1826 b44_writephy(bp, MII_BMCR, 
b44_nway_reset() 1830 spin_unlock_irq(&bp->lock); b44_nway_reset() 1837 struct b44 *bp = netdev_priv(dev); b44_get_settings() local 1839 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_get_settings() 1840 BUG_ON(!bp->phydev); b44_get_settings() 1841 return phy_ethtool_gset(bp->phydev, cmd); b44_get_settings() 1852 if (bp->flags & B44_FLAG_ADV_10HALF) b44_get_settings() 1854 if (bp->flags & B44_FLAG_ADV_10FULL) b44_get_settings() 1856 if (bp->flags & B44_FLAG_ADV_100HALF) b44_get_settings() 1858 if (bp->flags & B44_FLAG_ADV_100FULL) b44_get_settings() 1861 ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ? b44_get_settings() 1863 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? b44_get_settings() 1866 cmd->phy_address = bp->phy_addr; b44_get_settings() 1867 cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ? b44_get_settings() 1869 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? b44_get_settings() 1884 struct b44 *bp = netdev_priv(dev); b44_set_settings() local 1888 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_set_settings() 1889 BUG_ON(!bp->phydev); b44_set_settings() 1890 spin_lock_irq(&bp->lock); b44_set_settings() 1892 b44_setup_phy(bp); b44_set_settings() 1894 ret = phy_ethtool_sset(bp->phydev, cmd); b44_set_settings() 1896 spin_unlock_irq(&bp->lock); b44_set_settings() 1916 spin_lock_irq(&bp->lock); b44_set_settings() 1919 bp->flags &= ~(B44_FLAG_FORCE_LINK | b44_set_settings() 1927 bp->flags |= (B44_FLAG_ADV_10HALF | b44_set_settings() 1933 bp->flags |= B44_FLAG_ADV_10HALF; b44_set_settings() 1935 bp->flags |= B44_FLAG_ADV_10FULL; b44_set_settings() 1937 bp->flags |= B44_FLAG_ADV_100HALF; b44_set_settings() 1939 bp->flags |= B44_FLAG_ADV_100FULL; b44_set_settings() 1942 bp->flags |= B44_FLAG_FORCE_LINK; b44_set_settings() 1943 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); b44_set_settings() 1945 bp->flags |= B44_FLAG_100_BASE_T; b44_set_settings() 1947 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_set_settings() 1951 b44_setup_phy(bp); b44_set_settings() 1953 spin_unlock_irq(&bp->lock); b44_set_settings() 1961 struct b44 *bp = netdev_priv(dev); b44_get_ringparam() local 1964 ering->rx_pending = bp->rx_pending; b44_get_ringparam() 1972 struct b44 *bp = netdev_priv(dev); b44_set_ringparam() local 1980 spin_lock_irq(&bp->lock); b44_set_ringparam() 1982 bp->rx_pending = ering->rx_pending; b44_set_ringparam() 1983 bp->tx_pending = ering->tx_pending; b44_set_ringparam() 1985 b44_halt(bp); b44_set_ringparam() 1986 b44_init_rings(bp); b44_set_ringparam() 1987 b44_init_hw(bp, B44_FULL_RESET); b44_set_ringparam() 1988 netif_wake_queue(bp->dev); b44_set_ringparam() 1989 spin_unlock_irq(&bp->lock); b44_set_ringparam() 1991 b44_enable_ints(bp); b44_set_ringparam() 1999 struct b44 *bp = netdev_priv(dev); b44_get_pauseparam() local 2002 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; b44_get_pauseparam() 2004 (bp->flags & B44_FLAG_RX_PAUSE) != 0; b44_get_pauseparam() 2006 (bp->flags & B44_FLAG_TX_PAUSE) != 0; b44_get_pauseparam() 2012 struct b44 *bp = netdev_priv(dev); b44_set_pauseparam() local 2014 spin_lock_irq(&bp->lock); b44_set_pauseparam() 2016 bp->flags |= B44_FLAG_PAUSE_AUTO; b44_set_pauseparam() 2018 bp->flags &= ~B44_FLAG_PAUSE_AUTO; b44_set_pauseparam() 2020 bp->flags |= B44_FLAG_RX_PAUSE; b44_set_pauseparam() 2022 bp->flags &= ~B44_FLAG_RX_PAUSE; b44_set_pauseparam() 2024 bp->flags |= B44_FLAG_TX_PAUSE; b44_set_pauseparam() 2026 bp->flags &= ~B44_FLAG_TX_PAUSE; b44_set_pauseparam() 2027 if (bp->flags & B44_FLAG_PAUSE_AUTO) { b44_set_pauseparam() 2028 b44_halt(bp); 
b44_set_pauseparam() 2029 b44_init_rings(bp); b44_set_pauseparam() 2030 b44_init_hw(bp, B44_FULL_RESET); b44_set_pauseparam() 2032 __b44_set_flow_ctrl(bp, bp->flags); b44_set_pauseparam() 2034 spin_unlock_irq(&bp->lock); b44_set_pauseparam() 2036 b44_enable_ints(bp); b44_set_pauseparam() 2063 struct b44 *bp = netdev_priv(dev); b44_get_ethtool_stats() local 2064 struct b44_hw_stats *hwstat = &bp->hw_stats; b44_get_ethtool_stats() 2069 spin_lock_irq(&bp->lock); b44_get_ethtool_stats() 2070 b44_stats_update(bp); b44_get_ethtool_stats() 2071 spin_unlock_irq(&bp->lock); b44_get_ethtool_stats() 2086 struct b44 *bp = netdev_priv(dev); b44_get_wol() local 2089 if (bp->flags & B44_FLAG_WOL_ENABLE) b44_get_wol() 2098 struct b44 *bp = netdev_priv(dev); b44_set_wol() local 2100 spin_lock_irq(&bp->lock); b44_set_wol() 2102 bp->flags |= B44_FLAG_WOL_ENABLE; b44_set_wol() 2104 bp->flags &= ~B44_FLAG_WOL_ENABLE; b44_set_wol() 2105 spin_unlock_irq(&bp->lock); b44_set_wol() 2107 device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC); b44_set_wol() 2132 struct b44 *bp = netdev_priv(dev); b44_ioctl() local 2138 spin_lock_irq(&bp->lock); b44_ioctl() 2139 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_ioctl() 2140 BUG_ON(!bp->phydev); b44_ioctl() 2141 err = phy_mii_ioctl(bp->phydev, ifr, cmd); b44_ioctl() 2143 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL); b44_ioctl() 2145 spin_unlock_irq(&bp->lock); b44_ioctl() 2150 static int b44_get_invariants(struct b44 *bp) b44_get_invariants() argument 2152 struct ssb_device *sdev = bp->sdev; b44_get_invariants() 2156 bp->dma_offset = ssb_dma_translation(sdev); b44_get_invariants() 2161 bp->phy_addr = sdev->bus->sprom.et1phyaddr; b44_get_invariants() 2164 bp->phy_addr = sdev->bus->sprom.et0phyaddr; b44_get_invariants() 2169 bp->phy_addr &= 0x1F; b44_get_invariants() 2171 memcpy(bp->dev->dev_addr, addr, ETH_ALEN); b44_get_invariants() 2173 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ b44_get_invariants() 2178 bp->imask = IMASK_DEF; b44_get_invariants() 2181 bp->flags |= B44_FLAG_BUGGY_TXPTR; b44_get_invariants() 2184 if (bp->sdev->id.revision >= 7) b44_get_invariants() 2185 bp->flags |= B44_FLAG_B0_ANDLATER; b44_get_invariants() 2208 struct b44 *bp = netdev_priv(dev); b44_adjust_link() local 2209 struct phy_device *phydev = bp->phydev; b44_adjust_link() 2214 if (bp->old_link != phydev->link) { b44_adjust_link() 2216 bp->old_link = phydev->link; b44_adjust_link() 2222 (bp->flags & B44_FLAG_FULL_DUPLEX)) { b44_adjust_link() 2224 bp->flags &= ~B44_FLAG_FULL_DUPLEX; b44_adjust_link() 2226 !(bp->flags & B44_FLAG_FULL_DUPLEX)) { b44_adjust_link() 2228 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_adjust_link() 2233 u32 val = br32(bp, B44_TX_CTRL); b44_adjust_link() 2234 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_adjust_link() 2238 bw32(bp, B44_TX_CTRL, val); b44_adjust_link() 2243 static int b44_register_phy_one(struct b44 *bp) b44_register_phy_one() argument 2246 struct ssb_device *sdev = bp->sdev; b44_register_phy_one() 2259 mii_bus->priv = bp; b44_register_phy_one() 2264 mii_bus->phy_mask = ~(1 << bp->phy_addr); b44_register_phy_one() 2275 bp->mii_bus = mii_bus; b44_register_phy_one() 2283 if (!bp->mii_bus->phy_map[bp->phy_addr] && b44_register_phy_one() 2288 bp->phy_addr); b44_register_phy_one() 2290 bp->phy_addr = 0; b44_register_phy_one() 2292 bp->phy_addr); b44_register_phy_one() 2295 bp->phy_addr); b44_register_phy_one() 2298 phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, b44_register_phy_one() 2302 bp->phy_addr); b44_register_phy_one() 
2314 bp->phydev = phydev; b44_register_phy_one() 2315 bp->old_link = 0; b44_register_phy_one() 2316 bp->phy_addr = phydev->addr; b44_register_phy_one() 2336 static void b44_unregister_phy_one(struct b44 *bp) b44_unregister_phy_one() argument 2338 struct mii_bus *mii_bus = bp->mii_bus; b44_unregister_phy_one() 2340 phy_disconnect(bp->phydev); b44_unregister_phy_one() 2350 struct b44 *bp; b44_init_one() local 2357 dev = alloc_etherdev(sizeof(*bp)); b44_init_one() 2368 bp = netdev_priv(dev); b44_init_one() 2369 bp->sdev = sdev; b44_init_one() 2370 bp->dev = dev; b44_init_one() 2371 bp->force_copybreak = 0; b44_init_one() 2373 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); b44_init_one() 2375 spin_lock_init(&bp->lock); b44_init_one() 2377 bp->rx_pending = B44_DEF_RX_RING_PENDING; b44_init_one() 2378 bp->tx_pending = B44_DEF_TX_RING_PENDING; b44_init_one() 2381 netif_napi_add(dev, &bp->napi, b44_poll, 64); b44_init_one() 2399 err = b44_get_invariants(bp); b44_init_one() 2406 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { b44_init_one() 2412 bp->mii_if.dev = dev; b44_init_one() 2413 bp->mii_if.mdio_read = b44_mdio_read_mii; b44_init_one() 2414 bp->mii_if.mdio_write = b44_mdio_write_mii; b44_init_one() 2415 bp->mii_if.phy_id = bp->phy_addr; b44_init_one() 2416 bp->mii_if.phy_id_mask = 0x1f; b44_init_one() 2417 bp->mii_if.reg_num_mask = 0x1f; b44_init_one() 2420 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | b44_init_one() 2424 bp->flags |= B44_FLAG_PAUSE_AUTO; b44_init_one() 2439 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_init_one() 2442 err = b44_phy_reset(bp); b44_init_one() 2448 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_init_one() 2449 err = b44_register_phy_one(bp); b44_init_one() 2467 netif_napi_del(&bp->napi); b44_init_one() 2477 struct b44 *bp = netdev_priv(dev); b44_remove_one() local 2480 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_remove_one() 2481 b44_unregister_phy_one(bp); b44_remove_one() 2484 netif_napi_del(&bp->napi); b44_remove_one() 2493 struct b44 *bp = netdev_priv(dev); b44_suspend() local 2498 del_timer_sync(&bp->timer); b44_suspend() 2500 spin_lock_irq(&bp->lock); b44_suspend() 2502 b44_halt(bp); b44_suspend() 2503 netif_carrier_off(bp->dev); b44_suspend() 2504 netif_device_detach(bp->dev); b44_suspend() 2505 b44_free_rings(bp); b44_suspend() 2507 spin_unlock_irq(&bp->lock); b44_suspend() 2510 if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_suspend() 2511 b44_init_hw(bp, B44_PARTIAL_RESET); b44_suspend() 2512 b44_setup_wol(bp); b44_suspend() 2522 struct b44 *bp = netdev_priv(dev); b44_resume() local 2535 spin_lock_irq(&bp->lock); b44_resume() 2536 b44_init_rings(bp); b44_resume() 2537 b44_init_hw(bp, B44_FULL_RESET); b44_resume() 2538 spin_unlock_irq(&bp->lock); b44_resume() 2548 spin_lock_irq(&bp->lock); b44_resume() 2549 b44_halt(bp); b44_resume() 2550 b44_free_rings(bp); b44_resume() 2551 spin_unlock_irq(&bp->lock); b44_resume() 2555 netif_device_attach(bp->dev); b44_resume() 2557 b44_enable_ints(bp); b44_resume() 2560 mod_timer(&bp->timer, jiffies + 1); b44_resume()
|
H A D | bnx2_fw.h | 22 .bp = BNX2_COM_CPU_HW_BREAKPOINT,
38 .bp = BNX2_CP_CPU_HW_BREAKPOINT,
54 .bp = BNX2_RXP_CPU_HW_BREAKPOINT,
70 .bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
86 .bp = BNX2_TXP_CPU_HW_BREAKPOINT,
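The bnx2_fw.h hits are rows of C99 designated initializers that point each on-chip CPU's register descriptor at its hardware-breakpoint register. A standalone illustration of the same initializer style; the struct and offsets below are invented for the example, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

struct cpu_reg_demo {
	uint32_t mode;
	uint32_t bp;	/* hardware-breakpoint register offset */
	uint32_t state;
};

static const struct cpu_reg_demo demo_cpu_reg = {
	.mode  = 0x45000000,
	.bp    = 0x45000040,	/* the ".bp = ..._HW_BREAKPOINT" slot */
	.state = 0x45000004,
};

int main(void)
{
	printf("bp register at 0x%08x\n", demo_cpu_reg.bp);
	return 0;
}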
|
H A D | cnic.h | 368 #define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
405 #define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
406 (BP_VN(bp) << 17) | (x))
410 #define BNX2X_CL_QZONE_ID(bp, cli) \
411 (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
412 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
416 (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
417 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
422 (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
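BNX2X_HW_CID() above composes a hardware connection id by packing the engine port into bit 23 and the VN (virtual function index) into the bits starting at 17, ahead of the raw cid in the low bits. A standalone model of that packing, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_cid(uint32_t port, uint32_t vn, uint32_t cid)
{
	/* Mirrors ((BP_PORT(bp) << 23) | (BP_VN(bp) << 17) | (x)) */
	return (port << 23) | (vn << 17) | cid;
}

int main(void)
{
	/* port 1, VN 2, connection 5 -> 0x00840005 */
	printf("0x%08x\n", hw_cid(1, 2, 5));
	return 0;
}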
|
H A D | cnic.c | 1182 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_alloc_bnx2x_context() local
1200 if (!CHIP_IS_E1(bp)) cnic_alloc_bnx2x_context()
1230 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_alloc_bnx2x_resc() local
1240 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_alloc_bnx2x_resc()
1288 if (CNIC_SUPPORTS_FCOE(bp)) { cnic_alloc_bnx2x_resc()
1382 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_submit_kwqe_16() local
1390 BNX2X_HW_CID(bp, cid))); cnic_submit_kwqe_16()
1393 type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & cnic_submit_kwqe_16()
1431 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_set_tcp_options() local
1443 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags); cnic_bnx2x_set_tcp_options()
1446 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags); cnic_bnx2x_set_tcp_options()
1452 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_init1() local
1455 u32 pfid = bp->pfid; cnic_bnx2x_iscsi_init1()
1538 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_init2() local
1539 u32 pfid = bp->pfid; cnic_bnx2x_iscsi_init2()
1678 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_setup_bnx2x_ctx() local
1687 u32 hw_cid = BNX2X_HW_CID(bp, cid); cnic_setup_bnx2x_ctx()
1691 u8 port = BP_PORT(bp); cnic_setup_bnx2x_ctx()
1745 if (BNX2X_CHIP_IS_E2_PLUS(bp) && cnic_setup_bnx2x_ctx()
1746 bp->common.chip_port_mode == CHIP_2_PORT_MODE) { cnic_setup_bnx2x_ctx()
1867 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_ofld1() local
1921 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); cnic_bnx2x_iscsi_ofld1()
1957 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_destroy_ramrod() local
1966 hw_cid = BNX2X_HW_CID(bp, ctx->cid); cnic_bnx2x_destroy_ramrod()
2074 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_mac() local
2075 u32 pfid = bp->pfid; cnic_init_bnx2x_mac()
2112 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_connect() local
2181 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); cnic_bnx2x_connect()
2250 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_stat() local
2255 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_stat()
2274 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_init1() local
2319 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_init1()
2332 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_ofld1() local
2375 u32 hw_cid = BNX2X_HW_CID(bp, cid); cnic_bnx2x_fcoe_ofld1()
2399 cid = BNX2X_HW_CID(bp, cid); cnic_bnx2x_fcoe_ofld1()
2557 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_fw_destroy() local
2564 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_fw_destroy()
2721 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_submit_bnx2x_fcoe_kwqes() local
2729 if (!BNX2X_CHIP_IS_E2_PLUS(bp)) cnic_submit_bnx2x_fcoe_kwqes()
3045 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_ack_bnx2x_int() local
3046 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + cnic_ack_bnx2x_int()
3133 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_service_bnx2x_bh() local
3145 if (!CNIC_SUPPORTS_FCOE(bp)) { cnic_service_bnx2x_bh()
4225 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_cm_init_bnx2x_hw() local
4226 u32 pfid = bp->pfid; cnic_cm_init_bnx2x_hw()
4227 u32 port = BP_PORT(bp); cnic_cm_init_bnx2x_hw()
4871 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_storm_memset_hc_disable() local
4889 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_enable_bnx2x_int() local
4908 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_tx_ring() local
4937 if (BNX2X_CHIP_IS_E2_PLUS(bp)) cnic_init_bnx2x_tx_ring()
4974 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_rx_ring() local
4983 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); cnic_init_bnx2x_rx_ring()
4992 data->general.func_id = bp->pfid; cnic_init_bnx2x_rx_ring()
5041 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_kcq() local
5042 u32 pfid = bp->pfid; cnic_init_bnx2x_kcq()
5048 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_init_bnx2x_kcq()
5064 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_init_bnx2x_kcq()
5080 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_start_bnx2x_hw() local
5086 cp->func = bp->pf_num; cnic_start_bnx2x_hw()
5089 pfid = bp->pfid; cnic_start_bnx2x_hw()
5097 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_start_bnx2x_hw()
5156 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_rings() local
5179 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); cnic_init_rings()
5182 (BNX2X_CHIP_IS_E2_PLUS(bp) ? cnic_init_rings()
5184 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); cnic_init_rings()
5218 *(cid_ptr + 1) = cid * bp->db_size; cnic_init_rings()
5366 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_stop_bnx2x_hw() local
5373 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_stop_bnx2x_hw()
5390 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); cnic_stop_bnx2x_hw()
5472 struct bnx2 *bp = netdev_priv(dev); init_bnx2_cnic() local
5475 if (bp->cnic_probe) init_bnx2_cnic()
5476 ethdev = (bp->cnic_probe)(dev); init_bnx2_cnic()
5532 struct bnx2x *bp = netdev_priv(dev); init_bnx2x_cnic() local
5535 if (bp->cnic_probe) init_bnx2x_cnic()
5536 ethdev = bp->cnic_probe(dev); init_bnx2x_cnic()
5564 if (CNIC_SUPPORTS_FCOE(bp)) { init_bnx2x_cnic()
5584 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { init_bnx2x_cnic()
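Nearly every cnic.c hit begins with struct bnx2x *bp = netdev_priv(dev->netdev): the private state is co-allocated with the net_device, so recovering it is a constant-offset pointer computation, not a lookup. A hedged sketch of that allocation pattern; struct priv_demo is illustrative, not a cnic structure:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct priv_demo {
	u32 pfid;	/* stands in for bp->pfid */
};

static struct net_device *priv_demo_create(void)
{
	/* alloc_etherdev() reserves sizeof(struct priv_demo) bytes of
	 * private area directly behind the net_device itself. */
	struct net_device *dev = alloc_etherdev(sizeof(struct priv_demo));
	struct priv_demo *priv;

	if (!dev)
		return NULL;

	/* netdev_priv() just returns a pointer into that allocation,
	 * which is why the code above can call it in any context. */
	priv = netdev_priv(dev);
	priv->pfid = 0;
	return dev;	/* caller releases with free_netdev() */
}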
|
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/ |
H A D | bnx2x_main.c | 288 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
294 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
296 static void __storm_memset_dma_mapping(struct bnx2x *bp, __storm_memset_dma_mapping() argument
299 REG_WR(bp, addr, U64_LO(mapping)); __storm_memset_dma_mapping()
300 REG_WR(bp, addr + 4, U64_HI(mapping)); __storm_memset_dma_mapping()
303 static void storm_memset_spq_addr(struct bnx2x *bp, storm_memset_spq_addr() argument
309 __storm_memset_dma_mapping(bp, addr, mapping); storm_memset_spq_addr()
312 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, storm_memset_vf_to_pf() argument
315 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
317 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
319 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
321 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
325 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, storm_memset_func_en() argument
328 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
330 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
332 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
334 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
338 static void storm_memset_eq_data(struct bnx2x *bp, storm_memset_eq_data() argument
346 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); storm_memset_eq_data()
349 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, storm_memset_eq_prod() argument
353 REG_WR16(bp, addr, eq_prod); storm_memset_eq_prod()
359 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) bnx2x_reg_wr_ind() argument
361 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); bnx2x_reg_wr_ind()
362 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); bnx2x_reg_wr_ind()
363 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_reg_wr_ind()
367 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) bnx2x_reg_rd_ind() argument
371 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); bnx2x_reg_rd_ind()
372 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); bnx2x_reg_rd_ind()
373 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_reg_rd_ind()
385 static void bnx2x_dp_dmae(struct bnx2x *bp, bnx2x_dp_dmae() argument
452 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) bnx2x_post_dmae() argument
459 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); bnx2x_post_dmae()
461 REG_WR(bp, dmae_reg_go_c[idx], 1); bnx2x_post_dmae()
475 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, bnx2x_dmae_opcode() argument
485 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); bnx2x_dmae_opcode()
486 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | bnx2x_dmae_opcode()
487 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); bnx2x_dmae_opcode()
500 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, bnx2x_prep_dmae_with_comp() argument
507 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, bnx2x_prep_dmae_with_comp()
511 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); bnx2x_prep_dmae_with_comp()
512 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); bnx2x_prep_dmae_with_comp()
517 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, bnx2x_issue_dmae_with_comp() argument
520 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; bnx2x_issue_dmae_with_comp()
523 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); bnx2x_issue_dmae_with_comp()
530 spin_lock_bh(&bp->dmae_lock); bnx2x_issue_dmae_with_comp()
536 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); bnx2x_issue_dmae_with_comp()
543 (bp->recovery_state != BNX2X_RECOVERY_DONE && bnx2x_issue_dmae_with_comp()
544 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { bnx2x_issue_dmae_with_comp()
559 spin_unlock_bh(&bp->dmae_lock); bnx2x_issue_dmae_with_comp()
564 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, bnx2x_write_dmae() argument
570 if (!bp->dmae_ready) { bnx2x_write_dmae()
571 u32 *data = bnx2x_sp(bp, wb_data[0]); bnx2x_write_dmae()
573 if (CHIP_IS_E1(bp)) bnx2x_write_dmae()
574 bnx2x_init_ind_wr(bp, dst_addr, data, len32); bnx2x_write_dmae()
576 bnx2x_init_str_wr(bp, dst_addr, data, len32); bnx2x_write_dmae()
581 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); bnx2x_write_dmae()
591 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_write_dmae()
600 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) bnx2x_read_dmae() argument
605 if (!bp->dmae_ready) { bnx2x_read_dmae()
606 u32 *data = bnx2x_sp(bp, wb_data[0]); bnx2x_read_dmae()
609 if (CHIP_IS_E1(bp)) bnx2x_read_dmae()
611 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); bnx2x_read_dmae()
614 data[i] = REG_RD(bp, src_addr + i*4); bnx2x_read_dmae()
620 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); bnx2x_read_dmae()
625 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); bnx2x_read_dmae()
626 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); bnx2x_read_dmae()
630 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_read_dmae()
639 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae_phys_len() argument
642 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); bnx2x_write_dmae_phys_len()
646 bnx2x_write_dmae(bp, phys_addr + offset, bnx2x_write_dmae_phys_len()
652 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); bnx2x_write_dmae_phys_len()
666 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, bnx2x_get_assert_list_entry() argument
686 static int bnx2x_mc_assert(struct bnx2x *bp) bnx2x_mc_assert() argument
712 last_idx = REG_RD8(bp, bar_storm_intmem[storm] + bnx2x_mc_assert()
722 regs[j] = REG_RD(bp, bar_storm_intmem[storm] + bnx2x_mc_assert()
723 bnx2x_get_assert_list_entry(bp, bnx2x_mc_assert()
741 CHIP_IS_E1(bp) ? "everest1" : bnx2x_mc_assert()
742 CHIP_IS_E1H(bp) ? "everest1h" : bnx2x_mc_assert()
743 CHIP_IS_E2(bp) ? "everest2" : "everest3", bnx2x_mc_assert()
752 #define SCRATCH_BUFFER_SIZE(bp) \
753 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
755 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) bnx2x_fw_dump_lvl() argument
762 if (BP_NOMCP(bp)) { bnx2x_fw_dump_lvl()
766 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", bnx2x_fw_dump_lvl()
767 (bp->common.bc_ver & 0xff0000) >> 16, bnx2x_fw_dump_lvl()
768 (bp->common.bc_ver & 0xff00) >> 8, bnx2x_fw_dump_lvl()
769 (bp->common.bc_ver & 0xff)); bnx2x_fw_dump_lvl()
771 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); bnx2x_fw_dump_lvl()
772 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) bnx2x_fw_dump_lvl()
775 if (BP_PATH(bp) == 0) bnx2x_fw_dump_lvl()
776 trace_shmem_base = bp->common.shmem_base; bnx2x_fw_dump_lvl()
778 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); bnx2x_fw_dump_lvl()
781 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || bnx2x_fw_dump_lvl()
782 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + bnx2x_fw_dump_lvl()
783 SCRATCH_BUFFER_SIZE(bp)) { bnx2x_fw_dump_lvl()
792 mark = REG_RD(bp, addr); bnx2x_fw_dump_lvl()
800 mark = REG_RD(bp, addr); bnx2x_fw_dump_lvl()
801 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; bnx2x_fw_dump_lvl()
813 data[word] = htonl(REG_RD(bp, offset + 4*word)); bnx2x_fw_dump_lvl()
821 data[word] = htonl(REG_RD(bp, offset + 4*word)); bnx2x_fw_dump_lvl()
828 static void bnx2x_fw_dump(struct bnx2x *bp) bnx2x_fw_dump() argument
830 bnx2x_fw_dump_lvl(bp, KERN_ERR); bnx2x_fw_dump()
833 static void bnx2x_hc_int_disable(struct bnx2x *bp) bnx2x_hc_int_disable() argument
835 int port = BP_PORT(bp); bnx2x_hc_int_disable()
837 u32 val = REG_RD(bp, addr); bnx2x_hc_int_disable()
843 if (CHIP_IS_E1(bp)) { bnx2x_hc_int_disable()
848 REG_WR(bp, HC_REG_INT_MASK + port*4, 0); bnx2x_hc_int_disable()
866 REG_WR(bp, addr, val); bnx2x_hc_int_disable()
867 if (REG_RD(bp, addr) != val) bnx2x_hc_int_disable()
871 static void bnx2x_igu_int_disable(struct bnx2x *bp) bnx2x_igu_int_disable() argument
873 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); bnx2x_igu_int_disable()
884 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_disable()
885 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) bnx2x_igu_int_disable()
889 static void bnx2x_int_disable(struct bnx2x *bp) bnx2x_int_disable() argument
891 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_int_disable()
892 bnx2x_hc_int_disable(bp); bnx2x_int_disable()
894 bnx2x_igu_int_disable(bp); bnx2x_int_disable()
897 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) bnx2x_panic_dump() argument
902 int func = BP_FUNC(bp); bnx2x_panic_dump()
907 if (IS_PF(bp) && disable_int) bnx2x_panic_dump()
908 bnx2x_int_disable(bp); bnx2x_panic_dump()
910 bp->stats_state = STATS_STATE_DISABLED; bnx2x_panic_dump()
911 bp->eth_stats.unrecoverable_error++; bnx2x_panic_dump()
918 if (IS_PF(bp)) { bnx2x_panic_dump()
919 struct host_sp_status_block *def_sb = bp->def_status_blk; bnx2x_panic_dump()
923 bp->def_idx, bp->def_att_idx, bp->attn_state, bnx2x_panic_dump()
924 bp->spq_prod_idx, bp->stats_counter); bnx2x_panic_dump()
941 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + bnx2x_panic_dump()
954 for_each_eth_queue(bp, i) { for_each_eth_queue()
955 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
960 CHIP_IS_E1x(bp) ? for_each_eth_queue()
964 CHIP_IS_E1x(bp) ? for_each_eth_queue()
971 if (!bp->fp) for_each_eth_queue()
1004 loop = CHIP_IS_E1x(bp) ?
1025 if (IS_VF(bp))
1029 data_size = CHIP_IS_E1x(bp) ?
1033 sb_data_p = CHIP_IS_E1x(bp) ?
1038 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1042 if (!CHIP_IS_E1x(bp)) {
1079 if (IS_PF(bp)) {
1081 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1083 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1086 i, bp->eq_ring[i].message.opcode,
1087 bp->eq_ring[i].message.error);
1095 for_each_valid_rx_queue(bp, i) { for_each_valid_rx_queue()
1096 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_valid_rx_queue()
1098 if (!bp->fp) for_each_valid_rx_queue()
1135 for_each_valid_tx_queue(bp, i) { for_each_valid_tx_queue()
1136 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_valid_tx_queue()
1138 if (!bp->fp) for_each_valid_tx_queue()
1173 if (IS_PF(bp)) {
1174 bnx2x_fw_dump(bp);
1175 bnx2x_mc_assert(bp);
1203 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, bnx2x_pbf_pN_buf_flushed() argument
1210 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); bnx2x_pbf_pN_buf_flushed()
1211 crd = crd_start = REG_RD(bp, regs->crd); bnx2x_pbf_pN_buf_flushed()
1212 init_crd = REG_RD(bp, regs->init_crd); bnx2x_pbf_pN_buf_flushed()
1222 crd = REG_RD(bp, regs->crd); bnx2x_pbf_pN_buf_flushed()
1223 crd_freed = REG_RD(bp, regs->crd_freed); bnx2x_pbf_pN_buf_flushed()
1238 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, bnx2x_pbf_pN_cmd_flushed() argument
1245 occup = to_free = REG_RD(bp, regs->lines_occup); bnx2x_pbf_pN_cmd_flushed()
1246 freed = freed_start = REG_RD(bp, regs->lines_freed); bnx2x_pbf_pN_cmd_flushed()
1254 occup = REG_RD(bp, regs->lines_occup); bnx2x_pbf_pN_cmd_flushed()
1255 freed = REG_RD(bp, regs->lines_freed); bnx2x_pbf_pN_cmd_flushed()
1270 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, bnx2x_flr_clnup_reg_poll() argument
1276 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) bnx2x_flr_clnup_reg_poll()
1282 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, bnx2x_flr_clnup_poll_hw_counter() argument
1285 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); bnx2x_flr_clnup_poll_hw_counter()
1294 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) bnx2x_flr_clnup_poll_count() argument
1297 if (CHIP_REV_IS_EMUL(bp)) bnx2x_flr_clnup_poll_count()
1300 if (CHIP_REV_IS_FPGA(bp)) bnx2x_flr_clnup_poll_count()
1306 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) bnx2x_tx_hw_flushed() argument
1309 {0, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1312 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1315 {1, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1318 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1321 {4, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1324 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1330 {0, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1333 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1336 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1339 {1, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1342 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1345 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1348 {4, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1351 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1354 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1363 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); bnx2x_tx_hw_flushed()
1367 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); bnx2x_tx_hw_flushed()
1379 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) bnx2x_send_final_clnup() argument
1386 if (REG_RD(bp, comp_addr)) { bnx2x_send_final_clnup()
1397 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); bnx2x_send_final_clnup()
1399 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { bnx2x_send_final_clnup()
1402 (REG_RD(bp, comp_addr))); bnx2x_send_final_clnup()
1407 REG_WR(bp, comp_addr, 0); bnx2x_send_final_clnup()
1422 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) bnx2x_poll_hw_usage_counters() argument
1425 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1432 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1439 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1440 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), bnx2x_poll_hw_usage_counters()
1446 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1447 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), bnx2x_poll_hw_usage_counters()
1451 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1452 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), bnx2x_poll_hw_usage_counters()
1458 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1459 dmae_reg_go_c[INIT_DMAE_C(bp)], bnx2x_poll_hw_usage_counters()
1467 static void bnx2x_hw_enable_status(struct bnx2x *bp) bnx2x_hw_enable_status() argument
1471 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); bnx2x_hw_enable_status()
1474 val = REG_RD(bp, PBF_REG_DISABLE_PF); bnx2x_hw_enable_status()
1477 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); bnx2x_hw_enable_status()
1480 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); bnx2x_hw_enable_status()
1483 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); bnx2x_hw_enable_status()
1486 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); bnx2x_hw_enable_status()
1489 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); bnx2x_hw_enable_status()
1492 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); bnx2x_hw_enable_status()
1497 static int bnx2x_pf_flr_clnup(struct bnx2x *bp) bnx2x_pf_flr_clnup() argument
1499 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); bnx2x_pf_flr_clnup()
1501 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); bnx2x_pf_flr_clnup()
1504 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); bnx2x_pf_flr_clnup()
1508 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) bnx2x_pf_flr_clnup()
1514 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) bnx2x_pf_flr_clnup()
1520 bnx2x_tx_hw_flushed(bp, poll_cnt); bnx2x_pf_flr_clnup()
1526 if (bnx2x_is_pcie_pending(bp->pdev)) bnx2x_pf_flr_clnup()
1530 bnx2x_hw_enable_status(bp); bnx2x_pf_flr_clnup()
1536 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_pf_flr_clnup()
1541 static void bnx2x_hc_int_enable(struct bnx2x *bp) bnx2x_hc_int_enable() argument
1543 int port = BP_PORT(bp); bnx2x_hc_int_enable()
1545 u32 val = REG_RD(bp, addr); bnx2x_hc_int_enable()
1546 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; bnx2x_hc_int_enable()
1547 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; bnx2x_hc_int_enable()
1548 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; bnx2x_hc_int_enable()
1568 if (!CHIP_IS_E1(bp)) { bnx2x_hc_int_enable()
1572 REG_WR(bp, addr, val); bnx2x_hc_int_enable()
1578 if (CHIP_IS_E1(bp)) bnx2x_hc_int_enable()
1579 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); bnx2x_hc_int_enable()
1585 REG_WR(bp, addr, val); bnx2x_hc_int_enable()
1592 if (!CHIP_IS_E1(bp)) { bnx2x_hc_int_enable()
1594 if (IS_MF(bp)) { bnx2x_hc_int_enable()
1595 val = (0xee0f | (1 << (BP_VN(bp) + 4))); bnx2x_hc_int_enable()
1596 if (bp->port.pmf) bnx2x_hc_int_enable()
1602 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); bnx2x_hc_int_enable()
1603 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); bnx2x_hc_int_enable()
1610 static void bnx2x_igu_int_enable(struct bnx2x *bp) bnx2x_igu_int_enable() argument
1613 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; bnx2x_igu_int_enable()
1614 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; bnx2x_igu_int_enable()
1615 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; bnx2x_igu_int_enable()
1617 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); bnx2x_igu_int_enable()
1641 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_enable()
1642 bnx2x_ack_int(bp); bnx2x_igu_int_enable()
1650 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_enable()
1653 pci_intx(bp->pdev, true); bnx2x_igu_int_enable()
1658 if (IS_MF(bp)) { bnx2x_igu_int_enable()
1659 val = (0xee0f | (1 << (BP_VN(bp) + 4))); bnx2x_igu_int_enable()
1660 if (bp->port.pmf) bnx2x_igu_int_enable()
1666 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); bnx2x_igu_int_enable()
1667 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); bnx2x_igu_int_enable()
1673 void bnx2x_int_enable(struct bnx2x *bp) bnx2x_int_enable() argument
1675 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_int_enable()
1676 bnx2x_hc_int_enable(bp); bnx2x_int_enable()
1678 bnx2x_igu_int_enable(bp); bnx2x_int_enable()
1681 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) bnx2x_int_disable_sync() argument
1683 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; bnx2x_int_disable_sync()
1688 bnx2x_int_disable(bp); bnx2x_int_disable_sync()
1692 synchronize_irq(bp->msix_table[0].vector); bnx2x_int_disable_sync()
1694 if (CNIC_SUPPORT(bp)) bnx2x_int_disable_sync()
1696 for_each_eth_queue(bp, i) bnx2x_int_disable_sync()
1697 synchronize_irq(bp->msix_table[offset++].vector); bnx2x_int_disable_sync()
1699 synchronize_irq(bp->pdev->irq); bnx2x_int_disable_sync()
1702 cancel_delayed_work(&bp->sp_task); bnx2x_int_disable_sync()
1703 cancel_delayed_work(&bp->period_task); bnx2x_int_disable_sync()
1714 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_trylock_hw_lock() argument
1718 int func = BP_FUNC(bp); bnx2x_trylock_hw_lock()
1739 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); bnx2x_trylock_hw_lock()
1740 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_trylock_hw_lock()
1752 * @bp: driver handle
1757 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) bnx2x_get_leader_lock_resource() argument
1759 if (BP_PATH(bp)) bnx2x_get_leader_lock_resource()
1768 * @bp: driver handle
1772 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) bnx2x_trylock_leader_lock() argument
1774 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); bnx2x_trylock_leader_lock()
1777 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1780 static int bnx2x_schedule_sp_task(struct bnx2x *bp) bnx2x_schedule_sp_task() argument
1786 atomic_set(&bp->interrupt_occurred, 1); bnx2x_schedule_sp_task()
1795 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); bnx2x_schedule_sp_task()
1800 struct bnx2x *bp = fp->bp; bnx2x_sp_event() local
1804 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_sp_event()
1808 fp->index, cid, command, bp->state, bnx2x_sp_event()
1816 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); bnx2x_sp_event()
1861 q_obj->complete_cmd(bp, q_obj, drv_cmd)) bnx2x_sp_event()
1865 * In this case we don't want to increase the bp->spq_left bnx2x_sp_event()
1876 atomic_inc(&bp->cq_spq_left); bnx2x_sp_event()
1877 /* push the change in bp->spq_left and towards the memory */ bnx2x_sp_event()
1880 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); bnx2x_sp_event()
1883 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { bnx2x_sp_event()
1894 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); bnx2x_sp_event()
1896 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); bnx2x_sp_event()
1900 bnx2x_schedule_sp_task(bp); bnx2x_sp_event()
1908 struct bnx2x *bp = netdev_priv(dev_instance); bnx2x_interrupt() local
1909 u16 status = bnx2x_ack_int(bp); bnx2x_interrupt()
1922 if (unlikely(bp->panic)) bnx2x_interrupt()
1926 for_each_eth_queue(bp, i) { for_each_eth_queue()
1927 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
1929 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); for_each_eth_queue()
1935 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); for_each_eth_queue()
1940 if (CNIC_SUPPORT(bp)) {
1946 c_ops = rcu_dereference(bp->cnic_ops);
1947 if (c_ops && (bp->cnic_eth_dev.drv_state &
1949 c_ops->cnic_handler(bp->cnic_data, NULL);
1961 bnx2x_schedule_sp_task(bp);
1981 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_acquire_hw_lock() argument
1985 int func = BP_FUNC(bp); bnx2x_acquire_hw_lock()
2004 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_acquire_hw_lock()
2014 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); bnx2x_acquire_hw_lock()
2015 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_acquire_hw_lock()
2025 int bnx2x_release_leader_lock(struct bnx2x *bp) bnx2x_release_leader_lock() argument
2027 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); bnx2x_release_leader_lock()
2030 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_release_hw_lock() argument
2034 int func = BP_FUNC(bp); bnx2x_release_hw_lock()
2052 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_release_hw_lock()
2059 REG_WR(bp, hw_lock_control_reg, resource_bit); bnx2x_release_hw_lock()
2063 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) bnx2x_get_gpio() argument
2066 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_get_gpio()
2067 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_get_gpio()
2080 gpio_reg = REG_RD(bp, MISC_REG_GPIO); bnx2x_get_gpio()
2091 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) bnx2x_set_gpio() argument
2094 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_set_gpio()
2095 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_set_gpio()
2106 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio()
2108 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); bnx2x_set_gpio()
2141 REG_WR(bp, MISC_REG_GPIO, gpio_reg); bnx2x_set_gpio()
2142 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio()
2147 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) bnx2x_set_mult_gpio() argument
2154 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_mult_gpio()
2156 gpio_reg = REG_RD(bp, MISC_REG_GPIO); bnx2x_set_mult_gpio()
2187 REG_WR(bp, MISC_REG_GPIO, gpio_reg); bnx2x_set_mult_gpio()
2189 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_mult_gpio()
2194 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) bnx2x_set_gpio_int() argument
2197 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_set_gpio_int()
2198 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_set_gpio_int()
2209 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio_int()
2211 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); bnx2x_set_gpio_int()
2236 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); bnx2x_set_gpio_int()
2237 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio_int()
2242 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) bnx2x_set_spio() argument
2252 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); bnx2x_set_spio()
2254 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); bnx2x_set_spio()
2281 REG_WR(bp, MISC_REG_SPIO, spio_reg); bnx2x_set_spio()
2282 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); bnx2x_set_spio()
2287 void bnx2x_calc_fc_adv(struct bnx2x *bp) bnx2x_calc_fc_adv() argument
2289 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_calc_fc_adv()
2290 switch (bp->link_vars.ieee_fc & bnx2x_calc_fc_adv()
2293 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | bnx2x_calc_fc_adv()
2298 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | bnx2x_calc_fc_adv()
2303 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; bnx2x_calc_fc_adv()
2307 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | bnx2x_calc_fc_adv()
2313 static void bnx2x_set_requested_fc(struct bnx2x *bp) bnx2x_set_requested_fc() argument
2319 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) bnx2x_set_requested_fc()
2320 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; bnx2x_set_requested_fc()
2322 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; bnx2x_set_requested_fc()
2325 static void bnx2x_init_dropless_fc(struct bnx2x *bp) bnx2x_init_dropless_fc() argument
2329 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { bnx2x_init_dropless_fc()
2330 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) bnx2x_init_dropless_fc()
2333 REG_WR(bp, BAR_USTRORM_INTMEM + bnx2x_init_dropless_fc()
2334 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), bnx2x_init_dropless_fc()
2342 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) bnx2x_initial_phy_init() argument
2344 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_initial_phy_init()
2345 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; bnx2x_initial_phy_init()
2347 if (!BP_NOMCP(bp)) { bnx2x_initial_phy_init()
2348 bnx2x_set_requested_fc(bp); bnx2x_initial_phy_init()
2349 bnx2x_acquire_phy_lock(bp); bnx2x_initial_phy_init()
2352 struct link_params *lp = &bp->link_params; bnx2x_initial_phy_init()
2367 struct link_params *lp = &bp->link_params; bnx2x_initial_phy_init()
2371 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_initial_phy_init()
2373 bnx2x_release_phy_lock(bp); bnx2x_initial_phy_init()
2375 bnx2x_init_dropless_fc(bp); bnx2x_initial_phy_init()
2377 bnx2x_calc_fc_adv(bp); bnx2x_initial_phy_init()
2379 if (bp->link_vars.link_up) { bnx2x_initial_phy_init()
2380 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_initial_phy_init()
2381 bnx2x_link_report(bp); bnx2x_initial_phy_init()
2383 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bnx2x_initial_phy_init()
2384 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; bnx2x_initial_phy_init()
2391 void bnx2x_link_set(struct bnx2x *bp) bnx2x_link_set() argument
2393 if (!BP_NOMCP(bp)) { bnx2x_link_set()
2394 bnx2x_acquire_phy_lock(bp); bnx2x_link_set()
2395 bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_link_set()
2396 bnx2x_release_phy_lock(bp); bnx2x_link_set()
2398 bnx2x_init_dropless_fc(bp); bnx2x_link_set()
2400 bnx2x_calc_fc_adv(bp); bnx2x_link_set()
2405 static void bnx2x__link_reset(struct bnx2x *bp) bnx2x__link_reset() argument
2407 if (!BP_NOMCP(bp)) { bnx2x__link_reset()
2408 bnx2x_acquire_phy_lock(bp); bnx2x__link_reset()
2409 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x__link_reset()
2410 bnx2x_release_phy_lock(bp); bnx2x__link_reset()
2415 void bnx2x_force_link_reset(struct bnx2x *bp) bnx2x_force_link_reset() argument
2417 bnx2x_acquire_phy_lock(bp); bnx2x_force_link_reset()
2418 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_force_link_reset()
2419 bnx2x_release_phy_lock(bp); bnx2x_force_link_reset()
2422 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) bnx2x_link_test() argument
2426 if (!BP_NOMCP(bp)) { bnx2x_link_test()
2427 bnx2x_acquire_phy_lock(bp); bnx2x_link_test()
2428 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, bnx2x_link_test()
2430 bnx2x_release_phy_lock(bp); bnx2x_link_test()
2446 static void bnx2x_calc_vn_min(struct bnx2x *bp, bnx2x_calc_vn_min() argument
2452 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_calc_vn_min()
2453 u32 vn_cfg = bp->mf_config[vn]; bnx2x_calc_vn_min()
2470 if (BNX2X_IS_ETS_ENABLED(bp)) { bnx2x_calc_vn_min()
2484 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, bnx2x_calc_vn_max() argument
2488 u32 vn_cfg = bp->mf_config[vn]; bnx2x_calc_vn_max()
2493 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); bnx2x_calc_vn_max()
2495 if (IS_MF_SI(bp)) { bnx2x_calc_vn_max()
2497 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; bnx2x_calc_vn_max()
2508 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) bnx2x_get_cmng_fns_mode() argument
2510 if (CHIP_REV_IS_SLOW(bp)) bnx2x_get_cmng_fns_mode()
2512 if (IS_MF(bp)) bnx2x_get_cmng_fns_mode()
2518 void bnx2x_read_mf_cfg(struct bnx2x *bp) bnx2x_read_mf_cfg() argument
2520 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); bnx2x_read_mf_cfg()
2522 if (BP_NOMCP(bp)) bnx2x_read_mf_cfg()
2536 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_read_mf_cfg()
2537 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); bnx2x_read_mf_cfg()
2542 bp->mf_config[vn] = bnx2x_read_mf_cfg()
2543 MF_CFG_RD(bp, func_mf_config[func].config); bnx2x_read_mf_cfg()
2545 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { bnx2x_read_mf_cfg()
2547 bp->flags |= MF_FUNC_DIS; bnx2x_read_mf_cfg()
2550 bp->flags &= ~MF_FUNC_DIS; bnx2x_read_mf_cfg()
2554 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) bnx2x_cmng_fns_init() argument
2559 input.port_rate = bp->link_vars.line_speed; bnx2x_cmng_fns_init()
2566 bnx2x_read_mf_cfg(bp); bnx2x_cmng_fns_init()
2569 bnx2x_calc_vn_min(bp, &input); bnx2x_cmng_fns_init()
2572 if (bp->port.pmf) bnx2x_cmng_fns_init()
2573 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) bnx2x_cmng_fns_init()
2574 bnx2x_calc_vn_max(bp, vn, &input); bnx2x_cmng_fns_init()
2580 bnx2x_init_cmng(&input, &bp->cmng); bnx2x_cmng_fns_init()
2589 static void storm_memset_cmng(struct bnx2x *bp, storm_memset_cmng() argument
2599 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); storm_memset_cmng()
2601 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { storm_memset_cmng()
2602 int func = func_by_vn(bp, vn); storm_memset_cmng()
2607 __storm_memset_struct(bp, addr, size, storm_memset_cmng()
2613 __storm_memset_struct(bp, addr, size, storm_memset_cmng()
2619 void bnx2x_set_local_cmng(struct bnx2x *bp) bnx2x_set_local_cmng() argument
2621 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); bnx2x_set_local_cmng()
2624 bnx2x_cmng_fns_init(bp, false, cmng_fns); bnx2x_set_local_cmng()
2625 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_set_local_cmng()
2634 static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_link_attn() argument
2637 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_link_attn()
2639 bnx2x_link_update(&bp->link_params, &bp->link_vars); bnx2x_link_attn()
2641 bnx2x_init_dropless_fc(bp); bnx2x_link_attn()
2643 if (bp->link_vars.link_up) { bnx2x_link_attn()
2645 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { bnx2x_link_attn()
2648 pstats = bnx2x_sp(bp, port_stats); bnx2x_link_attn()
2653 if (bp->state == BNX2X_STATE_OPEN) bnx2x_link_attn()
2654 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_link_attn()
2657 if (bp->link_vars.link_up && bp->link_vars.line_speed) bnx2x_link_attn()
2658 bnx2x_set_local_cmng(bp); bnx2x_link_attn()
2660 __bnx2x_link_report(bp); bnx2x_link_attn()
2662 if (IS_MF(bp)) bnx2x_link_attn()
2663 bnx2x_link_sync_notify(bp); bnx2x_link_attn()
2666 void bnx2x__link_status_update(struct bnx2x *bp) bnx2x__link_status_update() argument
2668 if (bp->state != BNX2X_STATE_OPEN) bnx2x__link_status_update()
2672 if (IS_PF(bp)) { bnx2x__link_status_update()
2673 bnx2x_dcbx_pmf_update(bp); bnx2x__link_status_update()
2674 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); bnx2x__link_status_update()
2675 if (bp->link_vars.link_up) bnx2x__link_status_update()
2676 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x__link_status_update()
2678 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x__link_status_update()
2680 bnx2x_link_report(bp); bnx2x__link_status_update()
2683 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | bnx2x__link_status_update()
2695 bp->port.advertising[0] = bp->port.supported[0]; bnx2x__link_status_update() 2697 bp->link_params.bp = bp; bnx2x__link_status_update() 2698 bp->link_params.port = BP_PORT(bp); bnx2x__link_status_update() 2699 bp->link_params.req_duplex[0] = DUPLEX_FULL; bnx2x__link_status_update() 2700 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; bnx2x__link_status_update() 2701 bp->link_params.req_line_speed[0] = SPEED_10000; bnx2x__link_status_update() 2702 bp->link_params.speed_cap_mask[0] = 0x7f0000; bnx2x__link_status_update() 2703 bp->link_params.switch_cfg = SWITCH_CFG_10G; bnx2x__link_status_update() 2704 bp->link_vars.mac_type = MAC_TYPE_BMAC; bnx2x__link_status_update() 2705 bp->link_vars.line_speed = SPEED_10000; bnx2x__link_status_update() 2706 bp->link_vars.link_status = bnx2x__link_status_update() 2709 bp->link_vars.link_up = 1; bnx2x__link_status_update() 2710 bp->link_vars.duplex = DUPLEX_FULL; bnx2x__link_status_update() 2711 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; bnx2x__link_status_update() 2712 __bnx2x_link_report(bp); bnx2x__link_status_update() 2714 bnx2x_sample_bulletin(bp); bnx2x__link_status_update() 2721 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x__link_status_update() 2725 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, bnx2x_afex_func_update() argument 2732 func_params.f_obj = &bp->func_obj; bnx2x_afex_func_update() 2744 if (bnx2x_func_state_change(bp, &func_params) < 0) bnx2x_afex_func_update() 2745 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); bnx2x_afex_func_update() 2750 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, bnx2x_afex_handle_vif_list_cmd() argument 2764 func_params.f_obj = &bp->func_obj; bnx2x_afex_handle_vif_list_cmd() 2781 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_afex_handle_vif_list_cmd() 2783 bnx2x_fw_command(bp, drv_msg_code, 0); bnx2x_afex_handle_vif_list_cmd() 2788 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) bnx2x_handle_afex_cmd() argument 2791 u32 func = BP_ABS_FUNC(bp); bnx2x_handle_afex_cmd() 2801 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd() 2804 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); bnx2x_handle_afex_cmd() 2808 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd() 2809 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd() 2813 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, bnx2x_handle_afex_cmd() 2818 addr_to_write = SHMEM2_RD(bp, bnx2x_handle_afex_cmd() 2819 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd() 2820 stats_type = SHMEM2_RD(bp, bnx2x_handle_afex_cmd() 2821 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd() 2827 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); bnx2x_handle_afex_cmd() 2831 REG_WR(bp, addr_to_write + i*sizeof(u32), bnx2x_handle_afex_cmd() 2835 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); bnx2x_handle_afex_cmd() 2839 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); bnx2x_handle_afex_cmd() 2840 bp->mf_config[BP_VN(bp)] = mf_config; bnx2x_handle_afex_cmd() 2852 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); bnx2x_handle_afex_cmd() 2854 bp->mf_config[BP_VN(bp)] = mf_config; bnx2x_handle_afex_cmd() 2856 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); bnx2x_handle_afex_cmd() 2858 cmng_input.vnic_max_rate[BP_VN(bp)]; bnx2x_handle_afex_cmd() 2863 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 
bnx2x_handle_afex_cmd() 2867 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_afex_cmd() 2871 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_afex_cmd() 2879 (MF_CFG_RD(bp, bnx2x_handle_afex_cmd() 2884 (MF_CFG_RD(bp, bnx2x_handle_afex_cmd() 2890 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, bnx2x_handle_afex_cmd() 2894 bp->afex_def_vlan_tag = vlan_val; bnx2x_handle_afex_cmd() 2895 bp->afex_vlan_mode = vlan_mode; bnx2x_handle_afex_cmd() 2898 bnx2x_link_report(bp); bnx2x_handle_afex_cmd() 2901 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); bnx2x_handle_afex_cmd() 2904 bp->afex_def_vlan_tag = -1; bnx2x_handle_afex_cmd() 2909 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) bnx2x_handle_update_svid_cmd() argument 2916 func_params.f_obj = &bp->func_obj; bnx2x_handle_update_svid_cmd() 2919 if (IS_MF_UFP(bp)) { bnx2x_handle_update_svid_cmd() 2920 int func = BP_ABS_FUNC(bp); bnx2x_handle_update_svid_cmd() 2924 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_update_svid_cmd() 2927 bp->mf_ov = val; bnx2x_handle_update_svid_cmd() 2934 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, bnx2x_handle_update_svid_cmd() 2935 bp->mf_ov); bnx2x_handle_update_svid_cmd() 2940 switch_update_params->vlan = bp->mf_ov; bnx2x_handle_update_svid_cmd() 2942 if (bnx2x_func_state_change(bp, &func_params) < 0) { bnx2x_handle_update_svid_cmd() 2944 bp->mf_ov); bnx2x_handle_update_svid_cmd() 2948 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); bnx2x_handle_update_svid_cmd() 2950 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); bnx2x_handle_update_svid_cmd() 2957 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); bnx2x_handle_update_svid_cmd() 2960 static void bnx2x_pmf_update(struct bnx2x *bp) bnx2x_pmf_update() argument 2962 int port = BP_PORT(bp); bnx2x_pmf_update() 2965 bp->port.pmf = 1; bnx2x_pmf_update() 2966 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); bnx2x_pmf_update() 2970 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). bnx2x_pmf_update() 2975 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bnx2x_pmf_update() 2977 bnx2x_dcbx_pmf_update(bp); bnx2x_pmf_update() 2980 val = (0xff0f | (1 << (BP_VN(bp) + 4))); bnx2x_pmf_update() 2981 if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_pmf_update() 2982 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); bnx2x_pmf_update() 2983 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); bnx2x_pmf_update() 2984 } else if (!CHIP_IS_E1x(bp)) { bnx2x_pmf_update() 2985 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); bnx2x_pmf_update() 2986 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); bnx2x_pmf_update() 2989 bnx2x_stats_handle(bp, STATS_EVENT_PMF); bnx2x_pmf_update() 3001 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) bnx2x_fw_command() argument 3003 int mb_idx = BP_FW_MB_IDX(bp); bnx2x_fw_command() 3007 u8 delay = CHIP_REV_IS_SLOW(bp) ? 
100 : 10; bnx2x_fw_command() 3009 mutex_lock(&bp->fw_mb_mutex); bnx2x_fw_command() 3010 seq = ++bp->fw_seq; bnx2x_fw_command() 3011 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); bnx2x_fw_command() 3012 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); bnx2x_fw_command() 3021 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); bnx2x_fw_command() 3035 bnx2x_fw_dump(bp); bnx2x_fw_command() 3038 mutex_unlock(&bp->fw_mb_mutex); bnx2x_fw_command() 3043 static void storm_memset_func_cfg(struct bnx2x *bp, storm_memset_func_cfg() argument 3052 __storm_memset_struct(bp, addr, size, (u32 *)tcfg); storm_memset_func_cfg() 3055 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) bnx2x_func_init() argument 3057 if (CHIP_IS_E1x(bp)) { bnx2x_func_init() 3060 storm_memset_func_cfg(bp, &tcfg, p->func_id); bnx2x_func_init() 3064 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); bnx2x_func_init() 3065 storm_memset_func_en(bp, p->func_id, 1); bnx2x_func_init() 3069 storm_memset_spq_addr(bp, p->spq_map, p->func_id); bnx2x_func_init() 3070 REG_WR(bp, XSEM_REG_FAST_MEMORY + bnx2x_func_init() 3078 * @bp device handle 3084 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, bnx2x_get_common_flags() argument 3102 if (bp->flags & TX_SWITCHING) bnx2x_get_common_flags() 3115 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, bnx2x_get_q_flags() argument 3122 if (IS_MF_SD(bp)) bnx2x_get_q_flags() 3147 if (IS_MF_AFEX(bp)) bnx2x_get_q_flags() 3150 return flags | bnx2x_get_common_flags(bp, fp, true); bnx2x_get_q_flags() 3153 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, bnx2x_pf_q_prep_general() argument 3164 gen_init->mtu = bp->dev->mtu; bnx2x_pf_q_prep_general() 3171 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, bnx2x_pf_rx_q_prep() argument 3180 pause->sge_th_lo = SGE_TH_LO(bp); bnx2x_pf_rx_q_prep() 3181 pause->sge_th_hi = SGE_TH_HI(bp); bnx2x_pf_rx_q_prep() 3184 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep() 3189 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> bnx2x_pf_rx_q_prep() 3197 if (!CHIP_IS_E1(bp)) { bnx2x_pf_rx_q_prep() 3198 pause->bd_th_lo = BD_TH_LO(bp); bnx2x_pf_rx_q_prep() 3199 pause->bd_th_hi = BD_TH_HI(bp); bnx2x_pf_rx_q_prep() 3201 pause->rcq_th_lo = RCQ_TH_LO(bp); bnx2x_pf_rx_q_prep() 3202 pause->rcq_th_hi = RCQ_TH_HI(bp); bnx2x_pf_rx_q_prep() 3207 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep() 3209 bp->rx_ring_size); bnx2x_pf_rx_q_prep() 3210 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep() 3233 rxq_init->rss_engine_id = BP_FUNC(bp); bnx2x_pf_rx_q_prep() 3234 rxq_init->mcast_engine_id = BP_FUNC(bp); bnx2x_pf_rx_q_prep() 3241 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); bnx2x_pf_rx_q_prep() 3253 if (IS_MF_AFEX(bp)) { bnx2x_pf_rx_q_prep() 3254 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; bnx2x_pf_rx_q_prep() 3259 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, bnx2x_pf_tx_q_prep() argument 3272 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); bnx2x_pf_tx_q_prep() 3280 static void bnx2x_pf_init(struct bnx2x *bp) bnx2x_pf_init() argument 3286 if (!CHIP_IS_E1x(bp)) { bnx2x_pf_init() 3289 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + bnx2x_pf_init() 3291 (CHIP_MODE_IS_4_PORT(bp) ? bnx2x_pf_init() 3292 BP_FUNC(bp) : BP_VN(bp))*4, 0); bnx2x_pf_init() 3294 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + bnx2x_pf_init() 3297 (CHIP_MODE_IS_4_PORT(bp) ? bnx2x_pf_init() 3298 BP_FUNC(bp) : BP_VN(bp))*4, 0); bnx2x_pf_init() 3307 flags |= (bp->dev->features & NETIF_F_LRO) ? 
FUNC_FLG_TPA : 0; bnx2x_pf_init() 3310 func_init.pf_id = BP_FUNC(bp); bnx2x_pf_init() 3311 func_init.func_id = BP_FUNC(bp); bnx2x_pf_init() 3312 func_init.spq_map = bp->spq_mapping; bnx2x_pf_init() 3313 func_init.spq_prod = bp->spq_prod_idx; bnx2x_pf_init() 3315 bnx2x_func_init(bp, &func_init); bnx2x_pf_init() 3317 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); bnx2x_pf_init() 3325 bp->link_vars.line_speed = SPEED_10000; bnx2x_pf_init() 3326 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); bnx2x_pf_init() 3329 if (bp->port.pmf) bnx2x_pf_init() 3330 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_pf_init() 3333 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); bnx2x_pf_init() 3334 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); bnx2x_pf_init() 3335 eq_data.producer = bp->eq_prod; bnx2x_pf_init() 3338 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); bnx2x_pf_init() 3341 static void bnx2x_e1h_disable(struct bnx2x *bp) bnx2x_e1h_disable() argument 3343 int port = BP_PORT(bp); bnx2x_e1h_disable() 3345 bnx2x_tx_disable(bp); bnx2x_e1h_disable() 3347 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); bnx2x_e1h_disable() 3350 static void bnx2x_e1h_enable(struct bnx2x *bp) bnx2x_e1h_enable() argument 3352 int port = BP_PORT(bp); bnx2x_e1h_enable() 3354 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) bnx2x_e1h_enable() 3355 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); bnx2x_e1h_enable() 3358 netif_tx_wake_all_queues(bp->dev); bnx2x_e1h_enable() 3368 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) bnx2x_drv_info_ether_stat() argument 3371 &bp->slowpath->drv_info_to_mcp.ether_stat; bnx2x_drv_info_ether_stat() 3373 &bp->sp_objs->mac_obj; bnx2x_drv_info_ether_stat() 3390 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, bnx2x_drv_info_ether_stat() 3394 ether_stat->mtu_size = bp->dev->mtu; bnx2x_drv_info_ether_stat() 3395 if (bp->dev->features & NETIF_F_RXCSUM) bnx2x_drv_info_ether_stat() 3397 if (bp->dev->features & NETIF_F_TSO) bnx2x_drv_info_ether_stat() 3399 ether_stat->feature_flags |= bp->common.boot_mode; bnx2x_drv_info_ether_stat() 3401 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; bnx2x_drv_info_ether_stat() 3403 ether_stat->txq_size = bp->tx_ring_size; bnx2x_drv_info_ether_stat() 3404 ether_stat->rxq_size = bp->rx_ring_size; bnx2x_drv_info_ether_stat() 3407 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; bnx2x_drv_info_ether_stat() 3411 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) bnx2x_drv_info_fcoe_stat() argument 3413 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; bnx2x_drv_info_fcoe_stat() 3415 &bp->slowpath->drv_info_to_mcp.fcoe_stat; bnx2x_drv_info_fcoe_stat() 3417 if (!CNIC_LOADED(bp)) bnx2x_drv_info_fcoe_stat() 3420 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); bnx2x_drv_info_fcoe_stat() 3426 if (!NO_FCOE(bp)) { bnx2x_drv_info_fcoe_stat() 3428 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. bnx2x_drv_info_fcoe_stat() 3432 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 
bnx2x_drv_info_fcoe_stat() 3436 &bp->fw_stats_data->fcoe; bnx2x_drv_info_fcoe_stat() 3510 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); bnx2x_drv_info_fcoe_stat() 3513 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) bnx2x_drv_info_iscsi_stat() argument 3515 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; bnx2x_drv_info_iscsi_stat() 3517 &bp->slowpath->drv_info_to_mcp.iscsi_stat; bnx2x_drv_info_iscsi_stat() 3519 if (!CNIC_LOADED(bp)) bnx2x_drv_info_iscsi_stat() 3522 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, bnx2x_drv_info_iscsi_stat() 3529 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); bnx2x_drv_info_iscsi_stat() 3537 static void bnx2x_config_mf_bw(struct bnx2x *bp) bnx2x_config_mf_bw() argument 3539 if (bp->link_vars.link_up) { bnx2x_config_mf_bw() 3540 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_config_mf_bw() 3541 bnx2x_link_sync_notify(bp); bnx2x_config_mf_bw() 3543 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_config_mf_bw() 3546 static void bnx2x_set_mf_bw(struct bnx2x *bp) bnx2x_set_mf_bw() argument 3548 bnx2x_config_mf_bw(bp); bnx2x_set_mf_bw() 3549 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); bnx2x_set_mf_bw() 3552 static void bnx2x_handle_eee_event(struct bnx2x *bp) bnx2x_handle_eee_event() argument 3555 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); bnx2x_handle_eee_event() 3561 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) bnx2x_handle_drv_info_req() argument 3564 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); bnx2x_handle_drv_info_req() 3570 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); bnx2x_handle_drv_info_req() 3578 mutex_lock(&bp->drv_info_mutex); bnx2x_handle_drv_info_req() 3580 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_handle_drv_info_req() 3585 bnx2x_drv_info_ether_stat(bp); bnx2x_handle_drv_info_req() 3588 bnx2x_drv_info_fcoe_stat(bp); bnx2x_handle_drv_info_req() 3591 bnx2x_drv_info_iscsi_stat(bp); bnx2x_handle_drv_info_req() 3595 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); bnx2x_handle_drv_info_req() 3602 SHMEM2_WR(bp, drv_info_host_addr_lo, bnx2x_handle_drv_info_req() 3603 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_handle_drv_info_req() 3604 SHMEM2_WR(bp, drv_info_host_addr_hi, bnx2x_handle_drv_info_req() 3605 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_handle_drv_info_req() 3607 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); bnx2x_handle_drv_info_req() 3613 if (!SHMEM2_HAS(bp, mfw_drv_indication)) { bnx2x_handle_drv_info_req() 3615 } else if (!bp->drv_info_mng_owner) { bnx2x_handle_drv_info_req() 3616 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); bnx2x_handle_drv_info_req() 3619 u32 indication = SHMEM2_RD(bp, mfw_drv_indication); bnx2x_handle_drv_info_req() 3623 SHMEM2_WR(bp, mfw_drv_indication, bnx2x_handle_drv_info_req() 3634 bp->drv_info_mng_owner = true; bnx2x_handle_drv_info_req() 3638 mutex_unlock(&bp->drv_info_mutex); bnx2x_handle_drv_info_req() 3662 void bnx2x_update_mng_version(struct bnx2x *bp) bnx2x_update_mng_version() argument 3667 int idx = BP_FW_MB_IDX(bp); bnx2x_update_mng_version() 3670 if (!SHMEM2_HAS(bp, func_os_drv_ver)) bnx2x_update_mng_version() 3673 mutex_lock(&bp->drv_info_mutex); bnx2x_update_mng_version() 3675 if (bp->drv_info_mng_owner) bnx2x_update_mng_version() 3678 if (bp->state != BNX2X_STATE_OPEN) bnx2x_update_mng_version() 3683 if (!CNIC_LOADED(bp)) bnx2x_update_mng_version() 3687 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_update_mng_version() 3689 
bnx2x_drv_info_iscsi_stat(bp); bnx2x_update_mng_version() 3690 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; bnx2x_update_mng_version() 3693 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_update_mng_version() 3695 bnx2x_drv_info_fcoe_stat(bp); bnx2x_update_mng_version() 3696 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; bnx2x_update_mng_version() 3700 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); bnx2x_update_mng_version() 3701 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); bnx2x_update_mng_version() 3702 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); bnx2x_update_mng_version() 3704 mutex_unlock(&bp->drv_info_mutex); bnx2x_update_mng_version() 3710 static void bnx2x_oem_event(struct bnx2x *bp, u32 event) bnx2x_oem_event() argument 3734 * where the bp->flags can change so it is done without any bnx2x_oem_event() 3737 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { bnx2x_oem_event() 3739 bp->flags |= MF_FUNC_DIS; bnx2x_oem_event() 3741 bnx2x_e1h_disable(bp); bnx2x_oem_event() 3744 bp->flags &= ~MF_FUNC_DIS; bnx2x_oem_event() 3746 bnx2x_e1h_enable(bp); bnx2x_oem_event() 3754 bnx2x_config_mf_bw(bp); bnx2x_oem_event() 3761 bnx2x_fw_command(bp, cmd_fail, 0); bnx2x_oem_event() 3763 bnx2x_fw_command(bp, cmd_ok, 0); bnx2x_oem_event() 3767 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) bnx2x_sp_get_next() argument 3769 struct eth_spe *next_spe = bp->spq_prod_bd; bnx2x_sp_get_next() 3771 if (bp->spq_prod_bd == bp->spq_last_bd) { bnx2x_sp_get_next() 3772 bp->spq_prod_bd = bp->spq; bnx2x_sp_get_next() 3773 bp->spq_prod_idx = 0; bnx2x_sp_get_next() 3776 bp->spq_prod_bd++; bnx2x_sp_get_next() 3777 bp->spq_prod_idx++; bnx2x_sp_get_next() 3783 static void bnx2x_sp_prod_update(struct bnx2x *bp) bnx2x_sp_prod_update() argument 3785 int func = BP_FUNC(bp); bnx2x_sp_prod_update() 3794 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bnx2x_sp_prod_update() 3795 bp->spq_prod_idx); bnx2x_sp_prod_update() 3822 * @bp: driver handle 3833 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, bnx2x_sp_post() argument 3841 if (unlikely(bp->panic)) { bnx2x_sp_post() 3847 spin_lock_bh(&bp->spq_lock); bnx2x_sp_post() 3850 if (!atomic_read(&bp->eq_spq_left)) { bnx2x_sp_post() 3852 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post() 3856 } else if (!atomic_read(&bp->cq_spq_left)) { bnx2x_sp_post() 3858 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post() 3863 spe = bnx2x_sp_get_next(bp); bnx2x_sp_post() 3868 HW_CID(bp, cid)); bnx2x_sp_post() 3877 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & bnx2x_sp_post() 3894 atomic_dec(&bp->eq_spq_left); bnx2x_sp_post() 3896 atomic_dec(&bp->cq_spq_left); bnx2x_sp_post() 3900 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), bnx2x_sp_post() 3901 (u32)(U64_LO(bp->spq_mapping) + bnx2x_sp_post() 3902 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, bnx2x_sp_post() 3903 HW_CID(bp, cid), data_hi, data_lo, type, bnx2x_sp_post() 3904 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); bnx2x_sp_post() 3906 bnx2x_sp_prod_update(bp); bnx2x_sp_post() 3907 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post() 3912 static int bnx2x_acquire_alr(struct bnx2x *bp) bnx2x_acquire_alr() argument 3919 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); bnx2x_acquire_alr() 3920 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); bnx2x_acquire_alr() 3935 static void bnx2x_release_alr(struct bnx2x *bp) bnx2x_release_alr() argument 3937 REG_WR(bp, 
static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
    struct host_sp_status_block *def_sb = bp->def_status_blk;
    u16 rc = 0;

    barrier(); /* status block is written to by the chip */
    if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
        bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
        rc |= BNX2X_DEF_SB_ATT_IDX;
    }

    if (bp->def_idx != def_sb->sp_sb.running_index) {
        bp->def_idx = def_sb->sp_sb.running_index;
        rc |= BNX2X_DEF_SB_IDX;
    }

    /* Do not reorder: reading the indices must complete first */
    barrier();
    return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
    int port = BP_PORT(bp);
    u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                          MISC_REG_AEU_MASK_ATTN_FUNC_0;
    u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                   NIG_REG_MASK_INTERRUPT_PORT0;
    u32 aeu_mask;
    u32 nig_mask = 0;
    u32 reg_addr;

    if (bp->attn_state & asserted)
        BNX2X_ERR("IGU ERROR\n");

    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
    aeu_mask = REG_RD(bp, aeu_addr);

    aeu_mask &= ~(asserted & 0x3ff);

    REG_WR(bp, aeu_addr, aeu_mask);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
    bp->attn_state |= asserted;
    DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {

            bnx2x_acquire_phy_lock(bp);

            /* save nig interrupt mask */
            nig_mask = REG_RD(bp, nig_int_mask_addr);

            /* If nig_mask is not set, no need to call the update
             * function.
             */
            if (nig_mask) {
                REG_WR(bp, nig_int_mask_addr, 0);

                bnx2x_link_attn(bp);
            }
        }
        /* ... report SW timer / GPIO attentions ... */

        if (port == 0) {
            if (asserted & ATTN_GENERAL_ATTN_1)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
            if (asserted & ATTN_GENERAL_ATTN_2)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
            if (asserted & ATTN_GENERAL_ATTN_3)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
        } else {
            if (asserted & ATTN_GENERAL_ATTN_4)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
            if (asserted & ATTN_GENERAL_ATTN_5)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
            if (asserted & ATTN_GENERAL_ATTN_6)
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
        }
    } /* hardwired */

    if (bp->common.int_block == INT_BLOCK_HC)
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_SET);
    else
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

    DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
       (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(bp, reg_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC) {
        /* Verify that IGU ack through BAR was written before restoring
         * the NIG mask; this loop should exit after 2-3 iterations.
         */
        if (bp->common.int_block != INT_BLOCK_HC) {
            u32 cnt = 0, igu_acked;

            do {
                igu_acked = REG_RD(bp,
                                   IGU_REG_ATTENTION_ACK_BITS);
            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
                     (++cnt < MAX_IGU_ATTN_ACK_TO));
            if (!igu_acked)
                DP(NETIF_MSG_HW,
                   "Failed to verify IGU ack on time\n");
            barrier();
        }
        REG_WR(bp, nig_int_mask_addr, nig_mask);
        bnx2x_release_phy_lock(bp);
    }
}
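/* Illustrative sketch, not driver code: the change-detection idiom in
 * bnx2x_update_dsb_idx() above -- compare cached indices against the
 * DMA-written status block and report what moved as a bitmask. The names
 * here (sb_cache, SB_*_CHANGED) are hypothetical stand-ins.
 */
#define SB_ATTN_CHANGED 0x1
#define SB_SP_CHANGED   0x2

struct sb_cache { unsigned short attn_idx, sp_idx; };

static unsigned int sb_update(struct sb_cache *c,
                              unsigned short hw_attn, unsigned short hw_sp)
{
    unsigned int rc = 0;

    if (c->attn_idx != hw_attn) {   /* attention index advanced */
        c->attn_idx = hw_attn;
        rc |= SB_ATTN_CHANGED;
    }
    if (c->sp_idx != hw_sp) {       /* slow-path index advanced */
        c->sp_idx = hw_sp;
        rc |= SB_SP_CHANGED;
    }
    return rc;
}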
"HC" : "IGU", reg_addr); bnx2x_attn_int_asserted() 4066 REG_WR(bp, reg_addr, asserted); bnx2x_attn_int_asserted() 4073 if (bp->common.int_block != INT_BLOCK_HC) { bnx2x_attn_int_asserted() 4076 igu_acked = REG_RD(bp, bnx2x_attn_int_asserted() 4085 REG_WR(bp, nig_int_mask_addr, nig_mask); bnx2x_attn_int_asserted() 4086 bnx2x_release_phy_lock(bp); bnx2x_attn_int_asserted() 4090 static void bnx2x_fan_failure(struct bnx2x *bp) bnx2x_fan_failure() argument 4092 int port = BP_PORT(bp); bnx2x_fan_failure() 4096 SHMEM_RD(bp, bnx2x_fan_failure() 4101 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, bnx2x_fan_failure() 4105 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" bnx2x_fan_failure() 4112 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); bnx2x_fan_failure() 4115 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted0() argument 4117 int port = BP_PORT(bp); bnx2x_attn_int_deasserted0() 4126 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted0() 4128 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted0() 4133 bnx2x_hw_reset_phy(&bp->link_params); bnx2x_attn_int_deasserted0() 4134 bnx2x_fan_failure(bp); bnx2x_attn_int_deasserted0() 4137 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { bnx2x_attn_int_deasserted0() 4138 bnx2x_acquire_phy_lock(bp); bnx2x_attn_int_deasserted0() 4139 bnx2x_handle_module_detect_int(&bp->link_params); bnx2x_attn_int_deasserted0() 4140 bnx2x_release_phy_lock(bp); bnx2x_attn_int_deasserted0() 4145 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted0() 4147 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted0() 4155 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted1() argument 4161 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); bnx2x_attn_int_deasserted1() 4170 int port = BP_PORT(bp); bnx2x_attn_int_deasserted1() 4176 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted1() 4178 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted1() 4186 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted2() argument 4192 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); bnx2x_attn_int_deasserted2() 4200 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); bnx2x_attn_int_deasserted2() 4206 if (!CHIP_IS_E1x(bp)) { bnx2x_attn_int_deasserted2() 4207 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); bnx2x_attn_int_deasserted2() 4214 int port = BP_PORT(bp); bnx2x_attn_int_deasserted2() 4220 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted2() 4222 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted2() 4230 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted3() argument 4237 int func = BP_FUNC(bp); bnx2x_attn_int_deasserted3() 4239 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_attn_int_deasserted3() 4240 bnx2x_read_mf_cfg(bp); bnx2x_attn_int_deasserted3() 4241 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, bnx2x_attn_int_deasserted3() 4242 func_mf_config[BP_ABS_FUNC(bp)].config); bnx2x_attn_int_deasserted3() 4243 val = SHMEM_RD(bp, bnx2x_attn_int_deasserted3() 4244 func_mb[BP_FW_MB_IDX(bp)].drv_status); bnx2x_attn_int_deasserted3() 4248 bnx2x_oem_event(bp, bnx2x_attn_int_deasserted3() 4253 bnx2x_set_mf_bw(bp); bnx2x_attn_int_deasserted3() 4256 bnx2x_handle_drv_info_req(bp); bnx2x_attn_int_deasserted3() 4259 bnx2x_schedule_iov_task(bp, bnx2x_attn_int_deasserted3() 4262 if ((bp->port.pmf == 0) && (val & 
static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
    u32 val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

        if (attn & BNX2X_PMF_LINK_ASSERT) {
            int func = BP_FUNC(bp);

            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            bnx2x_read_mf_cfg(bp);
            bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
                    func_mf_config[BP_ABS_FUNC(bp)].config);
            val = SHMEM_RD(bp,
                           func_mb[BP_FW_MB_IDX(bp)].drv_status);

            /* ... on a DCC/NIV OEM event: */
            bnx2x_oem_event(bp,
                            /* OEM event bits extracted from val */ 0);

            if (val & DRV_STATUS_SET_MF_BW)
                bnx2x_set_mf_bw(bp);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bnx2x_handle_drv_info_req(bp);

            if (val & DRV_STATUS_VF_DISABLED)
                bnx2x_schedule_iov_task(bp,
                                        BNX2X_IOV_HANDLE_FLR);

            if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bnx2x_pmf_update(bp);

            if (bp->port.pmf &&
                (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
                bp->dcbx_enabled > 0)
                /* start dcbx state machine */
                bnx2x_dcbx_set_params(bp,
                        BNX2X_DCBX_STATE_NEG_RECEIVED);
            if (val & DRV_STATUS_AFEX_EVENT_MASK)
                bnx2x_handle_afex_cmd(bp,
                        val & DRV_STATUS_AFEX_EVENT_MASK);
            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bnx2x_handle_eee_event(bp);

            if (val & DRV_STATUS_OEM_UPDATE_SVID)
                bnx2x_handle_update_svid_cmd(bp);

            if (bp->link_vars.periodic_flags &
                PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                bnx2x_acquire_phy_lock(bp);
                bp->link_vars.periodic_flags &=
                        ~PERIODIC_FLAGS_LINK_EVENT;
                bnx2x_release_phy_lock(bp);
                if (IS_MF(bp))
                    bnx2x_link_sync_notify(bp);
                bnx2x_link_report(bp);
            }

            /* Always call it here: bnx2x_link_report() will
             * prevent the link indication duplication.
             */
            bnx2x__link_status_update(bp);
        } else if (attn & BNX2X_MC_ASSERT_BITS) {

            BNX2X_ERR("MC assert!\n");
            bnx2x_mc_assert(bp);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bnx2x_panic();

        } else if (attn & BNX2X_MCP_ASSERT) {

            BNX2X_ERR("MCP assert!\n");
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            bnx2x_fw_dump(bp);

        } else
            BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BNX2X_GRC_TIMEOUT) {
            val = CHIP_IS_E1(bp) ? 0 :
                    REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
            BNX2X_ERR("GRC time-out 0x%08x\n", val);
        }
        if (attn & BNX2X_GRC_RSV) {
            val = CHIP_IS_E1(bp) ? 0 :
                    REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
            BNX2X_ERR("GRC reserved 0x%08x\n", val);
        }
        REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}

/*
 * Bits map (the recovery register layout is described in the original
 * comment block here: per-path reset-in-progress bits, a global reset
 * bit, and per-path PF load-counter bitfields).
 */

/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_global(struct bnx2x *bp)
{
    u32 val;
    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_clear_reset_global(struct bnx2x *bp)
{
    u32 val;
    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the GLOBAL_RESET bit.
 *
 * should be run under rtnl lock
 */
static bool bnx2x_reset_is_global(struct bnx2x *bp)
{
    u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
}

/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */
static void bnx2x_set_reset_done(struct bnx2x *bp)
{
    u32 val;
    u32 bit = BP_PATH(bp) ?
        BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    /* Clear the bit */
    val &= ~bit;
    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
    u32 val;
    u32 bit = BP_PATH(bp) ?
        BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    /* Set the bit */
    val |= bit;
    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
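/* Illustrative sketch, not driver code: the locked read-modify-write idiom
 * that bnx2x_set_reset_global()/bnx2x_clear_reset_global() apply to the
 * recovery register. The caller is assumed to hold the hardware lock (as
 * the driver does with HW_LOCK_RESOURCE_RECOVERY_REG); the bit value here
 * is hypothetical.
 */
#define GLOBAL_RESET_FLAG 0x00040000u   /* hypothetical bit position */

static unsigned int set_global_flag(unsigned int reg_val, int on)
{
    /* pure RMW step; the surrounding lock/unlock is the caller's job */
    return on ? (reg_val | GLOBAL_RESET_FLAG)
              : (reg_val & ~GLOBAL_RESET_FLAG);
}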
/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 */
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
    u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
    u32 bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT :
                       BNX2X_PATH0_RST_IN_PROG_BIT;

    /* return false if bit is set */
    return (val & bit) ? false : true;
}

/*
 * set pf load for the current pf.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_pf_load(struct bnx2x *bp)
{
    u32 val1, val;
    u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
    u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                              BNX2X_PATH0_LOAD_CNT_SHIFT;

    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* set bit of that PF */
    val1 |= (1 << bp->pf_num);

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/**
 * bnx2x_clear_pf_load - clear pf load mark
 *
 * @bp: driver handle
 *
 * Should be run under rtnl lock.
 * Returns whether other functions are still loaded.
 */
bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
    u32 val1, val;
    u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
                             BNX2X_PATH0_LOAD_CNT_MASK;
    u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                              BNX2X_PATH0_LOAD_CNT_SHIFT;

    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* clear bit of that PF */
    val1 &= ~(1 << bp->pf_num);

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
    return val1 != 0;
}

/*
 * Read the load status for the current engine.
 *
 * should be run under rtnl lock
 */
static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
    u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
                         BNX2X_PATH0_LOAD_CNT_MASK);
    u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                          BNX2X_PATH0_LOAD_CNT_SHIFT);
    u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

    val = (val & mask) >> shift;

    return val != 0;
}

static void _print_next_block(int idx, const char *blk)
{
    pr_cont("%s%s", idx ? ", " : "", blk);
}

static void _print_parity(struct bnx2x *bp, u32 reg)
{
    pr_cont(" [0x%08x] ", REG_RD(bp, reg));
}

static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
                                            int *par_num, bool print)
{
    u32 cur_bit;
    bool res = false;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = (0x1UL << i);
        if (sig & cur_bit) {
            res = true;

            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "PARSER");
                    _print_parity(bp, PRS_REG_PRS_PRTY_STS);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "SEARCHER");
                    _print_parity(bp, SRC_REG_SRC_PRTY_STS);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "TCM");
                    _print_parity(bp, TCM_REG_TCM_PRTY_STS);
                }
                break;
            /* ... BRB, TSDM, TSEMI and XPB are decoded the same way,
             * each dumping its own *_PRTY_STS register ...
             */
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return res;
}
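/* Illustrative sketch, not driver code: the bitfield bookkeeping behind
 * bnx2x_set_pf_load()/bnx2x_clear_pf_load() -- each engine (path) owns a
 * masked field inside one shared register, and each PF owns one bit within
 * that field. The mask/shift values below are hypothetical.
 */
#define PATH0_MASK  0x000000ffu
#define PATH0_SHIFT 0
#define PATH1_MASK  0x0000ff00u
#define PATH1_SHIFT 8

static unsigned int pf_load_mark(unsigned int reg, int path, int pf_num,
                                 int loaded)
{
    unsigned int mask  = path ? PATH1_MASK  : PATH0_MASK;
    unsigned int shift = path ? PATH1_SHIFT : PATH0_SHIFT;
    unsigned int field = (reg & mask) >> shift;   /* current counter */

    if (loaded)
        field |= 1u << pf_num;    /* mark this PF as loaded */
    else
        field &= ~(1u << pf_num); /* clear this PF's mark */

    return (reg & ~mask) | ((field << shift) & mask);
}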
static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                            int *par_num, bool *global,
                                            bool print)
{
    u32 cur_bit;
    bool res = false;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = (0x1UL << i);
        if (sig & cur_bit) {
            res = true;

            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "PBF");
                    _print_parity(bp, PBF_REG_PBF_PRTY_STS);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "QM");
                    _print_parity(bp, QM_REG_QM_PRTY_STS);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "TM");
                    _print_parity(bp, TM_REG_TM_PRTY_STS);
                }
                break;
            /* ... XSDM, XCM, XSEMI, DOORBELLQ, NIG (E1x vs E2+
             * registers differ), VAUX PCI CORE (sets *global), DEBUG,
             * USDM, UCM, USEMI, UPB, CSDM and CCM are decoded the same
             * way, each dumping its own *_PRTY_STS register(s) ...
             */
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return res;
}

static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
                                            int *par_num, bool print)
{
    u32 cur_bit;
    bool res = false;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = (0x1UL << i);
        if (sig & cur_bit) {
            res = true;

            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "PXP");
                    _print_parity(bp, PXP_REG_PXP_PRTY_STS);
                    _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_0);
                    _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_1);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "CDU");
                    _print_parity(bp, CDU_REG_CDU_PRTY_STS);
                }
                break;
            /* ... CSEMI, CFC, DMAE, IGU (E1x vs E2+ register) and
             * MISC are decoded the same way ...
             */
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return res;
}

static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
                                            int *par_num, bool *global,
                                            bool print)
{
    bool res = false;

    /* ... decodes the MCP ROM/RX/TX/UMP parity attentions; every MCP
     * parity marks *global = true, and the latched attentions are
     * cleared via:
     *     REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
     */
    return res;
}

static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
                                            int *par_num, bool print)
{
    u32 cur_bit;
    bool res = false;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = (0x1UL << i);
        if (sig & cur_bit) {
            res = true;

            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "PGLUE_B");
                    _print_parity(bp,
                                  PGLUE_B_REG_PGLUE_B_PRTY_STS);
                }
                break;
            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
                if (print) {
                    _print_next_block((*par_num)++, "ATC");
                    _print_parity(bp, ATC_REG_ATC_PRTY_STS);
                }
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return res;
}

static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                              u32 *sig)
{
    bool res = false;

    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
        int par_num = 0;

        if (print)
            netdev_err(bp->dev,
                       "Parity errors detected in blocks: ");
        res |= bnx2x_check_blocks_with_parity0(bp,
            sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
        res |= bnx2x_check_blocks_with_parity1(bp,
            sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
        res |= bnx2x_check_blocks_with_parity2(bp,
            sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
        res |= bnx2x_check_blocks_with_parity3(bp,
            sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
        res |= bnx2x_check_blocks_with_parity4(bp,
            sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
        if (print)
            pr_cont("\n");
    }

    return res;
}

/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp: driver handle
 *
 * Returns true if there was a parity attention.
 */
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
    struct attn_route attn = { {0} };
    int port = BP_PORT(bp);

    attn.sig[0] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    /* Since MCP attentions can't be disabled inside the block, we need to
     * read AEU registers with the mask value in the MCP.
     */
    attn.sig[3] &= ((REG_RD(bp,
                            /* per-port MCP AEU enable register */ 0) &
                     MISC_AEU_ENABLE_MCP_PRTY_BITS) |
                    ~MISC_AEU_ENABLE_MCP_PRTY_BITS);

    if (!CHIP_IS_E1x(bp))
        attn.sig[4] = REG_RD(bp,
            MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);

    return bnx2x_parity_attn(bp, global, print, attn.sig);
}

static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
    u32 val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
        /* ... decode the individual PGLUE_B error bits ... */
    }
    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
        BNX2X_ERR("ATC hw attention 0x%x\n", val);
        /* ... decode the individual ATC error bits ... */
    }
}
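/* Illustrative sketch, not driver code: the decode loop the
 * bnx2x_check_blocks_with_parity*() helpers implement -- walk a table of
 * (bit, block-name) pairs against one AEU signature word and report every
 * block whose parity bit is set. The table contents are hypothetical.
 */
#include <stdio.h>

struct parity_bit { unsigned int bit; const char *block; };

static int check_parity_sig(unsigned int sig,
                            const struct parity_bit *tbl, int n, int print)
{
    int res = 0, i;

    for (i = 0; i < n; i++) {
        if (sig & tbl[i].bit) {
            if (print)
                printf("parity error in %s\n", tbl[i].block);
            res = 1;        /* at least one block reported parity */
        }
    }
    return res;
}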
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
    struct attn_route attn, *group_mask;
    int port = BP_PORT(bp);
    int index;
    u32 reg_addr;
    u32 val;
    u32 aeu_mask;
    bool global = false;

    /* need to take HW lock because MCP or other port might also
     * try to handle this event
     */
    bnx2x_acquire_alr(bp);

    if (bnx2x_chk_parity_attn(bp, &global, true)) {
#ifndef BNX2X_STOP_ON_ERROR
        bp->recovery_state = BNX2X_RECOVERY_INIT;
        schedule_delayed_work(&bp->sp_rtnl_task, 0);
        /* Disable HW interrupts */
        bnx2x_int_disable(bp);
        /* In case of parity errors don't handle attentions so that
         * other function would "see" parity errors.
         */
#else
        bnx2x_panic();
#endif
        bnx2x_release_alr(bp);
        return;
    }

    attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    if (!CHIP_IS_E1x(bp))
        attn.sig[4] =
            REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    else
        attn.sig[4] = 0;

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &bp->attn_group[index];

            bnx2x_attn_int_deasserted4(bp,
                    attn.sig[4] & group_mask->sig[4]);
            bnx2x_attn_int_deasserted3(bp,
                    attn.sig[3] & group_mask->sig[3]);
            bnx2x_attn_int_deasserted1(bp,
                    attn.sig[1] & group_mask->sig[1]);
            bnx2x_attn_int_deasserted2(bp,
                    attn.sig[2] & group_mask->sig[2]);
            bnx2x_attn_int_deasserted0(bp,
                    attn.sig[0] & group_mask->sig[0]);
        }
    }

    bnx2x_release_alr(bp);

    if (bp->common.int_block == INT_BLOCK_HC)
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    else
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

    val = ~deasserted;
    DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
       (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(bp, reg_addr, val);

    if (~bp->attn_state & deasserted)
        BNX2X_ERR("IGU ERROR\n");

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
    aeu_mask = REG_RD(bp, reg_addr);

    aeu_mask |= (deasserted & 0x3ff);

    REG_WR(bp, reg_addr, aeu_mask);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
    bp->attn_state &= ~deasserted;
    DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
    /* read local copy of bits */
    u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
                                attn_bits);
    u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
                               attn_bits_ack);
    u32 attn_state = bp->attn_state;

    /* look for changed bits */
    u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
    u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
        BNX2X_ERR("BAD attention state\n");

    /* handle bits that were raised */
    if (asserted)
        bnx2x_attn_int_asserted(bp, asserted);

    if (deasserted)
        bnx2x_attn_int_deasserted(bp, deasserted);
}

void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update)
{
    u32 igu_addr = bp->igu_base_addr;

    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
    bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
                         igu_addr);
}

static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
    /* No memory barriers */
    storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
    mmiowb(); /* keep prod updates ordered */
}

static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
                                     union event_ring_elem *elem)
{
    u8 err = elem->message.error;

    if (!bp->cnic_eth_dev.starting_cid ||
        (cid < bp->cnic_eth_dev.starting_cid &&
         cid != bp->cnic_eth_dev.iscsi_l2_cid))
        return 1;

    DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

    if (unlikely(err)) {
        BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
                  cid);
        bnx2x_panic_dump(bp, false);
    }
    bnx2x_cnic_cfc_comp(bp, cid, err);
    return 0;
}

static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
    struct bnx2x_mcast_ramrod_params rparam;
    int rc;

    memset(&rparam, 0, sizeof(rparam));

    rparam.mcast_obj = &bp->mcast_obj;

    netif_addr_lock_bh(bp->dev);

    /* Clear pending state for the last command */
    bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

    /* If there are pending mcast commands - send them */
    if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
        if (rc < 0)
            BNX2X_ERR("Failed to send pending mcast commands: %d\n",
                      rc);
    }

    netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                                            union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
    struct bnx2x_vlan_mac_obj *vlan_mac_obj;

    /* Always push next commands out, don't wait here */
    __set_bit(RAMROD_CONT, &ramrod_flags);

    switch (/* the classification command encoded in the echo field */ 0) {
    case BNX2X_FILTER_MAC_PENDING:
        if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
            vlan_mac_obj = &bp->iscsi_l2_mac_obj;
        else
            vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
        break;
    case BNX2X_FILTER_MCAST_PENDING:
        /* This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bnx2x_handle_mcast_eqe(bp);
        return;
    default:
        BNX2X_ERR("Unsupported classification command: %d\n",
                  elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0)
        BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
    else if (rc > 0)
        DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
}

static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);

static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
    netif_addr_lock_bh(bp->dev);

    clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

    /* Send rx_mode command again if was requested */
    if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
        bnx2x_set_storm_rx_mode(bp);
    else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
                                &bp->sp_state))
        bnx2x_set_iscsi_eth_rx_mode(bp, true);
    else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
                                &bp->sp_state))
        bnx2x_set_iscsi_eth_rx_mode(bp, false);

    netif_addr_unlock_bh(bp->dev);
}

static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
                                       union event_ring_elem *elem)
{
    /* ... acks the MCP according to which VIF-list ramrod completed:
     * bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, <result>)
     * for a list-get, or
     * bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0)
     * for a list-set.
     */
}
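/* Illustrative sketch, not driver code: the edge-detection arithmetic in
 * bnx2x_attn_int() above. A line is newly asserted when the hardware
 * reports it set while it is neither acknowledged nor recorded in the
 * driver state; newly deasserted when the inverse holds on all three.
 */
static void attn_edges(unsigned int bits, unsigned int ack,
                       unsigned int state,
                       unsigned int *asserted, unsigned int *deasserted)
{
    *asserted   =  bits & ~ack & ~state;   /* set, but not yet tracked */
    *deasserted = ~bits &  ack &  state;   /* tracked, but now cleared */
}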
static void bnx2x_after_function_update(struct bnx2x *bp)
{
    int q, rc;
    struct bnx2x_fastpath *fp;
    struct bnx2x_queue_state_params queue_params = {NULL};
    struct bnx2x_queue_update_params *q_update_params =
        &queue_params.params.update;

    /* Send Q update command with afex vlan removal values for all Qs */
    queue_params.cmd = BNX2X_Q_CMD_UPDATE;

    /* set silent vlan removal values according to vlan mode */
    __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
              &q_update_params->update_flags);
    __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
              &q_update_params->update_flags);
    __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

    /* in access mode mark mask and value are 0 to strip all vlans */
    if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
        q_update_params->silent_removal_value = 0;
        q_update_params->silent_removal_mask = 0;
    } else {
        q_update_params->silent_removal_value =
            (bp->afex_def_vlan_tag & VLAN_VID_MASK);
        q_update_params->silent_removal_mask = VLAN_VID_MASK;
    }

    for_each_eth_queue(bp, q) {
        /* Set the appropriate Queue object */
        fp = &bp->fp[q];
        queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

        /* send the ramrod */
        rc = bnx2x_queue_state_change(bp, &queue_params);
        if (rc < 0)
            BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
                      q);
    }

    if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
        fp = &bp->fp[FCOE_IDX(bp)];
        queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

        /* clear pending completion bit */
        __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

        /* mark latest Q bit */
        set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);

        /* send Q update ramrod for FCoE Q */
        rc = bnx2x_queue_state_change(bp, &queue_params);
        if (rc < 0)
            BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
                      q);
    } else {
        /* If no FCoE ring - ACK MCP now */
        bnx2x_link_report(bp);
        bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
    }
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
    struct bnx2x *bp, u32 cid)
{
    DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);

    if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
        return &bnx2x_fcoe_sp_obj(bp, q_obj);
    else
        return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
    u16 hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    u32 cid;
    u8 opcode;
    int rc, spqe_cnt = 0;
    struct bnx2x_queue_sp_obj *q_obj;
    struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
    struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

    hw_cons = le16_to_cpu(*bp->eq_cons_sb);

    /* ... adjust hw_cons when it lands on a next-page element ... */

    /* This function may never run in parallel with itself for a
     * specific bp, thus there is no need in "paired" read memory
     * barrier here.
     */
    sw_cons = bp->eq_cons;
    sw_prod = bp->eq_prod;

    DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->eq_spq_left %x\n",
       hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

    for (; sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &bp->eq_ring[EQ_DESC(sw_cons)];

        rc = bnx2x_iov_eq_sp_event(bp, elem);
        if (!rc)
            goto next_spqe;

        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
        case EVENT_RING_OPCODE_VF_PF_CHANNEL:
            bnx2x_vf_mbx_schedule(bp,
                                  &elem->message.data.vf_pf_event);
            continue;

        case EVENT_RING_OPCODE_STAT_QUERY:
            DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
                   "got statistics comp event %d\n",
                   bp->stats_comp++);
            /* nothing to do with stats comp */
            goto next_spqe;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /* we may want to verify here that the bp state is
             * BNX2X_STATE_OPEN
             */
            cid = SW_CID(elem->message.data.cfc_del_event.cid);

            if (CNIC_LOADED(bp) &&
                !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
                goto next_spqe;

            q_obj = bnx2x_cid_to_q_obj(bp, cid);

            if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
                break;

            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
            if (f_obj->complete_cmd(bp, f_obj,
                                    BNX2X_F_CMD_TX_STOP))
                break;
            goto next_spqe;

        case EVENT_RING_OPCODE_START_TRAFFIC:
            bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
            if (f_obj->complete_cmd(bp, f_obj,
                                    BNX2X_F_CMD_TX_START))
                break;
            goto next_spqe;

        /* ... FUNCTION_UPDATE (completes SWITCH_UPDATE or schedules an
         * AFEX sp_rtnl task and completes AFEX_UPDATE, then calls
         * bnx2x_after_afex_vif_lists() for AFEX_VIF_LISTS),
         * FUNCTION_START/STOP (complete_cmd F_CMD_START/STOP), and the
         * classification / mcast / filter-rules completions dispatched
         * on (opcode | bp->state) via
         * bnx2x_handle_classification_eqe(), bnx2x_handle_mcast_eqe()
         * and bnx2x_handle_rx_mode_eqe(); unknown events log
         * "Unknown EQ event %d, bp->state 0x%x" ...
         */
        default:
            BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
                      elem->message.opcode, bp->state);
        }

next_spqe:
        spqe_cnt++;
    } /* for */

    atomic_add(spqe_cnt, &bp->eq_spq_left);

    bp->eq_cons = sw_cons;
    bp->eq_prod = sw_prod;
    /* Make sure that above mem writes were issued towards the memory */
    smp_wmb();

    /* update producer */
    bnx2x_update_eq_prod(bp, bp->eq_prod);
}
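/* Illustrative sketch, not driver code: the consumer walk that
 * bnx2x_eq_int() performs -- drain elements from the cached software
 * consumer up to the hardware-reported producer, then let the caller
 * publish the new consumer. EQ_SIZE and handle() are hypothetical
 * stand-ins for the real ring masking and element dispatch.
 */
#define EQ_SIZE 64u   /* hypothetical ring size */

static void eq_drain(unsigned short *sw_cons, unsigned short hw_cons,
                     void (*handle)(unsigned short idx))
{
    while (*sw_cons != hw_cons) {
        handle((unsigned short)(*sw_cons % EQ_SIZE)); /* dispatch one */
        (*sw_cons)++;   /* running index; wraps naturally */
    }
}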
static void bnx2x_sp_task(struct work_struct *work)
{
    struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);

    /* make sure the atomic interrupt_occurred has been written */
    smp_rmb();
    if (atomic_read(&bp->interrupt_occurred)) {

        /* what work needs to be performed? */
        u16 status = bnx2x_update_dsb_idx(bp);

        atomic_set(&bp->interrupt_occurred, 0);

        /* HW attentions */
        if (status & BNX2X_DEF_SB_ATT_IDX) {
            bnx2x_attn_int(bp);
            status &= ~BNX2X_DEF_SB_ATT_IDX;
        }

        /* SP events: STAT_QUERY and others */
        if (status & BNX2X_DEF_SB_IDX) {
            struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

            if (FCOE_INIT(bp) &&
                (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                /* Prevent local bottom-halves from running as
                 * we are going to change the local NAPI list.
                 */
                local_bh_disable();
                napi_schedule(&bnx2x_fcoe(bp, napi));
                local_bh_enable();
            }

            /* Handle EQ completions */
            bnx2x_eq_int(bp);
            bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

            status &= ~BNX2X_DEF_SB_IDX;
        }

        /* if status is non zero then perhaps something went wrong */
        if (unlikely(status))
            DP(BNX2X_MSG_SP,
               "got an unknown interrupt! (status 0x%x)\n", status);

        /* ack status block only if something was actually handled */
        bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
                     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
    }

    /* afex - poll to check if VIFSET_ACK should be sent to MFW */
    if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
                           &bp->sp_state)) {
        bnx2x_link_report(bp);
        bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
    }
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2x *bp = netdev_priv(dev);

    bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
                 IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return IRQ_HANDLED;
#endif

    if (CNIC_LOADED(bp)) {
        struct cnic_ops *c_ops;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
            c_ops->cnic_handler(bp->cnic_data, NULL);
        rcu_read_unlock();
    }

    /* schedule sp task to perform default status block work, ack
     * attentions and enable interrupts.
     */
    bnx2x_schedule_sp_task(bp);

    return IRQ_HANDLED;
}

/* end of slow path */

void bnx2x_drv_pulse(struct bnx2x *bp)
{
    SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
             bp->fw_drv_pulse_wr_seq);
}

static void bnx2x_timer(unsigned long data)
{
    struct bnx2x *bp = (struct bnx2x *) data;

    if (!netif_running(bp->dev))
        return;

    if (IS_PF(bp) &&
        !BP_NOMCP(bp)) {
        int mb_idx = BP_FW_MB_IDX(bp);
        u16 drv_pulse;
        u16 mcp_pulse;

        ++bp->fw_drv_pulse_wr_seq;
        bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
        drv_pulse = bp->fw_drv_pulse_wr_seq;
        bnx2x_drv_pulse(bp);

        mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);
        /* The delta between driver pulse and mcp response should
         * be 1 (before mcp response) or 0 (after mcp response).
         */
        if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 1)
            BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                      drv_pulse, mcp_pulse);
    }

    if (bp->state == BNX2X_STATE_OPEN)
        bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

    /* sample pf vf bulletin board for new posts from pf */
    if (IS_VF(bp))
        bnx2x_timer_sriov(bp);

    mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
    u32 i;
    if (!(len % 4) && !(addr % 4))
        for (i = 0; i < len; i += 4)
            REG_WR(bp, addr + i, fill);
    else
        for (i = 0; i < len; i++)
            REG_WR8(bp, addr + i, fill);
}

/* helper: writes FP SP data to FW - data_size in dwords */
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
                                int fw_sb_id,
                                u32 *sb_data_p,
                                u32 data_size)
{
    int index;
    for (index = 0; index < data_size; index++)
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
               sizeof(u32)*index,
               *(sb_data_p + index));
}

static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    int data_size = 0;
    u32 *sb_data_p;

    /* disable the function first */
    if (!CHIP_IS_E1x(bp)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        /* ... mark the SB as disabled ... */
        sb_data_p = (u32 *)&sb_data_e2;
        data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
    } else {
        memset(&sb_data_e1x, 0,
               sizeof(struct hc_status_block_data_e1x));
        /* ... mark the SB as disabled ... */
        sb_data_p = (u32 *)&sb_data_e1x;
        data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
    }
    bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
               CSTORM_STATUS_BLOCK_SIZE);
    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
               CSTORM_SYNC_BLOCK_SIZE);
}

/* helper:  writes SP SB data to FW */
static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
                                struct hc_sp_status_block_data *sp_sb_data)
{
    int func = BP_FUNC(bp);
    int i;
    for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
               i*sizeof(u32),
               *((u32 *)sp_sb_data + i));
}

static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);
    struct hc_sp_status_block_data sp_sb_data;
    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    /* ... mark the SP SB as disabled ... */

    bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
               CSTORM_SP_STATUS_BLOCK_SIZE);
    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
               CSTORM_SP_SYNC_BLOCK_SIZE);
}
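/* Illustrative sketch, not driver code: the heartbeat exchange behind
 * bnx2x_timer()/bnx2x_drv_pulse() -- the driver publishes a wrapping
 * sequence number and checks that the management firmware's echo stays
 * within one tick. PULSE_SEQ_MASK is a hypothetical stand-in for the
 * driver's DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK.
 */
#define PULSE_SEQ_MASK 0x7fffu

static int pulse_in_sync(unsigned short *drv_seq, unsigned short mcp_echo)
{
    /* advance and (conceptually) write our pulse to shared memory */
    *drv_seq = (*drv_seq + 1) & PULSE_SEQ_MASK;

    /* delta should be 1 (MCP not yet responded) or 0 (it has) */
    return (unsigned short)((*drv_seq - mcp_echo) & PULSE_SEQ_MASK) <= 1;
}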
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                   u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
    int igu_seg_id;
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    int data_size;
    u32 *sb_data_p;

    if (CHIP_INT_MODE_IS_BC(bp))
        igu_seg_id = HC_SEG_ACCESS_NORM;
    else
        igu_seg_id = IGU_SEG_ACCESS_NORM;

    bnx2x_zero_fp_sb(bp, fw_sb_id);

    if (!CHIP_IS_E1x(bp)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_ENABLED;
        sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
        sb_data_e2.common.same_igu_sb_1b = true;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
        hc_sm_p = sb_data_e2.common.state_machine;
        sb_data_p = (u32 *)&sb_data_e2;
        data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
    } else {
        memset(&sb_data_e1x, 0,
               sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_ENABLED;
        sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = false;
        sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
        sb_data_e1x.common.same_igu_sb_1b = true;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
        hc_sm_p = sb_data_e1x.common.state_machine;
        sb_data_p = (u32 *)&sb_data_e1x;
        data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
    }

    /* ... set up the RX and TX ndsb state machines in hc_sm_p ... */

    DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW - PCI guarantees endianity of regpair */
    bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
                                     u16 tx_usec, u16 rx_usec)
{
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
                                   false, rx_usec);
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                   HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
                                   tx_usec);
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                   HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
                                   tx_usec);
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                   HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
                                   tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
    struct host_sp_status_block *def_sb = bp->def_status_blk;
    dma_addr_t mapping = bp->def_status_blk_mapping;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = BP_PORT(bp);
    int func = BP_FUNC(bp);
    int reg_offset, reg_offset_en5;
    u64 section;
    int index;
    struct hc_sp_status_block_data sp_sb_data;
    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(bp)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = bp->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* ATTN */
    section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                        atten_status_block);
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

    bp->attn_state = 0;

    reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
    reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        int sindex;
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++)
            bp->attn_group[index].sig[sindex] =
                REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

        if (!CHIP_IS_E1x(bp))
            /* enable5 is separate from the rest of the registers,
             * and therefore the address skip is 4 and not 16
             * between the different groups
             */
            bp->attn_group[index].sig[4] = REG_RD(bp,
                    reg_offset_en5 + 0x4*index);
        else
            bp->attn_group[index].sig[4] = 0;
    }

    if (bp->common.int_block == INT_BLOCK_HC) {
        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));
    } else if (!CHIP_IS_E1x(bp)) {
        REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                        sp_sb);

    bnx2x_zero_sp_sb(bp);

    /* PCI guarantees endianity of regpair */
    sp_sb_data.state           = SB_ENABLED;
    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
    sp_sb_data.igu_seg_id      = igu_seg_id;
    sp_sb_data.p_func.pf_id    = func;
    sp_sb_data.p_func.vnic_id  = BP_VN(bp);
    sp_sb_data.p_func.vf_id    = 0xff;

    bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

    bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
    int i;

    for_each_eth_queue(bp, i)
        bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
                                 bp->tx_ticks, bp->rx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
    spin_lock_init(&bp->spq_lock);
    atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

    bp->spq_prod_idx = 0;
    bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
    bp->spq_prod_bd = bp->spq;
    bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
    int i;
    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        union event_ring_elem *elem =
            &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi =
            cpu_to_le32(U64_HI(bp->eq_mapping +
                        BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo =
            cpu_to_le32(U64_LO(bp->eq_mapping +
                        BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
    }
    bp->eq_cons = 0;
    bp->eq_prod = NUM_EQ_DESC;
    bp->eq_cons_sb = BNX2X_EQ_INDEX;
    /* we want a warning message before it gets wrought out of EQ */
    atomic_set(&bp->eq_spq_left,
               min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING,
                     NUM_EQ_DESC) - 1);
}
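/* Illustrative sketch, not driver code: the page-chaining done in
 * bnx2x_init_eq_ring() above -- the last descriptor of each page holds the
 * DMA address of the next page, split into hi/lo 32-bit halves so the
 * hardware can follow the chain. Types and field names are hypothetical.
 */
#include <stdint.h>

struct next_page_desc { uint32_t addr_hi, addr_lo; };

static void chain_pages(struct next_page_desc *last_elem_of_page,
                        uint64_t next_page_dma)
{
    last_elem_of_page->addr_hi = (uint32_t)(next_page_dma >> 32);
    last_elem_of_page->addr_lo = (uint32_t)(next_page_dma & 0xffffffffu);
}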
static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                               unsigned long rx_mode_flags,
                               unsigned long rx_accept_flags,
                               unsigned long tx_accept_flags,
                               unsigned long ramrod_flags)
{
    struct bnx2x_rx_mode_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* Prepare ramrod parameters */
    ramrod_param.cid = 0;
    ramrod_param.cl_id = cl_id;
    ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
    ramrod_param.func_id = BP_FUNC(bp);

    ramrod_param.pstate = &bp->sp_state;
    ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

    ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
    ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

    set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

    ramrod_param.ramrod_flags = ramrod_flags;
    ramrod_param.rx_mode_flags = rx_mode_flags;

    ramrod_param.rx_accept_flags = rx_accept_flags;
    ramrod_param.tx_accept_flags = tx_accept_flags;

    rc = bnx2x_config_rx_mode(bp, &ramrod_param);
    if (rc < 0) {
        BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
        return rc;
    }

    return 0;
}

static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                                   unsigned long *rx_accept_flags,
                                   unsigned long *tx_accept_flags)
{
    /* Clear the flags first */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    switch (rx_mode) {
    case BNX2X_RX_MODE_NONE:
        /* both RX and TX accept masks stay empty */
        break;

    case BNX2X_RX_MODE_NORMAL:
        __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
        /* internal switching mode */
        __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
        __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
        __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
        break;

    case BNX2X_RX_MODE_ALLMULTI:
        __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
        /* ... matching TX accept flags ... */
        break;

    case BNX2X_RX_MODE_PROMISC:
        /* According to definition of SI mode, iface in promisc mode
         * should receive matched and unmatched (in resolution of port)
         * unicast packets.
         */
        __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
        /* internal switching mode */
        if (IS_MF_SI(bp))
            __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
        else
            __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
        /* ... */
        break;

    default:
        BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
        return -EINVAL;
    }

    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
    if (rx_mode != BNX2X_RX_MODE_NONE) {
        __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
    }

    return 0;
}

/* called with netif_addr_lock_bh() */
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
    int rc;

    if (!NO_FCOE(bp))
        /* Configure rx_mode of FCoE Queue */
        __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);

    rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
                                 &tx_accept_flags);
    if (rc)
        return rc;

    __set_bit(RAMROD_RX, &rx_mode_flags);
    __set_bit(RAMROD_TX, &rx_mode_flags);

    return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
                               rx_accept_flags, tx_accept_flags,
                               ramrod_flags);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
    int i;

    /* Zero this manually as its initialization is
       currently missing in the initTool */
    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_AGG_DATA_OFFSET + i * 4, 0);
    if (!CHIP_IS_E1x(bp)) {
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
                CHIP_INT_MODE_IS_BC(bp) ?
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bnx2x_init_internal_common(bp);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is
           initialized inside bnx2x_pf_init */
        break;

    default:
        BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
    return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
    return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
    if (CHIP_IS_E1x(fp->bp))
        return BP_L_ID(fp->bp) + fp->index;
    else
        return bnx2x_fp_igu_sb_id(fp);
}

static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
    struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
    unsigned long q_type = 0;
    u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
    u8 cos;

    fp->rx_queue = fp_idx;
    fp->cid = fp_idx;
    fp->cl_id = bnx2x_fp_cl_id(fp);
    fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
    fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);

    /* init shortcut */
    fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

    /* Setup SB indices */
    fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

    /* Configure Queue State object */
    __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
    __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

    /* init tx data */
    for_each_cos_in_tx_queue(fp, cos) {
        bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
                          CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
                          FP_COS_TO_TXQ(fp, cos, bp),
                          BNX2X_TX_SB_INDEX_BASE + cos, fp);
        cids[cos] = fp->txdata_ptr[cos]->cid;
    }

    /* nothing more for vf to do here */
    if (IS_VF(bp))
        return;

    bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
                  fp->fw_sb_id, fp->igu_sb_id);

    bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
                         fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                         bnx2x_sp_mapping(bp, q_rdata), q_type);

    DP(NETIF_MSG_IFUP,
       "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
       fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
       fp->igu_sb_id);
}

static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
    int i;

    for_each_tx_queue_cnic(bp, i)
        bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
    int i;
    u8 cos;

    for_each_eth_queue(bp, i)
        for_each_cos_in_tx_queue(&bp->fp[i], cos)
            bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
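/* Illustrative sketch, not driver code: the classification performed by
 * bnx2x_fill_accept_flags() above -- translate a device rx_mode into
 * independent accept bits for the rx-filter configuration. The flag and
 * mode names here are hypothetical stand-ins.
 */
enum { ACCEPT_UCAST = 1, ACCEPT_MCAST = 2, ACCEPT_BCAST = 4,
       ACCEPT_ALL_UCAST = 8, ACCEPT_ALL_MCAST = 16 };
enum rx_mode { RX_MODE_NONE, RX_MODE_NORMAL, RX_MODE_ALLMULTI,
               RX_MODE_PROMISC };

static int fill_accept(enum rx_mode mode, unsigned int *accept)
{
    switch (mode) {
    case RX_MODE_NONE:
        *accept = 0;                    /* drop everything */
        break;
    case RX_MODE_NORMAL:
        *accept = ACCEPT_UCAST | ACCEPT_MCAST | ACCEPT_BCAST;
        break;
    case RX_MODE_ALLMULTI:
        *accept = ACCEPT_UCAST | ACCEPT_ALL_MCAST | ACCEPT_BCAST;
        break;
    case RX_MODE_PROMISC:
        *accept = ACCEPT_ALL_UCAST | ACCEPT_ALL_MCAST | ACCEPT_BCAST;
        break;
    default:
        return -1;                      /* unknown mode */
    }
    return 0;
}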
static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
    struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
    unsigned long q_type = 0;

    bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
    bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                 BNX2X_FCOE_ETH_CL_ID_IDX);
    bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
    bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
    bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
    bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
    bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
                      fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
                      fp);

    /* qZone id equals to FW (per path) client id */
    bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
    /* init shortcut */
    bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
        bnx2x_rx_ustorm_prods_offset(fp);

    /* Configure Queue State object */
    __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
    __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

    /* No multi-CoS for FCoE L2 client */
    bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
                         &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                         bnx2x_sp_mapping(bp, q_rdata), q_type);

    DP(NETIF_MSG_IFUP,
       "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
       fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
       fp->igu_sb_id);
}

void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
    if (!NO_FCOE(bp))
        bnx2x_init_fcoe_fp(bp);

    bnx2x_init_sb(bp, bp->cnic_sb_mapping,
                  BNX2X_VF_ID_INVALID, false,
                  bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

    /* ensure status block indices were read */
    rmb();
    bnx2x_init_rx_rings_cnic(bp);
    bnx2x_init_tx_rings_cnic(bp);

    /* flush all */
    mb();
    mmiowb();
}

void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
    int i;

    /* Setup NIC internals and enable interrupts */
    for_each_eth_queue(bp, i)
        bnx2x_init_eth_fp(bp, i);

    /* ensure status block indices were read */
    rmb();
    bnx2x_init_rx_rings(bp);
    bnx2x_init_tx_rings(bp);

    if (IS_PF(bp)) {
        /* Initialize MOD_ABS interrupts */
        bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
                               bp->common.shmem_base,
                               bp->common.shmem2_base, BP_PORT(bp));

        /* initialize the default status block and sp ring */
        bnx2x_init_def_sb(bp);
        bnx2x_update_dsb_idx(bp);
        bnx2x_init_sp_ring(bp);
    } else {
        bnx2x_memset_stats(bp);
    }
}

void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
    bnx2x_init_eq_ring(bp);
    bnx2x_init_internal(bp, load_code);
    bnx2x_pf_init(bp);
    bnx2x_stats_init(bp);

    /* flush all before enabling interrupts */
    mb();
    mmiowb();

    bnx2x_int_enable(bp);

    /* Check for SPIO5 */
    bnx2x_attn_int_deasserted0(bp,
        REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
        AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
    bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
                                        &bp->gunzip_mapping, GFP_KERNEL);
    if (bp->gunzip_buf == NULL)
        goto gunzip_nomem1;

    bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
    if (bp->strm == NULL)
        goto gunzip_nomem2;

    bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
    if (bp->strm->workspace == NULL)
        goto gunzip_nomem3;

    return 0;

gunzip_nomem3:
    kfree(bp->strm);
    bp->strm = NULL;

gunzip_nomem2:
    dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
                      bp->gunzip_mapping);
    bp->gunzip_buf = NULL;

gunzip_nomem1:
    BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
    return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
    if (bp->strm) {
        vfree(bp->strm->workspace);
        kfree(bp->strm);
        bp->strm = NULL;
    }

    if (bp->gunzip_buf) {
        dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
                          bp->gunzip_mapping);
        bp->gunzip_buf = NULL;
    }
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
    int n, rc;

    /* ... validate the gzip header and skip it, leaving n bytes
     * consumed (the header plus the optional embedded file name) ...
     */

    bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
    bp->strm->avail_in = len - n;
    bp->strm->next_out = bp->gunzip_buf;
    bp->strm->avail_out = FW_BUF_SIZE;

    rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
    if (rc != Z_OK)
        return rc;

    rc = zlib_inflate(bp->strm, Z_FINISH);
    if ((rc != Z_OK) && (rc != Z_STREAM_END))
        netdev_err(bp->dev, "Firmware decompression error: %s\n",
                   bp->strm->msg);

    bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
    if (bp->gunzip_outlen & 0x3)
        netdev_err(bp->dev,
                   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
                   bp->gunzip_outlen);
    bp->gunzip_outlen >>= 2;

    zlib_inflateEnd(bp->strm);

    if (rc == Z_STREAM_END)
        return 0;

    return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
    u32 wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20;     /* SOP */
    REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
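/* Illustrative sketch (userspace zlib, not the kernel's zlib_inflate):
 * the same raw-DEFLATE approach bnx2x_gunzip() uses -- skip the gzip
 * header by hand, then inflate with a negative window-bits value so zlib
 * expects no header of its own. hdr_len is the caller-computed number of
 * gzip header bytes to skip; error handling is abbreviated.
 */
#include <string.h>
#include <zlib.h>

static int gunzip_raw(const unsigned char *zbuf, unsigned int len,
                      unsigned char *out, unsigned int out_len,
                      unsigned int hdr_len)
{
    z_stream strm;
    int rc;

    memset(&strm, 0, sizeof(strm));
    strm.next_in = (unsigned char *)zbuf + hdr_len;
    strm.avail_in = len - hdr_len;
    strm.next_out = out;
    strm.avail_out = out_len;

    rc = inflateInit2(&strm, -MAX_WBITS);   /* raw deflate, no header */
    if (rc != Z_OK)
        return rc;

    rc = inflate(&strm, Z_FINISH);
    inflateEnd(&strm);
    return (rc == Z_STREAM_END) ? Z_OK : rc;
}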
/* some of the internal memories are not directly readable from the driver;
 * to test them we send debug packets through the loopback
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
    int factor;
    int count, i;
    u32 val = 0;

    if (CHIP_REV_IS_FPGA(bp))
        factor = 120;
    else if (CHIP_REV_IS_EMUL(bp))
        factor = 200;
    else
        factor = 1;

    /* Disable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(bp, CFC_REG_DEBUG0, 0x1);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /*  Write 0 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bnx2x_lb_pckt(bp);

    /* TODO do i reset NIG statistic? */
    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {
        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);
        if (val == 0x10)
            break;

        usleep_range(10000, 20000);
        count--;
    }
    if (val != 0x10) {
        BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
        return -1;
    }

    /* Wait until PRS register shows 1 packet */
    count = 1000 * factor;
    while (count) {
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val == 1)
            break;

        usleep_range(10000, 20000);
        count--;
    }
    if (val != 0x1) {
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
        return -2;
    }

    /* Reset and init BRB, PRS */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    msleep(50);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    msleep(50);
    bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

    /* Disable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(bp, CFC_REG_DEBUG0, 0x1);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++)
        bnx2x_lb_pckt(bp);

    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = 1000 * factor;
    while (count) {
        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);
        if (val == 0xb0)
            break;

        usleep_range(10000, 20000);
        count--;
    }
    if (val != 0xb0) {
        BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
        return -3;
    }

    /* Wait until PRS register shows 2 packets */
    val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
    if (val != 2)
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

    /* Write 1 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait until PRS register shows 3 packets */
    msleep(10 * factor);
    /* Wait until NIG register shows 1 packet of size 0x10 */
    val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
    if (val != 3)
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++)
        REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
    val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BNX2X_ERR("clear of NIG failed\n");
        return -4;
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    msleep(50);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    msleep(50);
    bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(bp))
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);

    /* Enable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(bp, CFC_REG_DEBUG0, 0x0);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

    return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
    u32 val;

    REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
    if (!CHIP_IS_E1x(bp))
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
    else
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
    REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
    REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
    /* mask read length error interrupts in brb for parser
     * (parsing unit and 'checksum and crc' unit)
     * these errors are legal (PU reads fixed length and CAC can cause
     * read length error on truncated packets)
     */
    REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
    REG_WR(bp, QM_REG_QM_INT_MASK, 0);
    REG_WR(bp, TM_REG_TM_INT_MASK, 0);
    REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
    REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
    REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*    REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*    REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
    REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
    REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
    REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*    REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*    REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
    REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
    REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
    REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
    REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*    REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

    val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
          PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
          PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
    if (!CHIP_IS_E1x(bp))
        val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
               PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
    REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);

    REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
    REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
    REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*    REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */

    if (!CHIP_IS_E1x(bp))
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

    REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*    REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
    u32 val = 0x1400;

    /* reset_common */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           0xd3ffff7f);

    if (CHIP_IS_E3(bp)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

static void bnx2x_setup_dmae(struct bnx2x *bp)
{
    bp->dmae_ready = 0;
    spin_lock_init(&bp->dmae_lock);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
    u16 devctl;
    int r_order, w_order;

    pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
    w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
    if (bp->mrrs == -1)
        r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
    else {
        DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
        r_order = bp->mrrs;
    }

    bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
    int is_required;
    u32 val;
    int port;

    if (BP_NOMCP(bp))
        return;

    is_required = 0;
    val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
          SHARED_HW_CFG_FAN_FAILURE_MASK;

    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
        is_required = 1;

    /*
     * The fan failure mechanism is usually related to the PHY type since
     * the power consumption of the board changes according to the PHY.
     * Currently, fan is required for most designs with SFX7101, BCM8727
     * and BCM8481.
     */
    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
        for (port = PORT_0; port < PORT_MAX; port++) {
            is_required |=
                bnx2x_fan_failure_det_req(
                    bp,
                    bp->common.shmem_base,
                    bp->common.shmem2_base,
                    port);
        }

    if (is_required == 0)
        return;

    /* Fan failure is indicated by SPIO 5 */
    bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

    /* set to active low mode */
    val = REG_RD(bp, MISC_REG_SPIO_INT);
    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
    REG_WR(bp, MISC_REG_SPIO_INT, val);

    /* enable interrupt to signal the IGU */
    val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
    val |= MISC_SPIO_SPIO5;
    REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

void bnx2x_pf_disable(struct bnx2x *bp)
{
    u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
    val &= ~IGU_PF_CONF_FUNC_EN;

    REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
    REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
    REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static void bnx2x__common_init_phy(struct bnx2x *bp)
{
    u32 shmem_base[2], shmem2_base[2];

    /* Avoid common init in case MFW supports LFA */
    if (SHMEM2_RD(bp, size) >
        (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
        return;
    shmem_base[0] = bp->common.shmem_base;
    shmem2_base[0] = bp->common.shmem2_base;
    if (!CHIP_IS_E1x(bp)) {
        shmem_base[1] =
            SHMEM2_RD(bp, other_shmem_base_addr);
        shmem2_base[1] =
            SHMEM2_RD(bp, other_shmem2_base_addr);
    }
    bnx2x_acquire_phy_lock(bp);
    bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
                          bp->common.chip_id);
    bnx2x_release_phy_lock(bp);
}
static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
    REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
    REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
    REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
    REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
    REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);

    /* make sure this value is 0 */
    REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

    REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
    REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
    REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
    REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
    bnx2x_config_endianity(bp, 1);
#else
    bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
    bnx2x_config_endianity(bp, 0);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp: driver handle
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
    u32 val;

    DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));

    /* take the RESET lock to protect undi_unload flow from accessing
     * registers while we're resetting the chip
     */
    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

    bnx2x_reset_common(bp);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

    val = 0xfffc;
    if (CHIP_IS_E3(bp)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

    bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

    if (!CHIP_IS_E1x(bp)) {
        u8 abs_func_id;

        /* 4-port mode or 2-port mode we need to turn off master-enable
         * for everyone; after that we turn it back on for self. So, we
         * disregard multi-function, and always disable all functions
         * on the given path, this means 0,2,4,6 for path 0 and 1,3,5,7
         * for path 1.
         */
        for (abs_func_id = BP_PATH(bp);
             abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
            if (abs_func_id == BP_ABS_FUNC(bp)) {
                REG_WR(bp,
                       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
                       1);
                continue;
            }

            bnx2x_pretend_func(bp, abs_func_id);
            /* clear pf enable */
            bnx2x_pf_disable(bp);
            bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
        }
    }

    bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
    if (CHIP_IS_E1(bp)) {
        /* enable HW interrupt from PXP on USDM overflow
           bit 16 on INT_MASK_0 */
        REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
    }

    bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
    bnx2x_init_pxp(bp);
    bnx2x_set_endianity(bp);
    bnx2x_ilt_init_page_size(bp, INITOP_SET);

    if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
        REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

    /* let the HW do it's magic... */
    msleep(100);
    /* finish PXP init */
    val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BNX2X_ERR("PXP2 CFG failed\n");
        return -EBUSY;
    }
    val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BNX2X_ERR("PXP2 RD_INIT failed\n");
        return -EBUSY;
    }

    if (!CHIP_IS_E1x(bp)) {
        /* ... program the timers-block towers and clean an ILT
         * client range via a "pretended" function, as a workaround
         * for a timers-block HW bug (see the long comment in the
         * original source) ...
         */
        struct ilt_client_info ilt_cli;
        struct bnx2x_ilt ilt;

        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct bnx2x_ilt));

        bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
        bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

        REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
        REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
        REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    }

    REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

    if (!CHIP_IS_E1x(bp)) {
        int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
            (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
        bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

        bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

        /* let the HW do it's magic... */
        do {
            msleep(200);
            val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
        } while (factor-- && (val != 1));

        if (val != 1) {
            BNX2X_ERR("ATC_INIT failed\n");
            return -EBUSY;
        }
    }

    bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

    bnx2x_iov_init_dmae(bp);

    /* clean the DMAE memory */
    bp->dmae_ready = 1;
    bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

    bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

    bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

    bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

    bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

    bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
    bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
    bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
    bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

    bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);

    /* QM queues pointers table */
    bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

    /* soft reset pulse */
    REG_WR(bp, QM_REG_SOFT_RESET, 1);
    REG_WR(bp, QM_REG_SOFT_RESET, 0);

    if (CNIC_SUPPORT(bp))
        bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);

    bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);

    if (!CHIP_REV_IS_SLOW(bp))
        /* enable hw interrupt from doorbell Q */
        REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

    bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

    bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
    REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

    if (!CHIP_IS_E1(bp))
        REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

    if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
        if (IS_MF_AFEX(bp)) {
            /* configure that VNTag and VLAN headers must be
             * received in afex mode
             */
            REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
        } else {
            /* Bit-map indicating which L2 hdrs may appear
             * after the basic Ethernet header
             */
            REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
                   bp->path_has_ovlan ? 7 : 6);
        }
    }
    bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

    if (!CHIP_IS_E1x(bp)) {
        /* reset VFC memories */
        REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);
        REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);

        msleep(20);
    }

    bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

    /* sync semi rtc */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           0x80000000);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
           0x80000000);

    bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
    bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

    if (!CHIP_IS_E1x(bp)) {
        if (IS_MF_AFEX(bp)) {
            /* configure that VNTag and VLAN headers must be
             * sent in afex mode
             */
            REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
        } else {
            REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
                   bp->path_has_ovlan ? 7 : 6);
        }
    }
7 : 6); bnx2x_init_hw_common() 7304 REG_WR(bp, SRC_REG_SOFT_RST, 1); bnx2x_init_hw_common() 7306 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); bnx2x_init_hw_common() 7308 if (CNIC_SUPPORT(bp)) { bnx2x_init_hw_common() 7309 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); bnx2x_init_hw_common() 7310 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); bnx2x_init_hw_common() 7311 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); bnx2x_init_hw_common() 7312 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); bnx2x_init_hw_common() 7313 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); bnx2x_init_hw_common() 7314 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); bnx2x_init_hw_common() 7315 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); bnx2x_init_hw_common() 7316 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); bnx2x_init_hw_common() 7317 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); bnx2x_init_hw_common() 7318 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); bnx2x_init_hw_common() 7320 REG_WR(bp, SRC_REG_SOFT_RST, 0); bnx2x_init_hw_common() 7324 dev_alert(&bp->pdev->dev, bnx2x_init_hw_common() 7328 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); bnx2x_init_hw_common() 7330 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); bnx2x_init_hw_common() 7332 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); bnx2x_init_hw_common() 7333 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); bnx2x_init_hw_common() 7335 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); bnx2x_init_hw_common() 7338 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); bnx2x_init_hw_common() 7340 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); bnx2x_init_hw_common() 7342 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) bnx2x_init_hw_common() 7343 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); bnx2x_init_hw_common() 7345 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); bnx2x_init_hw_common() 7346 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); bnx2x_init_hw_common() 7349 REG_WR(bp, 0x2814, 0xffffffff); bnx2x_init_hw_common() 7350 REG_WR(bp, 0x3820, 0xffffffff); bnx2x_init_hw_common() 7352 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common() 7353 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, bnx2x_init_hw_common() 7356 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, bnx2x_init_hw_common() 7360 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, bnx2x_init_hw_common() 7366 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); bnx2x_init_hw_common() 7367 if (!CHIP_IS_E1(bp)) { bnx2x_init_hw_common() 7369 if (!CHIP_IS_E3(bp)) bnx2x_init_hw_common() 7370 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); bnx2x_init_hw_common() 7372 if (CHIP_IS_E1H(bp)) bnx2x_init_hw_common() 7374 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); bnx2x_init_hw_common() 7376 if (CHIP_REV_IS_SLOW(bp)) bnx2x_init_hw_common() 7380 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common() 7385 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common() 7390 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common() 7395 REG_WR(bp, CFC_REG_DEBUG0, 0); bnx2x_init_hw_common() 7397 if (CHIP_IS_E1(bp)) { bnx2x_init_hw_common() 7400 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); bnx2x_init_hw_common() 7401 val = *bnx2x_sp(bp, wb_data[0]); bnx2x_init_hw_common() 7404 if ((val == 0) && bnx2x_int_mem_test(bp)) { bnx2x_init_hw_common() 7410 bnx2x_setup_fan_failure_detection(bp); bnx2x_init_hw_common() 7413 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); bnx2x_init_hw_common() 7415 bnx2x_enable_blocks_attention(bp); bnx2x_init_hw_common() 7416 bnx2x_enable_blocks_parity(bp); bnx2x_init_hw_common() 7418 if (!BP_NOMCP(bp)) { bnx2x_init_hw_common() 
7419 if (CHIP_IS_E1x(bp)) bnx2x_init_hw_common() 7420 bnx2x__common_init_phy(bp); bnx2x_init_hw_common() 7430 * @bp: driver handle 7432 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) bnx2x_init_hw_common_chip() argument 7434 int rc = bnx2x_init_hw_common(bp); bnx2x_init_hw_common_chip() 7440 if (!BP_NOMCP(bp)) bnx2x_init_hw_common_chip() 7441 bnx2x__common_init_phy(bp); bnx2x_init_hw_common_chip() 7446 static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_hw_port() argument 7448 int port = BP_PORT(bp); bnx2x_init_hw_port() 7455 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); bnx2x_init_hw_port() 7457 bnx2x_init_block(bp, BLOCK_MISC, init_phase); bnx2x_init_hw_port() 7458 bnx2x_init_block(bp, BLOCK_PXP, init_phase); bnx2x_init_hw_port() 7459 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); bnx2x_init_hw_port() 7466 if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_port() 7467 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_init_hw_port() 7469 bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_hw_port() 7470 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); bnx2x_init_hw_port() 7471 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); bnx2x_init_hw_port() 7472 bnx2x_init_block(bp, BLOCK_QM, init_phase); bnx2x_init_hw_port() 7474 bnx2x_init_block(bp, BLOCK_TCM, init_phase); bnx2x_init_hw_port() 7475 bnx2x_init_block(bp, BLOCK_UCM, init_phase); bnx2x_init_hw_port() 7476 bnx2x_init_block(bp, BLOCK_CCM, init_phase); bnx2x_init_hw_port() 7477 bnx2x_init_block(bp, BLOCK_XCM, init_phase); bnx2x_init_hw_port() 7480 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); bnx2x_init_hw_port() 7482 if (CNIC_SUPPORT(bp)) { bnx2x_init_hw_port() 7483 bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_hw_port() 7484 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); bnx2x_init_hw_port() 7485 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); bnx2x_init_hw_port() 7488 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); bnx2x_init_hw_port() 7490 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_hw_port() 7492 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { bnx2x_init_hw_port() 7494 if (IS_MF(bp)) bnx2x_init_hw_port() 7495 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); bnx2x_init_hw_port() 7496 else if (bp->dev->mtu > 4096) { bnx2x_init_hw_port() 7497 if (bp->flags & ONE_PORT_FLAG) bnx2x_init_hw_port() 7500 val = bp->dev->mtu; bnx2x_init_hw_port() 7506 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); bnx2x_init_hw_port() 7508 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); bnx2x_init_hw_port() 7509 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); bnx2x_init_hw_port() 7512 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_init_hw_port() 7513 REG_WR(bp, (BP_PORT(bp) ? bnx2x_init_hw_port() 7517 bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_hw_port() 7518 if (CHIP_IS_E3B0(bp)) { bnx2x_init_hw_port() 7519 if (IS_MF_AFEX(bp)) { bnx2x_init_hw_port() 7521 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port() 7524 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port() 7527 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port() 7535 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port() 7538 (bp->path_has_ovlan ? 
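The reg_poll() helper used at 7380-7390 to wait for the CFC link-list, activity-counter and CAM init-done flags is not expanded anywhere in this listing. A minimal sketch of the idiom its call sites imply, assuming reg_poll(bp, reg, expected, ms, wait) returns the last value read:

	/* Hedged sketch only -- the driver's actual reg_poll() body is not
	 * shown above; this reproduces the poll-until-match-or-timeout shape
	 * implied by the calls at 7380-7390. */
	static u32 reg_poll_sketch(struct bnx2x *bp, u32 reg, u32 expected,
				   int ms, int wait)
	{
		u32 val;

		do {
			val = REG_RD(bp, reg);
			if (val == expected)
				break;
			ms -= wait;
			msleep(wait);
		} while (ms > 0);

		return val;
	}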
7446	static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_hw_port() argument
7448	int port = BP_PORT(bp); bnx2x_init_hw_port()
7455	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); bnx2x_init_hw_port()
7457	bnx2x_init_block(bp, BLOCK_MISC, init_phase); bnx2x_init_hw_port()
7458	bnx2x_init_block(bp, BLOCK_PXP, init_phase); bnx2x_init_hw_port()
7459	bnx2x_init_block(bp, BLOCK_PXP2, init_phase); bnx2x_init_hw_port()
7466	if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_port()
7467	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_init_hw_port()
7469	bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_hw_port()
7470	bnx2x_init_block(bp, BLOCK_DMAE, init_phase); bnx2x_init_hw_port()
7471	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); bnx2x_init_hw_port()
7472	bnx2x_init_block(bp, BLOCK_QM, init_phase); bnx2x_init_hw_port()
7474	bnx2x_init_block(bp, BLOCK_TCM, init_phase); bnx2x_init_hw_port()
7475	bnx2x_init_block(bp, BLOCK_UCM, init_phase); bnx2x_init_hw_port()
7476	bnx2x_init_block(bp, BLOCK_CCM, init_phase); bnx2x_init_hw_port()
7477	bnx2x_init_block(bp, BLOCK_XCM, init_phase); bnx2x_init_hw_port()
7480	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); bnx2x_init_hw_port()
7482	if (CNIC_SUPPORT(bp)) { bnx2x_init_hw_port()
7483	bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_hw_port()
7484	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); bnx2x_init_hw_port()
7485	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); bnx2x_init_hw_port()
7488	bnx2x_init_block(bp, BLOCK_DORQ, init_phase); bnx2x_init_hw_port()
7490	bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_hw_port()
7492	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { bnx2x_init_hw_port()
7494	if (IS_MF(bp)) bnx2x_init_hw_port()
7495	low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); bnx2x_init_hw_port()
7496	else if (bp->dev->mtu > 4096) { bnx2x_init_hw_port()
7497	if (bp->flags & ONE_PORT_FLAG) bnx2x_init_hw_port()
7500	val = bp->dev->mtu; bnx2x_init_hw_port()
7506	low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); bnx2x_init_hw_port()
7508	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); bnx2x_init_hw_port()
7509	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); bnx2x_init_hw_port()
7512	if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_init_hw_port()
7513	REG_WR(bp, (BP_PORT(bp) ? bnx2x_init_hw_port()
7517	bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_hw_port()
7518	if (CHIP_IS_E3B0(bp)) { bnx2x_init_hw_port()
7519	if (IS_MF_AFEX(bp)) { bnx2x_init_hw_port()
7521	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7524	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7527	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7535	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7538	(bp->path_has_ovlan ? 7 : 6)); bnx2x_init_hw_port()
7542	bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_hw_port()
7543	bnx2x_init_block(bp, BLOCK_CSDM, init_phase); bnx2x_init_hw_port()
7544	bnx2x_init_block(bp, BLOCK_USDM, init_phase); bnx2x_init_hw_port()
7545	bnx2x_init_block(bp, BLOCK_XSDM, init_phase); bnx2x_init_hw_port()
7547	bnx2x_init_block(bp, BLOCK_TSEM, init_phase); bnx2x_init_hw_port()
7548	bnx2x_init_block(bp, BLOCK_USEM, init_phase); bnx2x_init_hw_port()
7549	bnx2x_init_block(bp, BLOCK_CSEM, init_phase); bnx2x_init_hw_port()
7550	bnx2x_init_block(bp, BLOCK_XSEM, init_phase); bnx2x_init_hw_port()
7552	bnx2x_init_block(bp, BLOCK_UPB, init_phase); bnx2x_init_hw_port()
7553	bnx2x_init_block(bp, BLOCK_XPB, init_phase); bnx2x_init_hw_port()
7555	bnx2x_init_block(bp, BLOCK_PBF, init_phase); bnx2x_init_hw_port()
7557	if (CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7559	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); bnx2x_init_hw_port()
7562	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); bnx2x_init_hw_port()
7564	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); bnx2x_init_hw_port()
7567	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); bnx2x_init_hw_port()
7569	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); bnx2x_init_hw_port()
7572	if (CNIC_SUPPORT(bp)) bnx2x_init_hw_port()
7573	bnx2x_init_block(bp, BLOCK_SRC, init_phase); bnx2x_init_hw_port()
7575	bnx2x_init_block(bp, BLOCK_CDU, init_phase); bnx2x_init_hw_port()
7576	bnx2x_init_block(bp, BLOCK_CFC, init_phase); bnx2x_init_hw_port()
7578	if (CHIP_IS_E1(bp)) { bnx2x_init_hw_port()
7579	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); bnx2x_init_hw_port()
7580	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); bnx2x_init_hw_port()
7582	bnx2x_init_block(bp, BLOCK_HC, init_phase); bnx2x_init_hw_port()
7584	bnx2x_init_block(bp, BLOCK_IGU, init_phase); bnx2x_init_hw_port()
7586	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); bnx2x_init_hw_port()
7591	val = IS_MF(bp) ? 0xF7 : 0x7; bnx2x_init_hw_port()
7593	val |= CHIP_IS_E1(bp) ? 0 : 0x10; bnx2x_init_hw_port()
7594	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); bnx2x_init_hw_port()
7598	REG_WR(bp, reg, bnx2x_init_hw_port()
7599	REG_RD(bp, reg) & bnx2x_init_hw_port()
7603	REG_WR(bp, reg, bnx2x_init_hw_port()
7604	REG_RD(bp, reg) & bnx2x_init_hw_port()
7607	bnx2x_init_block(bp, BLOCK_NIG, init_phase); bnx2x_init_hw_port()
7609	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7613	if (IS_MF_AFEX(bp)) bnx2x_init_hw_port()
7614	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7618	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7621	IS_MF_SD(bp) ? 7 : 6); bnx2x_init_hw_port()
7623	if (CHIP_IS_E3(bp)) bnx2x_init_hw_port()
7624	REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7626	NIG_REG_LLH_MF_MODE, IS_MF(bp)); bnx2x_init_hw_port()
7628	if (!CHIP_IS_E3(bp)) bnx2x_init_hw_port()
7629	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); bnx2x_init_hw_port()
7631	if (!CHIP_IS_E1(bp)) { bnx2x_init_hw_port()
7633	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, bnx2x_init_hw_port()
7634	(IS_MF_SD(bp) ? 0x1 : 0x2)); bnx2x_init_hw_port()
7636	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7638	switch (bp->mf_mode) { bnx2x_init_hw_port()
7648	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : bnx2x_init_hw_port()
7652	REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); bnx2x_init_hw_port()
7653	REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); bnx2x_init_hw_port()
7654	REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); bnx2x_init_hw_port()
7659	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); bnx2x_init_hw_port()
7663	val = REG_RD(bp, reg_addr); bnx2x_init_hw_port()
7665	REG_WR(bp, reg_addr, val); bnx2x_init_hw_port()
7671	static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) bnx2x_ilt_wr() argument
7676	if (CHIP_IS_E1(bp)) bnx2x_ilt_wr()
7683	REG_WR_DMAE(bp, reg, wb_write, 2); bnx2x_ilt_wr()
7686	void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) bnx2x_igu_clear_sb_gen() argument
7697	if (CHIP_INT_MODE_IS_BC(bp)) bnx2x_igu_clear_sb_gen()
7711	REG_WR(bp, igu_addr_data, data); bnx2x_igu_clear_sb_gen()
7716	REG_WR(bp, igu_addr_ctl, ctl); bnx2x_igu_clear_sb_gen()
7721	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) bnx2x_igu_clear_sb_gen()
7724	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { bnx2x_igu_clear_sb_gen()
7731	static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) bnx2x_igu_clear_sb() argument
7733	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); bnx2x_igu_clear_sb()
7736	static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) bnx2x_clear_func_ilt() argument
7740	bnx2x_ilt_wr(bp, i, 0); bnx2x_clear_func_ilt()
7743	static void bnx2x_init_searcher(struct bnx2x *bp) bnx2x_init_searcher() argument
7745	int port = BP_PORT(bp); bnx2x_init_searcher()
7746	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); bnx2x_init_searcher()
7748	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); bnx2x_init_searcher()
7751	static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) bnx2x_func_switch_update() argument
7762	func_params.f_obj = &bp->func_obj; bnx2x_func_switch_update()
7772	rc = bnx2x_func_state_change(bp, &func_params); bnx2x_func_switch_update()
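The IGU cleanup handshake in bnx2x_igu_clear_sb_gen() above (7711-7724) writes a command and then polls an acknowledge register; the initial value of cnt and the per-iteration delay are elided in the listing. A hedged sketch of the ack-wait, with the retry budget and delay chosen here purely for illustration:

	static bool igu_ack_wait_sketch(struct bnx2x *bp, u32 igu_addr_ack,
					u32 sb_bit)
	{
		int cnt = 100;			/* assumed retry budget */

		while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
			msleep(20);		/* assumed per-poll delay */

		return !!(REG_RD(bp, igu_addr_ack) & sb_bit);
	}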
7777	static int bnx2x_reset_nic_mode(struct bnx2x *bp) bnx2x_reset_nic_mode() argument
7779	int rc, i, port = BP_PORT(bp); bnx2x_reset_nic_mode()
7783	if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_reset_nic_mode()
7784	bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_reset_nic_mode()
7786	vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7788	REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7791	mac_en[i] = REG_RD(bp, port ? bnx2x_reset_nic_mode()
7796	REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + bnx2x_reset_nic_mode()
7803	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : bnx2x_reset_nic_mode()
7811	rc = bnx2x_func_switch_update(bp, 1); bnx2x_reset_nic_mode()
7818	REG_WR(bp, PRS_REG_NIC_MODE, 0); bnx2x_reset_nic_mode()
7821	if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_reset_nic_mode()
7822	bnx2x_set_rx_filter(&bp->link_params, 1); bnx2x_reset_nic_mode()
7824	REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7827	REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + bnx2x_reset_nic_mode()
7835	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : bnx2x_reset_nic_mode()
7839	rc = bnx2x_func_switch_update(bp, 0); bnx2x_reset_nic_mode()
7849	int bnx2x_init_hw_func_cnic(struct bnx2x *bp) bnx2x_init_hw_func_cnic() argument
7853	bnx2x_ilt_init_op_cnic(bp, INITOP_SET); bnx2x_init_hw_func_cnic()
7855	if (CONFIGURE_NIC_MODE(bp)) { bnx2x_init_hw_func_cnic()
7857	bnx2x_init_searcher(bp); bnx2x_init_hw_func_cnic()
7860	rc = bnx2x_reset_nic_mode(bp); bnx2x_init_hw_func_cnic()
7876	static void bnx2x_clean_pglue_errors(struct bnx2x *bp) bnx2x_clean_pglue_errors() argument
7878	if (!CHIP_IS_E1x(bp)) bnx2x_clean_pglue_errors()
7879	REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, bnx2x_clean_pglue_errors()
7880	1 << BP_ABS_FUNC(bp)); bnx2x_clean_pglue_errors()
7883	static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_hw_func() argument
7885	int port = BP_PORT(bp); bnx2x_init_hw_func()
7886	int func = BP_FUNC(bp); bnx2x_init_hw_func()
7888	struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_init_hw_func()
7897	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
7898	rc = bnx2x_pf_flr_clnup(bp); bnx2x_init_hw_func()
7900	bnx2x_fw_dump(bp); bnx2x_init_hw_func()
7906	if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_init_hw_func()
7908	val = REG_RD(bp, addr); bnx2x_init_hw_func()
7910	REG_WR(bp, addr, val); bnx2x_init_hw_func()
7913	bnx2x_init_block(bp, BLOCK_PXP, init_phase); bnx2x_init_hw_func()
7914	bnx2x_init_block(bp, BLOCK_PXP2, init_phase); bnx2x_init_hw_func()
7916	ilt = BP_ILT(bp); bnx2x_init_hw_func()
7919	if (IS_SRIOV(bp)) bnx2x_init_hw_func()
7921	cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); bnx2x_init_hw_func()
7927	for (i = 0; i < L2_ILT_LINES(bp); i++) { bnx2x_init_hw_func()
7928	ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; bnx2x_init_hw_func()
7930	bp->context[i].cxt_mapping; bnx2x_init_hw_func()
7931	ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; bnx2x_init_hw_func()
7934	bnx2x_ilt_init_op(bp, INITOP_SET); bnx2x_init_hw_func()
7936	if (!CONFIGURE_NIC_MODE(bp)) { bnx2x_init_hw_func()
7937	bnx2x_init_searcher(bp); bnx2x_init_hw_func()
7938	REG_WR(bp, PRS_REG_NIC_MODE, 0); bnx2x_init_hw_func()
7942	REG_WR(bp, PRS_REG_NIC_MODE, 1); bnx2x_init_hw_func()
7946	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
7952	if (!(bp->flags & USING_MSIX_FLAG)) bnx2x_init_hw_func()
7966	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_init_hw_func()
7968	REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); bnx2x_init_hw_func()
7971	bp->dmae_ready = 1; bnx2x_init_hw_func()
7973	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); bnx2x_init_hw_func()
7975	bnx2x_clean_pglue_errors(bp); bnx2x_init_hw_func()
7977	bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_hw_func()
7978	bnx2x_init_block(bp, BLOCK_DMAE, init_phase); bnx2x_init_hw_func()
7979	bnx2x_init_block(bp, BLOCK_NIG, init_phase); bnx2x_init_hw_func()
7980	bnx2x_init_block(bp, BLOCK_SRC, init_phase); bnx2x_init_hw_func()
7981	bnx2x_init_block(bp, BLOCK_MISC, init_phase); bnx2x_init_hw_func()
7982	bnx2x_init_block(bp, BLOCK_TCM, init_phase); bnx2x_init_hw_func()
7983	bnx2x_init_block(bp, BLOCK_UCM, init_phase); bnx2x_init_hw_func()
7984	bnx2x_init_block(bp, BLOCK_CCM, init_phase); bnx2x_init_hw_func()
7985	bnx2x_init_block(bp, BLOCK_XCM, init_phase); bnx2x_init_hw_func()
7986	bnx2x_init_block(bp, BLOCK_TSEM, init_phase); bnx2x_init_hw_func()
7987	bnx2x_init_block(bp, BLOCK_USEM, init_phase); bnx2x_init_hw_func()
7988	bnx2x_init_block(bp, BLOCK_CSEM, init_phase); bnx2x_init_hw_func()
7989	bnx2x_init_block(bp, BLOCK_XSEM, init_phase); bnx2x_init_hw_func()
7991	if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
7992	REG_WR(bp, QM_REG_PF_EN, 1); bnx2x_init_hw_func()
7994	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
7995	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
7996	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
7997	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
7998	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
8000	bnx2x_init_block(bp, BLOCK_QM, init_phase); bnx2x_init_hw_func()
8002	bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_hw_func()
8003	bnx2x_init_block(bp, BLOCK_DORQ, init_phase); bnx2x_init_hw_func()
8004	REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ bnx2x_init_hw_func()
8006	bnx2x_iov_init_dq(bp); bnx2x_init_hw_func()
8008	bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_hw_func()
8009	bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_hw_func()
8010	bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_hw_func()
8011	bnx2x_init_block(bp, BLOCK_CSDM, init_phase); bnx2x_init_hw_func()
8012	bnx2x_init_block(bp, BLOCK_USDM, init_phase); bnx2x_init_hw_func()
8013	bnx2x_init_block(bp, BLOCK_XSDM, init_phase); bnx2x_init_hw_func()
8014	bnx2x_init_block(bp, BLOCK_UPB, init_phase); bnx2x_init_hw_func()
8015	bnx2x_init_block(bp, BLOCK_XPB, init_phase); bnx2x_init_hw_func()
8016	bnx2x_init_block(bp, BLOCK_PBF, init_phase); bnx2x_init_hw_func()
8017	if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
8018	REG_WR(bp, PBF_REG_DISABLE_PF, 0); bnx2x_init_hw_func()
8020	bnx2x_init_block(bp, BLOCK_CDU, init_phase); bnx2x_init_hw_func()
8022	bnx2x_init_block(bp, BLOCK_CFC, init_phase); bnx2x_init_hw_func()
8024	if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
8025	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); bnx2x_init_hw_func()
8027	if (IS_MF(bp)) { bnx2x_init_hw_func()
8028	if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { bnx2x_init_hw_func()
8029	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); bnx2x_init_hw_func()
8030	REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, bnx2x_init_hw_func()
8031	bp->mf_ov); bnx2x_init_hw_func()
8035	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); bnx2x_init_hw_func()
8038	if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_init_hw_func()
8039	if (CHIP_IS_E1H(bp)) { bnx2x_init_hw_func()
8040	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_init_hw_func()
8042	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); bnx2x_init_hw_func()
8043	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); bnx2x_init_hw_func()
8045	bnx2x_init_block(bp, BLOCK_HC, init_phase); bnx2x_init_hw_func()
8050	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_init_hw_func()
8052	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8053	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); bnx2x_init_hw_func()
8054	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); bnx2x_init_hw_func()
8057	bnx2x_init_block(bp, BLOCK_IGU, init_phase); bnx2x_init_hw_func()
8059	if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8082	num_segs = CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8084	for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { bnx2x_init_hw_func()
8085	prod_offset = (bp->igu_base_sb + sb_idx) * bnx2x_init_hw_func()
8091	REG_WR(bp, addr, 0); bnx2x_init_hw_func()
8094	bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, bnx2x_init_hw_func()
8096	bnx2x_igu_clear_sb(bp, bnx2x_init_hw_func()
8097	bp->igu_base_sb + sb_idx); bnx2x_init_hw_func()
8101	num_segs = CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8104	if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_init_hw_func()
8105	dsb_idx = BP_FUNC(bp); bnx2x_init_hw_func()
8107	dsb_idx = BP_VN(bp); bnx2x_init_hw_func()
8109	prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8121	REG_WR(bp, addr, 0); bnx2x_init_hw_func()
8124	if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_init_hw_func()
8125	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8127	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8129	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8131	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8133	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8136	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8138	bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8141	bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); bnx2x_init_hw_func()
8145	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); bnx2x_init_hw_func()
8146	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); bnx2x_init_hw_func()
8147	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); bnx2x_init_hw_func()
8148	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); bnx2x_init_hw_func()
8149	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); bnx2x_init_hw_func()
8150	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); bnx2x_init_hw_func()
8155	REG_WR(bp, 0x2114, 0xffffffff); bnx2x_init_hw_func()
8156	REG_WR(bp, 0x2120, 0xffffffff); bnx2x_init_hw_func()
8158	if (CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8161	BP_PORT(bp) * (main_mem_size * 4); bnx2x_init_hw_func()
8165	val = REG_RD(bp, main_mem_prty_clr); bnx2x_init_hw_func()
8175	bnx2x_read_dmae(bp, i, main_mem_width / 4); bnx2x_init_hw_func()
8176	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), bnx2x_init_hw_func()
8180	REG_RD(bp, main_mem_prty_clr); bnx2x_init_hw_func()
8185	REG_WR8(bp, BAR_USTRORM_INTMEM + bnx2x_init_hw_func()
8186	USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8187	REG_WR8(bp, BAR_TSTRORM_INTMEM + bnx2x_init_hw_func()
8188	TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8189	REG_WR8(bp, BAR_CSTRORM_INTMEM + bnx2x_init_hw_func()
8190	CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8191	REG_WR8(bp, BAR_XSTRORM_INTMEM + bnx2x_init_hw_func()
8192	XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8195	bnx2x_phy_probe(&bp->link_params); bnx2x_init_hw_func()
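The E1x-only workaround at 8158-8180 walks the HC main memory so that latched parity errors are cleared before the block is used; the loop header and bounds are elided above. A condensed, hedged sketch of that walk (the parameters stand in for the elided locals):

	static void hc_main_mem_flush_sketch(struct bnx2x *bp, u32 base,
					     u32 size, u32 width, u32 prty_clr)
	{
		u32 i;

		for (i = base; i < base + size * 4; i += width) {
			/* read one line back via DMAE ... */
			bnx2x_read_dmae(bp, i, width / 4);
			/* ... and rewrite it from the slow-path buffer */
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, width / 4);
		}
		REG_RD(bp, prty_clr);	/* final read clears parity status */
	}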
8200	void bnx2x_free_mem_cnic(struct bnx2x *bp) bnx2x_free_mem_cnic() argument
8202	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); bnx2x_free_mem_cnic()
8204	if (!CHIP_IS_E1x(bp)) bnx2x_free_mem_cnic()
8205	BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, bnx2x_free_mem_cnic()
8208	BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, bnx2x_free_mem_cnic()
8211	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); bnx2x_free_mem_cnic()
8214	void bnx2x_free_mem(struct bnx2x *bp) bnx2x_free_mem() argument
8218	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bnx2x_free_mem()
8219	bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_free_mem()
8221	if (IS_VF(bp)) bnx2x_free_mem()
8224	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, bnx2x_free_mem()
8227	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, bnx2x_free_mem()
8230	for (i = 0; i < L2_ILT_LINES(bp); i++) bnx2x_free_mem()
8231	BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, bnx2x_free_mem()
8232	bp->context[i].size); bnx2x_free_mem()
8233	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); bnx2x_free_mem()
8235	BNX2X_FREE(bp->ilt->lines); bnx2x_free_mem()
8237	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); bnx2x_free_mem()
8239	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, bnx2x_free_mem()
8242	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); bnx2x_free_mem()
8244	bnx2x_iov_free_mem(bp); bnx2x_free_mem()
8247	int bnx2x_alloc_mem_cnic(struct bnx2x *bp) bnx2x_alloc_mem_cnic() argument
8249	if (!CHIP_IS_E1x(bp)) { bnx2x_alloc_mem_cnic()
8251	bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, bnx2x_alloc_mem_cnic()
8253	if (!bp->cnic_sb.e2_sb) bnx2x_alloc_mem_cnic()
8256	bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, bnx2x_alloc_mem_cnic()
8258	if (!bp->cnic_sb.e1x_sb) bnx2x_alloc_mem_cnic()
8262	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { bnx2x_alloc_mem_cnic()
8264	bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); bnx2x_alloc_mem_cnic()
8265	if (!bp->t2) bnx2x_alloc_mem_cnic()
8270	bp->cnic_eth_dev.addr_drv_info_to_mcp = bnx2x_alloc_mem_cnic()
8271	&bp->slowpath->drv_info_to_mcp; bnx2x_alloc_mem_cnic()
8273	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) bnx2x_alloc_mem_cnic()
8279	bnx2x_free_mem_cnic(bp); bnx2x_alloc_mem_cnic()
8284	int bnx2x_alloc_mem(struct bnx2x *bp) bnx2x_alloc_mem() argument
8288	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { bnx2x_alloc_mem()
8290	bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); bnx2x_alloc_mem()
8291	if (!bp->t2) bnx2x_alloc_mem()
8295	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, bnx2x_alloc_mem()
8297	if (!bp->def_status_blk) bnx2x_alloc_mem()
8300	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, bnx2x_alloc_mem()
8302	if (!bp->slowpath) bnx2x_alloc_mem()
8318	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); bnx2x_alloc_mem()
8321	bp->context[i].size = min(CDU_ILT_PAGE_SZ, bnx2x_alloc_mem()
8323	bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, bnx2x_alloc_mem()
8324	bp->context[i].size); bnx2x_alloc_mem()
8325	if (!bp->context[i].vcxt) bnx2x_alloc_mem()
8327	allocated += bp->context[i].size; bnx2x_alloc_mem()
8329	bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), bnx2x_alloc_mem()
8331	if (!bp->ilt->lines) bnx2x_alloc_mem()
8334	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) bnx2x_alloc_mem()
8337	if (bnx2x_iov_alloc_mem(bp)) bnx2x_alloc_mem()
8341	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); bnx2x_alloc_mem()
8342	if (!bp->spq) bnx2x_alloc_mem()
8346	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, bnx2x_alloc_mem()
8348	if (!bp->eq_ring) bnx2x_alloc_mem()
8354	bnx2x_free_mem(bp); bnx2x_alloc_mem()
8363	int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, bnx2x_set_mac_one() argument
8389	rc = bnx2x_config_vlan_mac(bp, &ramrod_param); bnx2x_set_mac_one()
8401	int bnx2x_del_all_macs(struct bnx2x *bp, bnx2x_del_all_macs() argument
8415	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); bnx2x_del_all_macs()
8422	int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) bnx2x_set_eth_mac() argument
8424	if (IS_PF(bp)) { bnx2x_set_eth_mac()
8429	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, bnx2x_set_eth_mac()
8430	&bp->sp_objs->mac_obj, set, bnx2x_set_eth_mac()
8433	return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bnx2x_set_eth_mac()
8434	bp->fp->index, true); bnx2x_set_eth_mac()
8438	int bnx2x_setup_leading(struct bnx2x *bp) bnx2x_setup_leading() argument
8440	if (IS_PF(bp)) bnx2x_setup_leading()
8441	return bnx2x_setup_queue(bp, &bp->fp[0], true); bnx2x_setup_leading()
8443	return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); bnx2x_setup_leading()
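BNX2X_PCI_ALLOC()/BNX2X_PCI_FREE(), used throughout bnx2x_alloc_mem()/bnx2x_free_mem() above, are macros whose bodies are not visible in this listing. A plausible minimal equivalent on top of the standard DMA API (the function names here are illustrative, not the driver's):

	static void *pci_alloc_sketch(struct bnx2x *bp, dma_addr_t *mapping,
				      size_t size)
	{
		/* zeroed, coherent DMA memory, as the status blocks require */
		return dma_zalloc_coherent(&bp->pdev->dev, size, mapping,
					   GFP_KERNEL);
	}

	static void pci_free_sketch(struct bnx2x *bp, void *vaddr,
				    dma_addr_t mapping, size_t size)
	{
		if (vaddr)
			dma_free_coherent(&bp->pdev->dev, size, vaddr, mapping);
	}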
8449	 * @bp: driver handle
8453	int bnx2x_set_int_mode(struct bnx2x *bp) bnx2x_set_int_mode() argument
8457	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { bnx2x_set_int_mode()
8465	rc = bnx2x_enable_msix(bp); bnx2x_set_int_mode()
8472	if (rc && IS_VF(bp)) bnx2x_set_int_mode()
8477	bp->num_queues, bnx2x_set_int_mode()
8478	1 + bp->num_cnic_queues); bnx2x_set_int_mode()
8482	bnx2x_enable_msi(bp); bnx2x_set_int_mode()
8486	bp->num_ethernet_queues = 1; bnx2x_set_int_mode()
8487	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_set_int_mode()
8498	static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) bnx2x_cid_ilt_lines() argument
8500	if (IS_SRIOV(bp)) bnx2x_cid_ilt_lines()
8502	return L2_ILT_LINES(bp); bnx2x_cid_ilt_lines()
8505	void bnx2x_ilt_set_info(struct bnx2x *bp) bnx2x_ilt_set_info() argument
8508	struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_set_info()
8511	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); bnx2x_ilt_set_info()
8520	line += bnx2x_cid_ilt_lines(bp); bnx2x_ilt_set_info()
8522	if (CNIC_SUPPORT(bp)) bnx2x_ilt_set_info()
8534	if (QM_INIT(bp->qm_cid_count)) { bnx2x_ilt_set_info()
8542	line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, bnx2x_ilt_set_info()
8556	if (CNIC_SUPPORT(bp)) { bnx2x_ilt_set_info()
8598	 * @bp: driver handle
8606	static void bnx2x_pf_q_prep_init(struct bnx2x *bp, bnx2x_pf_q_prep_init() argument
8624	init_params->rx.hc_rate = bp->rx_ticks ? bnx2x_pf_q_prep_init()
8625	(1000000 / bp->rx_ticks) : 0; bnx2x_pf_q_prep_init()
8626	init_params->tx.hc_rate = bp->tx_ticks ? bnx2x_pf_q_prep_init()
8627	(1000000 / bp->tx_ticks) : 0; bnx2x_pf_q_prep_init()
8653	&bp->context[cxt_index].vcxt[cxt_offset].eth; bnx2x_pf_q_prep_init()
8657	static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_setup_tx_only() argument
8668	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); bnx2x_setup_tx_only()
8674	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); bnx2x_setup_tx_only()
8677	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); bnx2x_setup_tx_only()
8686	return bnx2x_queue_state_change(bp, q_params); bnx2x_setup_tx_only()
8692	 * @bp: driver handle
8700	int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_setup_queue() argument
8715	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, bnx2x_setup_queue()
8718	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_setup_queue()
8723	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); bnx2x_setup_queue()
8729	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_setup_queue()
8741	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); bnx2x_setup_queue()
8744	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, bnx2x_setup_queue()
8747	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, bnx2x_setup_queue()
8750	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, bnx2x_setup_queue()
8757	bp->fcoe_init = true; bnx2x_setup_queue()
8760	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_setup_queue()
8772	rc = bnx2x_setup_tx_only(bp, fp, &q_params, bnx2x_setup_queue()
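bnx2x_setup_queue() above drives a queue object through an explicit state machine: INIT first (8723-8729), then SETUP (8741-8760), and finally one SETUP_TX_ONLY per extra traffic class (8772). Condensed to its control flow, under the assumption that q_params carries over between calls:

	static int setup_queue_order_sketch(struct bnx2x *bp,
					    struct bnx2x_queue_state_params *q_params)
	{
		int rc;

		q_params->cmd = BNX2X_Q_CMD_INIT;	/* must precede SETUP */
		rc = bnx2x_queue_state_change(bp, q_params);
		if (rc)
			return rc;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		return bnx2x_queue_state_change(bp, q_params);
	}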
8784	static int bnx2x_stop_queue(struct bnx2x *bp, int index) bnx2x_stop_queue() argument
8786	struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_stop_queue()
8793	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_stop_queue()
8814	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8823	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8830	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8839	rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8847	return bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8850	static void bnx2x_reset_func(struct bnx2x *bp) bnx2x_reset_func() argument
8852	int port = BP_PORT(bp); bnx2x_reset_func()
8853	int func = BP_FUNC(bp); bnx2x_reset_func()
8857	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8858	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8859	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8860	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8863	for_each_eth_queue(bp, i) { for_each_eth_queue()
8864	struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
8865	REG_WR8(bp, BAR_CSTRORM_INTMEM + for_each_eth_queue()
8870	if (CNIC_LOADED(bp))
8872	REG_WR8(bp, BAR_CSTRORM_INTMEM +
8874	(bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8877	REG_WR8(bp, BAR_CSTRORM_INTMEM +
8882	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8886	if (bp->common.int_block == INT_BLOCK_HC) {
8887	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8888	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8890	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8891	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8894	if (CNIC_LOADED(bp)) {
8896	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8903	if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8908	bnx2x_clear_func_ilt(bp, func);
8913	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8921	bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8925	if (!CHIP_IS_E1x(bp))
8926	bnx2x_pf_disable(bp);
8928	bp->dmae_ready = 0;
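bnx2x_reset_func() above disables the timer block at 8896 and then waits for its linear scan to stop; the poll around 8903 is elided in the listing. A hedged sketch of that wait, with the iteration count and sleep range assumed for illustration:

	static int tm_wait_scan_stop_sketch(struct bnx2x *bp, int port)
	{
		int i;

		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
		for (i = 0; i < 200; i++) {		/* assumed bound */
			if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
				return 0;		/* scan stopped */
			usleep_range(10000, 20000);	/* assumed delay */
		}
		return -EBUSY;
	}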
8931	static void bnx2x_reset_port(struct bnx2x *bp) bnx2x_reset_port() argument
8933	int port = BP_PORT(bp); bnx2x_reset_port()
8937	bnx2x__link_reset(bp); bnx2x_reset_port()
8939	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); bnx2x_reset_port()
8942	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); bnx2x_reset_port()
8944	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : bnx2x_reset_port()
8948	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); bnx2x_reset_port()
8952	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); bnx2x_reset_port()
8960	static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) bnx2x_reset_hw() argument
8967	func_params.f_obj = &bp->func_obj; bnx2x_reset_hw()
8972	return bnx2x_func_state_change(bp, &func_params); bnx2x_reset_hw()
8975	static int bnx2x_func_stop(struct bnx2x *bp) bnx2x_func_stop() argument
8982	func_params.f_obj = &bp->func_obj; bnx2x_func_stop()
8991	rc = bnx2x_func_state_change(bp, &func_params); bnx2x_func_stop()
8998	return bnx2x_func_state_change(bp, &func_params); bnx2x_func_stop()
9008	 * @bp: driver handle
9013	u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) bnx2x_send_unload_req() argument
9016	int port = BP_PORT(bp); bnx2x_send_unload_req()
9022	else if (bp->flags & NO_WOL_FLAG) bnx2x_send_unload_req()
9025	else if (bp->wol) { bnx2x_send_unload_req()
9027	u8 *mac_addr = bp->dev->dev_addr; bnx2x_send_unload_req()
9028	struct pci_dev *pdev = bp->pdev; bnx2x_send_unload_req()
9035	u8 entry = (BP_VN(bp) + 1)*8; bnx2x_send_unload_req()
9038	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); bnx2x_send_unload_req()
9042	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); bnx2x_send_unload_req()
9055	if (!BP_NOMCP(bp)) bnx2x_send_unload_req()
9056	reset_code = bnx2x_fw_command(bp, reset_code, 0); bnx2x_send_unload_req()
9058	int path = BP_PATH(bp); bnx2x_send_unload_req()
9082	 * @bp: driver handle
9085	void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) bnx2x_send_unload_done() argument
9090	if (!BP_NOMCP(bp)) bnx2x_send_unload_done()
9091	bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); bnx2x_send_unload_done()
9094	static int bnx2x_func_wait_started(struct bnx2x *bp) bnx2x_func_wait_started() argument
9097	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; bnx2x_func_wait_started()
9099	if (!bp->port.pmf) bnx2x_func_wait_started()
9118	synchronize_irq(bp->msix_table[0].vector); bnx2x_func_wait_started()
9120	synchronize_irq(bp->pdev->irq); bnx2x_func_wait_started()
9125	while (bnx2x_func_get_state(bp, &bp->func_obj) != bnx2x_func_wait_started()
9129	if (bnx2x_func_get_state(bp, &bp->func_obj) != bnx2x_func_wait_started()
9144	func_params.f_obj = &bp->func_obj; bnx2x_func_wait_started()
9150	bnx2x_func_state_change(bp, &func_params); bnx2x_func_wait_started()
9154	return bnx2x_func_state_change(bp, &func_params); bnx2x_func_wait_started()
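At 9035-9042 bnx2x_send_unload_req() programs the station address into the EMAC match registers for Wake-on-LAN, but the packing of val is elided above. The conventional packing for this register pair splits the MAC big-endian across two words; a hedged reconstruction, not confirmed by the listing:

	static void emac_wol_match_sketch(struct bnx2x *bp, const u8 *mac_addr,
					  u32 entry)
	{
		u32 val;

		/* top two bytes of the MAC, assumed layout */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		/* remaining four bytes, assumed layout */
		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
	}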
9161	static void bnx2x_disable_ptp(struct bnx2x *bp) bnx2x_disable_ptp() argument
9163	int port = BP_PORT(bp); bnx2x_disable_ptp()
9166	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_disable_ptp()
9170	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_disable_ptp()
9172	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_disable_ptp()
9174	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_disable_ptp()
9176	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_disable_ptp()
9180	REG_WR(bp, port ? NIG_REG_P1_PTP_EN : bnx2x_disable_ptp()
9185	static void bnx2x_stop_ptp(struct bnx2x *bp) bnx2x_stop_ptp() argument
9190	cancel_work_sync(&bp->ptp_task); bnx2x_stop_ptp()
9192	if (bp->ptp_tx_skb) { bnx2x_stop_ptp()
9193	dev_kfree_skb_any(bp->ptp_tx_skb); bnx2x_stop_ptp()
9194	bp->ptp_tx_skb = NULL; bnx2x_stop_ptp()
9198	bnx2x_disable_ptp(bp); bnx2x_stop_ptp()
9203	void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_chip_cleanup() argument
9205	int port = BP_PORT(bp); bnx2x_chip_cleanup()
9212	for_each_tx_queue(bp, i) { for_each_tx_queue()
9213	struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_tx_queue()
9216	rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); for_each_tx_queue()
9227	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9233	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9240	if (!CHIP_IS_E1(bp))
9241	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9247	netif_addr_lock_bh(bp->dev);
9249	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9250	set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9252	bnx2x_set_storm_rx_mode(bp);
9255	rparam.mcast_obj = &bp->mcast_obj;
9256	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9260	netif_addr_unlock_bh(bp->dev);
9262	bnx2x_iov_chip_cleanup(bp);
9269	reset_code = bnx2x_send_unload_req(bp, unload_mode);
9275	rc = bnx2x_func_wait_started(bp);
9286	for_each_eth_queue(bp, i) for_each_eth_queue()
9287	if (bnx2x_stop_queue(bp, i)) for_each_eth_queue()
9294	if (CNIC_LOADED(bp)) { for_each_eth_queue()
9295	for_each_cnic_queue(bp, i) for_each_eth_queue()
9296	if (bnx2x_stop_queue(bp, i)) for_each_eth_queue()
9307	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9313	rc = bnx2x_func_stop(bp);
9326	if (bp->flags & PTP_SUPPORTED)
9327	bnx2x_stop_ptp(bp);
9330	bnx2x_netif_stop(bp, 1);
9332	bnx2x_del_all_napi(bp);
9333	if (CNIC_LOADED(bp))
9334	bnx2x_del_all_napi_cnic(bp);
9337	bnx2x_free_irq(bp);
9340	rc = bnx2x_reset_hw(bp, reset_code);
9345	bnx2x_send_unload_done(bp, keep_link);
9348	void bnx2x_disable_close_the_gate(struct bnx2x *bp) bnx2x_disable_close_the_gate() argument
9354	if (CHIP_IS_E1(bp)) { bnx2x_disable_close_the_gate()
9355	int port = BP_PORT(bp); bnx2x_disable_close_the_gate()
9359	val = REG_RD(bp, addr); bnx2x_disable_close_the_gate()
9361	REG_WR(bp, addr, val); bnx2x_disable_close_the_gate()
9363	val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); bnx2x_disable_close_the_gate()
9366	REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); bnx2x_disable_close_the_gate()
9371	static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) bnx2x_set_234_gates() argument
9376	if (!CHIP_IS_E1(bp)) { bnx2x_set_234_gates()
9378	REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); bnx2x_set_234_gates()
9380	REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); bnx2x_set_234_gates()
9384	if (CHIP_IS_E1x(bp)) { bnx2x_set_234_gates()
9386	val = REG_RD(bp, HC_REG_CONFIG_1); bnx2x_set_234_gates()
9387	REG_WR(bp, HC_REG_CONFIG_1, bnx2x_set_234_gates()
9391	val = REG_RD(bp, HC_REG_CONFIG_0); bnx2x_set_234_gates()
9392	REG_WR(bp, HC_REG_CONFIG_0, bnx2x_set_234_gates()
9397	val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); bnx2x_set_234_gates()
9399	REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, bnx2x_set_234_gates()
9412	static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) bnx2x_clp_reset_prep() argument
9415	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); bnx2x_clp_reset_prep()
9417	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); bnx2x_clp_reset_prep()
9423	 * @bp: driver handle
9426	static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) bnx2x_clp_reset_done() argument
9429	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); bnx2x_clp_reset_done()
9430	MF_CFG_WR(bp, shared_mf_config.clp_mb, bnx2x_clp_reset_done()
9437	 * @bp: driver handle
9442	static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) bnx2x_reset_mcp_prep() argument
9450	if (!CHIP_IS_E1(bp)) bnx2x_reset_mcp_prep()
9451	bnx2x_clp_reset_prep(bp, magic_val); bnx2x_reset_mcp_prep()
9454	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); bnx2x_reset_mcp_prep()
9456	offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); bnx2x_reset_mcp_prep()
9460	REG_WR(bp, shmem + validity_offset, 0); bnx2x_reset_mcp_prep()
9469	 * @bp: driver handle
9471	static void bnx2x_mcp_wait_one(struct bnx2x *bp) bnx2x_mcp_wait_one() argument
9475	if (CHIP_REV_IS_SLOW(bp)) bnx2x_mcp_wait_one()
9482	 * initializes bp->common.shmem_base and waits for validity signature to appear
9484	static int bnx2x_init_shmem(struct bnx2x *bp) bnx2x_init_shmem() argument
9490	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); bnx2x_init_shmem()
9491	if (bp->common.shmem_base) { bnx2x_init_shmem()
9492	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); bnx2x_init_shmem()
9497	bnx2x_mcp_wait_one(bp); bnx2x_init_shmem()
9506	static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) bnx2x_reset_mcp_comp() argument
9508	int rc = bnx2x_init_shmem(bp); bnx2x_reset_mcp_comp()
9511	if (!CHIP_IS_E1(bp)) bnx2x_reset_mcp_comp()
9512	bnx2x_clp_reset_done(bp, magic_val); bnx2x_reset_mcp_comp()
9517	static void bnx2x_pxp_prep(struct bnx2x *bp) bnx2x_pxp_prep() argument
9519	if (!CHIP_IS_E1(bp)) { bnx2x_pxp_prep()
9520	REG_WR(bp, PXP2_REG_RD_START_INIT, 0); bnx2x_pxp_prep()
9521	REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); bnx2x_pxp_prep()
9536	static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset() argument
9588	if (CHIP_IS_E1(bp)) bnx2x_process_kill_chip_reset()
9590	else if (CHIP_IS_E1H(bp)) bnx2x_process_kill_chip_reset()
9592	else if (CHIP_IS_E2(bp)) bnx2x_process_kill_chip_reset()
9615	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_process_kill_chip_reset()
9618	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, bnx2x_process_kill_chip_reset()
9624	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_process_kill_chip_reset()
9630	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); bnx2x_process_kill_chip_reset()
9638	 * @bp: driver handle
9643	static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) bnx2x_er_poll_igu_vq() argument
9649	pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); bnx2x_er_poll_igu_vq()
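bnx2x_init_shmem() above (9484-9497) retries until the shmem validity signature appears, but the loop bound and the signature mask are elided in the listing. The shape of the wait; the retry bound and the SHR_MEM_VALIDITY_MB test are assumptions here:

	static int init_shmem_sketch(struct bnx2x *bp)
	{
		int cnt = 0;
		u32 val = 0;

		do {
			bp->common.shmem_base =
				REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
			if (bp->common.shmem_base) {
				val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
				if (val & SHR_MEM_VALIDITY_MB)	/* assumed mask */
					return 0;
			}
			bnx2x_mcp_wait_one(bp);
		} while (cnt++ < 200);			/* assumed bound */

		return -ENODEV;
	}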
9666	static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill() argument
9675	sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); bnx2x_process_kill()
9676	blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); bnx2x_process_kill()
9677	port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); bnx2x_process_kill()
9678	port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); bnx2x_process_kill()
9679	pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); bnx2x_process_kill()
9680	if (CHIP_IS_E3(bp)) bnx2x_process_kill()
9681	tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); bnx2x_process_kill()
9687	(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) bnx2x_process_kill()
9703	bnx2x_set_234_gates(bp, true); bnx2x_process_kill()
9706	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) bnx2x_process_kill()
9712	REG_WR(bp, MISC_REG_UNPREPARED, 0); bnx2x_process_kill()
9726	bnx2x_reset_mcp_prep(bp, &val); bnx2x_process_kill()
9729	bnx2x_pxp_prep(bp); bnx2x_process_kill()
9733	bnx2x_process_kill_chip_reset(bp, global); bnx2x_process_kill()
9737	if (!CHIP_IS_E1x(bp)) bnx2x_process_kill()
9738	REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); bnx2x_process_kill()
9742	if (global && bnx2x_reset_mcp_comp(bp, val)) bnx2x_process_kill()
9748	bnx2x_set_234_gates(bp, false); bnx2x_process_kill()
9756	static int bnx2x_leader_reset(struct bnx2x *bp) bnx2x_leader_reset() argument
9759	bool global = bnx2x_reset_is_global(bp); bnx2x_leader_reset()
9765	if (!global && !BP_NOMCP(bp)) { bnx2x_leader_reset()
9766	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, bnx2x_leader_reset()
9779	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_leader_reset()
9788	if (bnx2x_process_kill(bp, global)) { bnx2x_leader_reset()
9790	BP_PATH(bp)); bnx2x_leader_reset()
9799	bnx2x_set_reset_done(bp); bnx2x_leader_reset()
9801	bnx2x_clear_reset_global(bp); bnx2x_leader_reset()
9805	if (!global && !BP_NOMCP(bp)) { bnx2x_leader_reset()
9806	bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bnx2x_leader_reset()
9807	bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); bnx2x_leader_reset()
9810	bp->is_leader = 0; bnx2x_leader_reset()
9811	bnx2x_release_leader_lock(bp); bnx2x_leader_reset()
9816	static void bnx2x_recovery_failed(struct bnx2x *bp) bnx2x_recovery_failed() argument
9818	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); bnx2x_recovery_failed()
9821	netif_device_detach(bp->dev); bnx2x_recovery_failed()
9827	bnx2x_set_reset_in_progress(bp); bnx2x_recovery_failed()
9830	bnx2x_set_power_state(bp, PCI_D3hot); bnx2x_recovery_failed()
9832	bp->recovery_state = BNX2X_RECOVERY_FAILED; bnx2x_recovery_failed()
9840	 * will never be called when netif_running(bp->dev) is false.
9842	static void bnx2x_parity_recover(struct bnx2x *bp) bnx2x_parity_recover() argument
9850	switch (bp->recovery_state) { bnx2x_parity_recover()
9853	is_parity = bnx2x_chk_parity_attn(bp, &global, false); bnx2x_parity_recover()
9857	if (bnx2x_trylock_leader_lock(bp)) { bnx2x_parity_recover()
9858	bnx2x_set_reset_in_progress(bp); bnx2x_parity_recover()
9866	bnx2x_set_reset_global(bp); bnx2x_parity_recover()
9868	bp->is_leader = 1; bnx2x_parity_recover()
9873	if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) bnx2x_parity_recover()
9876	bp->recovery_state = BNX2X_RECOVERY_WAIT; bnx2x_parity_recover()
9887	if (bp->is_leader) { bnx2x_parity_recover()
9888	int other_engine = BP_PATH(bp) ? 0 : 1; bnx2x_parity_recover()
9890	bnx2x_get_load_status(bp, other_engine); bnx2x_parity_recover()
9892	bnx2x_get_load_status(bp, BP_PATH(bp)); bnx2x_parity_recover()
9893	global = bnx2x_reset_is_global(bp); bnx2x_parity_recover()
9908	schedule_delayed_work(&bp->sp_rtnl_task, bnx2x_parity_recover()
9917	if (bnx2x_leader_reset(bp)) { bnx2x_parity_recover()
9918	bnx2x_recovery_failed(bp); bnx2x_parity_recover()
9930	if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { bnx2x_parity_recover()
9937	if (bnx2x_trylock_leader_lock(bp)) { bnx2x_parity_recover()
9941	bp->is_leader = 1; bnx2x_parity_recover()
9945	schedule_delayed_work(&bp->sp_rtnl_task, bnx2x_parity_recover()
9954	if (bnx2x_reset_is_global(bp)) { bnx2x_parity_recover()
9956	&bp->sp_rtnl_task, bnx2x_parity_recover()
9962	bp->eth_stats.recoverable_error; bnx2x_parity_recover()
9964	bp->eth_stats.unrecoverable_error; bnx2x_parity_recover()
9965	bp->recovery_state = bnx2x_parity_recover()
9967	if (bnx2x_nic_load(bp, LOAD_NORMAL)) { bnx2x_parity_recover()
9969	netdev_err(bp->dev, bnx2x_parity_recover()
9972	netif_device_detach(bp->dev); bnx2x_parity_recover()
9975	bp, PCI_D3hot); bnx2x_parity_recover()
9978	bp->recovery_state = bnx2x_parity_recover()
9983	bp->eth_stats.recoverable_error = bnx2x_parity_recover()
9985	bp->eth_stats.unrecoverable_error = bnx2x_parity_recover()
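The recovery flow in bnx2x_parity_recover() hinges on a leader election: whichever function grabs the leader lock first (9857, 9937) drives the reset for the whole path while the others wait in BNX2X_RECOVERY_WAIT. The election step, condensed from the listing above:

	static void try_become_leader_sketch(struct bnx2x *bp, bool global)
	{
		if (bnx2x_trylock_leader_lock(bp)) {
			bnx2x_set_reset_in_progress(bp);
			/* a global parity event marks the reset global too */
			if (global)
				bnx2x_set_reset_global(bp);
			bp->is_leader = 1;
		}
	}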
10004	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); bnx2x_sp_rtnl_task() local
10008	if (!netif_running(bp->dev)) { bnx2x_sp_rtnl_task()
10013	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { bnx2x_sp_rtnl_task()
10023	bp->sp_rtnl_state = 0; bnx2x_sp_rtnl_task()
10026	bnx2x_parity_recover(bp); bnx2x_sp_rtnl_task()
10032	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10043	bp->sp_rtnl_state = 0; bnx2x_sp_rtnl_task()
10046	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_sp_rtnl_task()
10047	bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_sp_rtnl_task()
10055	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10056	bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); bnx2x_sp_rtnl_task()
10057	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10058	bnx2x_after_function_update(bp); bnx2x_sp_rtnl_task()
10064	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10066	netif_device_detach(bp->dev); bnx2x_sp_rtnl_task()
10067	bnx2x_close(bp->dev); bnx2x_sp_rtnl_task()
10072	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10075	bnx2x_vfpf_set_mcast(bp->dev); bnx2x_sp_rtnl_task()
10078	&bp->sp_rtnl_state)){ bnx2x_sp_rtnl_task()
10079	if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { bnx2x_sp_rtnl_task()
10080	bnx2x_tx_disable(bp); bnx2x_sp_rtnl_task()
10085	if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10087	bnx2x_set_rx_mode_inner(bp); bnx2x_sp_rtnl_task()
10091	&bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10092	bnx2x_pf_set_vfs_vlan(bp); bnx2x_sp_rtnl_task()
10094	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10095	bnx2x_dcbx_stop_hw_tx(bp); bnx2x_sp_rtnl_task()
10096	bnx2x_dcbx_resume_hw_tx(bp); bnx2x_sp_rtnl_task()
10100	&bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10101	bnx2x_update_mng_version(bp); bnx2x_sp_rtnl_task()
10109	if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, bnx2x_sp_rtnl_task()
10110	&bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10111	bnx2x_disable_sriov(bp); bnx2x_sp_rtnl_task()
10112	bnx2x_enable_sriov(bp); bnx2x_sp_rtnl_task()
10118	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); bnx2x_period_task() local
10120	if (!netif_running(bp->dev)) bnx2x_period_task()
10123	if (CHIP_REV_IS_SLOW(bp)) { bnx2x_period_task()
10128	bnx2x_acquire_phy_lock(bp); bnx2x_period_task()
10131	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and bnx2x_period_task()
10135	if (bp->port.pmf) { bnx2x_period_task()
10136	bnx2x_period_func(&bp->link_params, &bp->link_vars); bnx2x_period_task()
10139	queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); bnx2x_period_task()
10142	bnx2x_release_phy_lock(bp); bnx2x_period_task()
10151	static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) bnx2x_get_pretend_reg() argument
10155	return base + (BP_ABS_FUNC(bp)) * stride; bnx2x_get_pretend_reg()
10158	static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, bnx2x_prev_unload_close_umac() argument
10171	vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); bnx2x_prev_unload_close_umac()
10172	REG_WR(bp, vals->umac_addr[port], 0); bnx2x_prev_unload_close_umac()
10177	static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, bnx2x_prev_unload_close_mac() argument
10182	u8 port = BP_PORT(bp); bnx2x_prev_unload_close_mac()
10187	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); bnx2x_prev_unload_close_mac()
10189	if (!CHIP_IS_E3(bp)) { bnx2x_prev_unload_close_mac()
10190	val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); bnx2x_prev_unload_close_mac()
10195	base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM bnx2x_prev_unload_close_mac()
10197	offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL bnx2x_prev_unload_close_mac()
10206	wb_data[0] = REG_RD(bp, base_addr + offset); bnx2x_prev_unload_close_mac()
10207	wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); bnx2x_prev_unload_close_mac()
10212	REG_WR(bp, vals->bmac_addr, wb_data[0]); bnx2x_prev_unload_close_mac()
10213	REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); bnx2x_prev_unload_close_mac()
10216	vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; bnx2x_prev_unload_close_mac()
10217	vals->emac_val = REG_RD(bp, vals->emac_addr); bnx2x_prev_unload_close_mac()
10218	REG_WR(bp, vals->emac_addr, 0); bnx2x_prev_unload_close_mac()
10223	base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; bnx2x_prev_unload_close_mac()
10224	val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); bnx2x_prev_unload_close_mac()
10225	REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, bnx2x_prev_unload_close_mac()
10227	REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, bnx2x_prev_unload_close_mac()
10230	vals->xmac_val = REG_RD(bp, vals->xmac_addr); bnx2x_prev_unload_close_mac()
10231	REG_WR(bp, vals->xmac_addr, 0); bnx2x_prev_unload_close_mac()
10235	mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, bnx2x_prev_unload_close_mac()
10237	mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, bnx2x_prev_unload_close_mac()
10256	static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) bnx2x_prev_is_after_undi() argument
10261	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & bnx2x_prev_is_after_undi()
10265	if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { bnx2x_prev_is_after_undi()
10273	static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) bnx2x_prev_unload_undi_inc() argument
10278	if (BP_FUNC(bp) < 2) bnx2x_prev_unload_undi_inc()
10279	addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); bnx2x_prev_unload_undi_inc()
10281	addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); bnx2x_prev_unload_undi_inc()
10283	tmp_reg = REG_RD(bp, addr); bnx2x_prev_unload_undi_inc()
10288	REG_WR(bp, addr, tmp_reg); bnx2x_prev_unload_undi_inc()
10291	BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); bnx2x_prev_unload_undi_inc()
10294	static int bnx2x_prev_mcp_done(struct bnx2x *bp) bnx2x_prev_mcp_done() argument
10296	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, bnx2x_prev_mcp_done()
10307	bnx2x_prev_path_get_entry(struct bnx2x *bp) bnx2x_prev_path_get_entry() argument
10312	if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && bnx2x_prev_path_get_entry()
10313	bp->pdev->bus->number == tmp_list->bus && bnx2x_prev_path_get_entry()
10314	BP_PATH(bp) == tmp_list->path) bnx2x_prev_path_get_entry()
10320	static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) bnx2x_prev_path_mark_eeh() argument
10331	tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_path_mark_eeh()
10337	BP_PATH(bp)); bnx2x_prev_path_mark_eeh()
10345	static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) bnx2x_prev_is_path_marked() argument
10353	tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_is_path_marked()
10357	BP_PATH(bp)); bnx2x_prev_is_path_marked()
10361	BP_PATH(bp)); bnx2x_prev_is_path_marked()
10370	bool bnx2x_port_after_undi(struct bnx2x *bp) bnx2x_port_after_undi() argument
10377	entry = bnx2x_prev_path_get_entry(bp); bnx2x_port_after_undi()
10378	val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); bnx2x_port_after_undi()
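bnx2x_prev_path_get_entry() above (10307-10314) shows the match condition but not the loop it sits in. Assuming the usual list_for_each_entry() walk over a driver-global bnx2x_prev_list, the complete shape would be:

	static struct bnx2x_prev_path_list *
	prev_path_get_entry_sketch(struct bnx2x *bp)
	{
		struct bnx2x_prev_path_list *tmp_list;

		/* match this device's bus/slot/path triple against the
		 * entries recorded by earlier loads of the driver */
		list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
			if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
			    bp->pdev->bus->number == tmp_list->bus &&
			    BP_PATH(bp) == tmp_list->path)
				return tmp_list;

		return NULL;
	}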
10385	static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) bnx2x_prev_mark_path() argument
10397	tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_mark_path()
10403	BP_PATH(bp)); bnx2x_prev_mark_path()
10418	tmp_list->bus = bp->pdev->bus->number; bnx2x_prev_mark_path()
10419	tmp_list->slot = PCI_SLOT(bp->pdev->devfn); bnx2x_prev_mark_path()
10420	tmp_list->path = BP_PATH(bp); bnx2x_prev_mark_path()
10422	tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; bnx2x_prev_mark_path()
10430	BP_PATH(bp)); bnx2x_prev_mark_path()
10438	static int bnx2x_do_flr(struct bnx2x *bp) bnx2x_do_flr() argument
10440	struct pci_dev *dev = bp->pdev; bnx2x_do_flr()
10442	if (CHIP_IS_E1x(bp)) { bnx2x_do_flr()
10448	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { bnx2x_do_flr()
10450	bp->common.bc_ver); bnx2x_do_flr()
10458	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); bnx2x_do_flr()
10463	static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) bnx2x_prev_unload_uncommon() argument
10470	if (bnx2x_prev_is_path_marked(bp)) bnx2x_prev_unload_uncommon()
10471	return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_uncommon()
10476	if (bnx2x_prev_is_after_undi(bp)) bnx2x_prev_unload_uncommon()
10483	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); bnx2x_prev_unload_uncommon()
10488	rc = bnx2x_do_flr(bp); bnx2x_prev_unload_uncommon()
10501	rc = bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_uncommon()
10508	static int bnx2x_prev_unload_common(struct bnx2x *bp) bnx2x_prev_unload_common() argument
10522	if (bnx2x_prev_is_path_marked(bp)) bnx2x_prev_unload_common()
10523	return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
10525	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); bnx2x_prev_unload_common()
10532	bnx2x_prev_unload_close_mac(bp, &mac_vals); bnx2x_prev_unload_common()
10535	bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_prev_unload_common()
10536	bp->link_params.port ^= 1; bnx2x_prev_unload_common()
10537	bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_prev_unload_common()
10538	bp->link_params.port ^= 1; bnx2x_prev_unload_common()
10541	if (bnx2x_prev_is_after_undi(bp)) { bnx2x_prev_unload_common()
10544	REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); bnx2x_prev_unload_common()
10546	REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); bnx2x_prev_unload_common()
10548	if (!CHIP_IS_E1x(bp)) bnx2x_prev_unload_common()
10550	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); bnx2x_prev_unload_common()
10553	tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); bnx2x_prev_unload_common()
10557	tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); bnx2x_prev_unload_common()
10571	bnx2x_prev_unload_undi_inc(bp, 1); bnx2x_prev_unload_common()
10581	bnx2x_reset_common(bp); bnx2x_prev_unload_common()
10584	REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); bnx2x_prev_unload_common()
10586	REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); bnx2x_prev_unload_common()
10588	REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); bnx2x_prev_unload_common()
10590	REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); bnx2x_prev_unload_common()
10592	REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); bnx2x_prev_unload_common()
10593	REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); bnx2x_prev_unload_common()
10596	rc = bnx2x_prev_mark_path(bp, prev_undi); bnx2x_prev_unload_common()
10598	bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
10602	return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
bnx2x_prev_unload() 10618 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : bnx2x_prev_unload() 10619 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); bnx2x_prev_unload() 10621 hw_lock_val = REG_RD(bp, hw_lock_reg); bnx2x_prev_unload() 10625 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_prev_unload() 10626 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); bnx2x_prev_unload() 10630 REG_WR(bp, hw_lock_reg, 0xffffffff); bnx2x_prev_unload() 10634 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { bnx2x_prev_unload() 10636 bnx2x_release_alr(bp); bnx2x_prev_unload() 10642 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); bnx2x_prev_unload() 10655 aer = !!(bnx2x_prev_path_get_entry(bp) && bnx2x_prev_unload() 10656 bnx2x_prev_path_get_entry(bp)->aer); bnx2x_prev_unload() 10661 rc = bnx2x_prev_unload_common(bp); bnx2x_prev_unload() 10666 rc = bnx2x_prev_unload_uncommon(bp); bnx2x_prev_unload() 10679 if (bnx2x_port_after_undi(bp)) bnx2x_prev_unload() 10680 bp->link_params.feature_config_flags |= bnx2x_prev_unload() 10688 static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_get_common_hwinfo() argument 10695 val = REG_RD(bp, MISC_REG_CHIP_NUM); bnx2x_get_common_hwinfo() 10697 val = REG_RD(bp, MISC_REG_CHIP_REV); bnx2x_get_common_hwinfo() 10703 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); bnx2x_get_common_hwinfo() 10705 val = REG_RD(bp, MISC_REG_BOND_ID); bnx2x_get_common_hwinfo() 10707 bp->common.chip_id = id; bnx2x_get_common_hwinfo() 10710 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { bnx2x_get_common_hwinfo() 10711 if (CHIP_IS_57810(bp)) bnx2x_get_common_hwinfo() 10712 bp->common.chip_id = (CHIP_NUM_57811 << 16) | bnx2x_get_common_hwinfo() 10713 (bp->common.chip_id & 0x0000FFFF); bnx2x_get_common_hwinfo() 10714 else if (CHIP_IS_57810_MF(bp)) bnx2x_get_common_hwinfo() 10715 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | bnx2x_get_common_hwinfo() 10716 (bp->common.chip_id & 0x0000FFFF); bnx2x_get_common_hwinfo() 10717 bp->common.chip_id |= 0x1; bnx2x_get_common_hwinfo() 10721 bp->db_size = (1 << BNX2X_DB_SHIFT); bnx2x_get_common_hwinfo() 10723 if (!CHIP_IS_E1x(bp)) { bnx2x_get_common_hwinfo() 10724 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); bnx2x_get_common_hwinfo() 10726 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); bnx2x_get_common_hwinfo() 10731 bp->common.chip_port_mode = val ? 
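/*
 * Sketch of the PF id derivation performed just below for non-E1x
 * chips: in 4-port mode two PCI functions share each port, so pfid is
 * pf_num / 2 (0..3); in 2-port mode the even spacing pf_num & 0x6
 * (0, 2, 4, 6) is kept. E1x simply uses pf_num directly (0..7).
 * Plain-C restatement, not driver code.
 */
static unsigned int derive_pfid(unsigned int pf_num, int four_port_mode)
{
	return four_port_mode ? (pf_num >> 1)	/* 0..3 */
			      : (pf_num & 0x6);	/* 0, 2, 4, 6 */
}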
CHIP_4_PORT_MODE : bnx2x_get_common_hwinfo() 10734 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_get_common_hwinfo() 10735 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ bnx2x_get_common_hwinfo() 10737 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ bnx2x_get_common_hwinfo() 10739 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ bnx2x_get_common_hwinfo() 10740 bp->pfid = bp->pf_num; /* 0..7 */ bnx2x_get_common_hwinfo() 10743 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); bnx2x_get_common_hwinfo() 10745 bp->link_params.chip_id = bp->common.chip_id; bnx2x_get_common_hwinfo() 10748 val = (REG_RD(bp, 0x2874) & 0x55); bnx2x_get_common_hwinfo() 10749 if ((bp->common.chip_id & 0x1) || bnx2x_get_common_hwinfo() 10750 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { bnx2x_get_common_hwinfo() 10751 bp->flags |= ONE_PORT_FLAG; bnx2x_get_common_hwinfo() 10755 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); bnx2x_get_common_hwinfo() 10756 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << bnx2x_get_common_hwinfo() 10759 bp->common.flash_size, bp->common.flash_size); bnx2x_get_common_hwinfo() 10761 bnx2x_init_shmem(bp); bnx2x_get_common_hwinfo() 10763 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? bnx2x_get_common_hwinfo() 10767 bp->link_params.shmem_base = bp->common.shmem_base; bnx2x_get_common_hwinfo() 10768 bp->link_params.shmem2_base = bp->common.shmem2_base; bnx2x_get_common_hwinfo() 10769 if (SHMEM2_RD(bp, size) > bnx2x_get_common_hwinfo() 10770 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) bnx2x_get_common_hwinfo() 10771 bp->link_params.lfa_base = bnx2x_get_common_hwinfo() 10772 REG_RD(bp, bp->common.shmem2_base + bnx2x_get_common_hwinfo() 10774 lfa_host_addr[BP_PORT(bp)])); bnx2x_get_common_hwinfo() 10776 bp->link_params.lfa_base = 0; bnx2x_get_common_hwinfo() 10778 bp->common.shmem_base, bp->common.shmem2_base); bnx2x_get_common_hwinfo() 10780 if (!bp->common.shmem_base) { bnx2x_get_common_hwinfo() 10782 bp->flags |= NO_MCP_FLAG; bnx2x_get_common_hwinfo() 10786 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); bnx2x_get_common_hwinfo() 10787 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); bnx2x_get_common_hwinfo() 10789 bp->link_params.hw_led_mode = ((bp->common.hw_config & bnx2x_get_common_hwinfo() 10793 bp->link_params.feature_config_flags = 0; bnx2x_get_common_hwinfo() 10794 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); bnx2x_get_common_hwinfo() 10796 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10799 bp->link_params.feature_config_flags &= bnx2x_get_common_hwinfo() 10802 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; bnx2x_get_common_hwinfo() 10803 bp->common.bc_ver = val; bnx2x_get_common_hwinfo() 10811 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10815 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10818 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10821 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10825 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo() 10829 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? bnx2x_get_common_hwinfo() 10832 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? bnx2x_get_common_hwinfo() 10835 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? bnx2x_get_common_hwinfo() 10838 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
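/*
 * The block above gates optional features on the bootcode (MFW)
 * version read from shmem: each capability flag is set only when
 * bc_ver meets that feature's minimum. A generic restatement, with
 * made-up flag and threshold values:
 */
#define REQ_VER_FEATURE_A 0x070200u		/* assumed minimum bc_ver */
#define FLAG_FEATURE_A (1u << 0)		/* assumed capability bit */

static unsigned int bc_feature_flags(unsigned int bc_ver)
{
	return (bc_ver >= REQ_VER_FEATURE_A) ? FLAG_FEATURE_A : 0;
}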
bnx2x_get_common_hwinfo() 10841 boot_mode = SHMEM_RD(bp, bnx2x_get_common_hwinfo() 10842 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & bnx2x_get_common_hwinfo() 10846 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; bnx2x_get_common_hwinfo() 10849 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; bnx2x_get_common_hwinfo() 10852 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; bnx2x_get_common_hwinfo() 10855 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; bnx2x_get_common_hwinfo() 10859 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); bnx2x_get_common_hwinfo() 10860 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; bnx2x_get_common_hwinfo() 10863 (bp->flags & NO_WOL_FLAG) ? "not " : ""); bnx2x_get_common_hwinfo() 10865 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); bnx2x_get_common_hwinfo() 10866 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); bnx2x_get_common_hwinfo() 10867 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); bnx2x_get_common_hwinfo() 10868 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); bnx2x_get_common_hwinfo() 10870 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", bnx2x_get_common_hwinfo() 10877 static int bnx2x_get_igu_cam_info(struct bnx2x *bp) bnx2x_get_igu_cam_info() argument 10879 int pfid = BP_FUNC(bp); bnx2x_get_igu_cam_info() 10884 bp->igu_base_sb = 0xff; bnx2x_get_igu_cam_info() 10885 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_get_igu_cam_info() 10886 int vn = BP_VN(bp); bnx2x_get_igu_cam_info() 10887 igu_sb_cnt = bp->igu_sb_cnt; bnx2x_get_igu_cam_info() 10888 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * bnx2x_get_igu_cam_info() 10891 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + bnx2x_get_igu_cam_info() 10892 (CHIP_MODE_IS_4_PORT(bp) ? 
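/*
 * Model of the IGU CAM walk around this point: every mapping-memory
 * entry is read; valid entries whose function id matches ours either
 * identify the default status block (vector 0) or extend the range of
 * non-default SBs. The VALID/FID/VECTOR bit layout below is a
 * placeholder, not the real register format.
 */
static int igu_cam_scan(const unsigned int *cam, int n, unsigned int my_fid,
			int *dsb_id, int *base_sb)
{
	const unsigned int VALID = 1u << 7;	/* assumed valid bit */
	const unsigned int FID_MASK = 0x7f, VEC_SHIFT = 8; /* assumed */
	int id, count = 0;

	*dsb_id = *base_sb = -1;
	for (id = 0; id < n; id++) {
		unsigned int val = cam[id];

		if (!(val & VALID) || (val & FID_MASK) != my_fid)
			continue;
		if (((val >> VEC_SHIFT) & 0xff) == 0) {
			*dsb_id = id;		/* vector 0: default SB */
		} else {
			if (*base_sb < 0)
				*base_sb = id;	/* first non-default SB */
			count++;
		}
	}
	return count;				/* number of non-default SBs */
}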
pfid : vn); bnx2x_get_igu_cam_info() 10900 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); bnx2x_get_igu_cam_info() 10909 bp->igu_dsb_id = igu_sb_id; bnx2x_get_igu_cam_info() 10911 if (bp->igu_base_sb == 0xff) bnx2x_get_igu_cam_info() 10912 bp->igu_base_sb = igu_sb_id; bnx2x_get_igu_cam_info() 10925 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); bnx2x_get_igu_cam_info() 10936 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) bnx2x_link_settings_supported() argument 10938 int cfg_size = 0, idx, port = BP_PORT(bp); bnx2x_link_settings_supported() 10941 bp->port.supported[0] = 0; bnx2x_link_settings_supported() 10942 bp->port.supported[1] = 0; bnx2x_link_settings_supported() 10943 switch (bp->link_params.num_phys) { bnx2x_link_settings_supported() 10945 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; bnx2x_link_settings_supported() 10949 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported() 10953 if (bp->link_params.multi_phy_config & bnx2x_link_settings_supported() 10955 bp->port.supported[1] = bnx2x_link_settings_supported() 10956 bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported() 10957 bp->port.supported[0] = bnx2x_link_settings_supported() 10958 bp->link_params.phy[EXT_PHY2].supported; bnx2x_link_settings_supported() 10960 bp->port.supported[0] = bnx2x_link_settings_supported() 10961 bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported() 10962 bp->port.supported[1] = bnx2x_link_settings_supported() 10963 bp->link_params.phy[EXT_PHY2].supported; bnx2x_link_settings_supported() 10969 if (!(bp->port.supported[0] || bp->port.supported[1])) { bnx2x_link_settings_supported() 10971 SHMEM_RD(bp, bnx2x_link_settings_supported() 10973 SHMEM_RD(bp, bnx2x_link_settings_supported() 10978 if (CHIP_IS_E3(bp)) bnx2x_link_settings_supported() 10979 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); bnx2x_link_settings_supported() 10983 bp->port.phy_addr = REG_RD( bnx2x_link_settings_supported() 10984 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); bnx2x_link_settings_supported() 10987 bp->port.phy_addr = REG_RD( bnx2x_link_settings_supported() 10988 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); bnx2x_link_settings_supported() 10992 bp->port.link_config[0]); bnx2x_link_settings_supported() 10996 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); bnx2x_link_settings_supported() 10999 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11001 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; bnx2x_link_settings_supported() 11003 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11005 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; bnx2x_link_settings_supported() 11007 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11009 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; bnx2x_link_settings_supported() 11011 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11013 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; bnx2x_link_settings_supported() 11015 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11017 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | bnx2x_link_settings_supported() 11020 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11022 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; bnx2x_link_settings_supported() 11024 if (!(bp->link_params.speed_cap_mask[idx] & 
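/*
 * The capability pruning around this point clears every SUPPORTED_*
 * speed whose bit is absent from the port's speed_cap_mask. The same
 * logic, restated table-driven (mask values intentionally left to the
 * caller):
 */
struct speed_prune {
	unsigned int cap_bit;		/* PORT_HW_CFG_SPEED_CAPABILITY_* */
	unsigned int supported_bits;	/* SUPPORTED_* bits it enables */
};

static unsigned int prune_supported(unsigned int supported,
				    unsigned int cap_mask,
				    const struct speed_prune *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!(cap_mask & tbl[i].cap_bit))
			supported &= ~tbl[i].supported_bits;
	return supported;
}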
bnx2x_link_settings_supported() 11026 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; bnx2x_link_settings_supported() 11028 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported() 11030 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; bnx2x_link_settings_supported() 11033 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], bnx2x_link_settings_supported() 11034 bp->port.supported[1]); bnx2x_link_settings_supported() 11037 static void bnx2x_link_settings_requested(struct bnx2x *bp) bnx2x_link_settings_requested() argument 11040 bp->port.advertising[0] = 0; bnx2x_link_settings_requested() 11041 bp->port.advertising[1] = 0; bnx2x_link_settings_requested() 11042 switch (bp->link_params.num_phys) { bnx2x_link_settings_requested() 11052 bp->link_params.req_duplex[idx] = DUPLEX_FULL; bnx2x_link_settings_requested() 11053 link_config = bp->port.link_config[idx]; bnx2x_link_settings_requested() 11056 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { bnx2x_link_settings_requested() 11057 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11059 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11060 bp->port.supported[idx]; bnx2x_link_settings_requested() 11061 if (bp->link_params.phy[EXT_PHY1].type == bnx2x_link_settings_requested() 11063 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11068 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11070 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11078 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { bnx2x_link_settings_requested() 11079 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11081 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11087 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11093 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { bnx2x_link_settings_requested() 11094 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11096 bp->link_params.req_duplex[idx] = bnx2x_link_settings_requested() 11098 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11104 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11110 if (bp->port.supported[idx] & bnx2x_link_settings_requested() 11112 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11114 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11120 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11126 if (bp->port.supported[idx] & bnx2x_link_settings_requested() 11128 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11130 bp->link_params.req_duplex[idx] = bnx2x_link_settings_requested() 11132 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11138 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11144 if (bp->port.supported[idx] & bnx2x_link_settings_requested() 11146 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11148 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11154 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11160 if (bp->port.supported[idx] & bnx2x_link_settings_requested() 11162 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11164 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11170 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11176 if (bp->port.supported[idx] & bnx2x_link_settings_requested() 11178 
bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11180 bp->port.advertising[idx] |= bnx2x_link_settings_requested() 11186 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested() 11191 bp->link_params.req_line_speed[idx] = SPEED_20000; bnx2x_link_settings_requested() 11197 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested() 11199 bp->port.advertising[idx] = bnx2x_link_settings_requested() 11200 bp->port.supported[idx]; bnx2x_link_settings_requested() 11204 bp->link_params.req_flow_ctrl[idx] = (link_config & bnx2x_link_settings_requested() 11206 if (bp->link_params.req_flow_ctrl[idx] == bnx2x_link_settings_requested() 11208 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) bnx2x_link_settings_requested() 11209 bp->link_params.req_flow_ctrl[idx] = bnx2x_link_settings_requested() 11212 bnx2x_set_requested_fc(bp); bnx2x_link_settings_requested() 11216 bp->link_params.req_line_speed[idx], bnx2x_link_settings_requested() 11217 bp->link_params.req_duplex[idx], bnx2x_link_settings_requested() 11218 bp->link_params.req_flow_ctrl[idx], bnx2x_link_settings_requested() 11219 bp->port.advertising[idx]); bnx2x_link_settings_requested() 11231 static void bnx2x_get_port_hwinfo(struct bnx2x *bp) bnx2x_get_port_hwinfo() argument 11233 int port = BP_PORT(bp); bnx2x_get_port_hwinfo() 11237 bp->link_params.bp = bp; bnx2x_get_port_hwinfo() 11238 bp->link_params.port = port; bnx2x_get_port_hwinfo() 11240 bp->link_params.lane_config = bnx2x_get_port_hwinfo() 11241 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); bnx2x_get_port_hwinfo() 11243 bp->link_params.speed_cap_mask[0] = bnx2x_get_port_hwinfo() 11244 SHMEM_RD(bp, bnx2x_get_port_hwinfo() 11247 bp->link_params.speed_cap_mask[1] = bnx2x_get_port_hwinfo() 11248 SHMEM_RD(bp, bnx2x_get_port_hwinfo() 11251 bp->port.link_config[0] = bnx2x_get_port_hwinfo() 11252 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); bnx2x_get_port_hwinfo() 11254 bp->port.link_config[1] = bnx2x_get_port_hwinfo() 11255 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); bnx2x_get_port_hwinfo() 11257 bp->link_params.multi_phy_config = bnx2x_get_port_hwinfo() 11258 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); bnx2x_get_port_hwinfo() 11262 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); bnx2x_get_port_hwinfo() 11263 bp->wol = (!(bp->flags & NO_WOL_FLAG) && bnx2x_get_port_hwinfo() 11267 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) bnx2x_get_port_hwinfo() 11268 bp->flags |= NO_ISCSI_FLAG; bnx2x_get_port_hwinfo() 11270 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) bnx2x_get_port_hwinfo() 11271 bp->flags |= NO_FCOE_FLAG; bnx2x_get_port_hwinfo() 11274 bp->link_params.lane_config, bnx2x_get_port_hwinfo() 11275 bp->link_params.speed_cap_mask[0], bnx2x_get_port_hwinfo() 11276 bp->port.link_config[0]); bnx2x_get_port_hwinfo() 11278 bp->link_params.switch_cfg = (bp->port.link_config[0] & bnx2x_get_port_hwinfo() 11280 bnx2x_phy_probe(&bp->link_params); bnx2x_get_port_hwinfo() 11281 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); bnx2x_get_port_hwinfo() 11283 bnx2x_link_settings_requested(bp); bnx2x_get_port_hwinfo() 11290 SHMEM_RD(bp, bnx2x_get_port_hwinfo() 11294 bp->mdio.prtad = bp->port.phy_addr; bnx2x_get_port_hwinfo() 11298 bp->mdio.prtad = bnx2x_get_port_hwinfo() 11302 eee_mode = (((SHMEM_RD(bp, dev_info. 
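/*
 * bnx2x_get_ext_wwn_info() above reads each World Wide Name as two
 * 32-bit MF-config words; conceptually they concatenate into one
 * 64-bit WWN. Hypothetical helper illustrating the packing:
 */
static unsigned long long wwn_from_words(unsigned int hi, unsigned int lo)
{
	/* e.g. wwn_from_words(fcoe_wwn_port_name_upper, ..._lower) */
	return ((unsigned long long)hi << 32) | lo;
}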
bnx2x_get_port_hwinfo() 11307 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | bnx2x_get_port_hwinfo() 11311 bp->link_params.eee_mode = 0; bnx2x_get_port_hwinfo() 11315 void bnx2x_get_iscsi_info(struct bnx2x *bp) bnx2x_get_iscsi_info() argument 11318 int port = BP_PORT(bp); bnx2x_get_iscsi_info() 11319 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, bnx2x_get_iscsi_info() 11322 if (!CNIC_SUPPORT(bp)) { bnx2x_get_iscsi_info() 11323 bp->flags |= no_flags; bnx2x_get_iscsi_info() 11328 bp->cnic_eth_dev.max_iscsi_conn = bnx2x_get_iscsi_info() 11333 bp->cnic_eth_dev.max_iscsi_conn); bnx2x_get_iscsi_info() 11339 if (!bp->cnic_eth_dev.max_iscsi_conn) bnx2x_get_iscsi_info() 11340 bp->flags |= no_flags; bnx2x_get_iscsi_info() 11343 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) bnx2x_get_ext_wwn_info() argument 11346 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = bnx2x_get_ext_wwn_info() 11347 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); bnx2x_get_ext_wwn_info() 11348 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = bnx2x_get_ext_wwn_info() 11349 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); bnx2x_get_ext_wwn_info() 11352 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = bnx2x_get_ext_wwn_info() 11353 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); bnx2x_get_ext_wwn_info() 11354 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = bnx2x_get_ext_wwn_info() 11355 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); bnx2x_get_ext_wwn_info() 11358 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) bnx2x_shared_fcoe_funcs() argument 11362 if (IS_MF(bp)) { bnx2x_shared_fcoe_funcs() 11366 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { bnx2x_shared_fcoe_funcs() 11367 if (IS_MF_SD(bp)) { bnx2x_shared_fcoe_funcs() 11368 u32 cfg = MF_CFG_RD(bp, bnx2x_shared_fcoe_funcs() 11376 u32 cfg = MF_CFG_RD(bp, bnx2x_shared_fcoe_funcs() 11386 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; bnx2x_shared_fcoe_funcs() 11389 u32 lic = SHMEM_RD(bp, bnx2x_shared_fcoe_funcs() 11400 static void bnx2x_get_fcoe_info(struct bnx2x *bp) bnx2x_get_fcoe_info() argument 11402 int port = BP_PORT(bp); bnx2x_get_fcoe_info() 11403 int func = BP_ABS_FUNC(bp); bnx2x_get_fcoe_info() 11404 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, bnx2x_get_fcoe_info() 11406 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); bnx2x_get_fcoe_info() 11408 if (!CNIC_SUPPORT(bp)) { bnx2x_get_fcoe_info() 11409 bp->flags |= NO_FCOE_FLAG; bnx2x_get_fcoe_info() 11414 bp->cnic_eth_dev.max_fcoe_conn = bnx2x_get_fcoe_info() 11419 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; bnx2x_get_fcoe_info() 11423 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; bnx2x_get_fcoe_info() 11426 if (!IS_MF(bp)) { bnx2x_get_fcoe_info() 11428 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = bnx2x_get_fcoe_info() 11429 SHMEM_RD(bp, bnx2x_get_fcoe_info() 11432 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = bnx2x_get_fcoe_info() 11433 SHMEM_RD(bp, bnx2x_get_fcoe_info() 11438 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = bnx2x_get_fcoe_info() 11439 SHMEM_RD(bp, bnx2x_get_fcoe_info() 11442 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = bnx2x_get_fcoe_info() 11443 SHMEM_RD(bp, bnx2x_get_fcoe_info() 11446 } else if (!IS_MF_SD(bp)) { bnx2x_get_fcoe_info() 11450 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) bnx2x_get_fcoe_info() 11451 bnx2x_get_ext_wwn_info(bp, func); bnx2x_get_fcoe_info() 11453 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_fcoe_info() 11454 bnx2x_get_ext_wwn_info(bp, func); bnx2x_get_fcoe_info() 11457 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); bnx2x_get_fcoe_info() 11463 if (!bp->cnic_eth_dev.max_fcoe_conn) bnx2x_get_fcoe_info() 11464 bp->flags |= NO_FCOE_FLAG; bnx2x_get_fcoe_info() 11467 static void bnx2x_get_cnic_info(struct bnx2x *bp) bnx2x_get_cnic_info() argument 11474 bnx2x_get_iscsi_info(bp); bnx2x_get_cnic_info() 11475 bnx2x_get_fcoe_info(bp); bnx2x_get_cnic_info() 11478 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) bnx2x_get_cnic_mac_hwinfo() argument 11481 int func = BP_ABS_FUNC(bp); bnx2x_get_cnic_mac_hwinfo() 11482 int port = BP_PORT(bp); bnx2x_get_cnic_mac_hwinfo() 11483 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; bnx2x_get_cnic_mac_hwinfo() 11484 u8 *fip_mac = bp->fip_mac; bnx2x_get_cnic_mac_hwinfo() 11486 if (IS_MF(bp)) { bnx2x_get_cnic_mac_hwinfo() 11492 if (!IS_MF_SD(bp)) { bnx2x_get_cnic_mac_hwinfo() 11493 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); bnx2x_get_cnic_mac_hwinfo() 11495 val2 = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo() 11497 val = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo() 11503 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; bnx2x_get_cnic_mac_hwinfo() 11507 val2 = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo() 11509 val = MF_CFG_RD(bp, func_ext_config[func]. 
bnx2x_get_cnic_mac_hwinfo() 11515 bp->flags |= NO_FCOE_FLAG; bnx2x_get_cnic_mac_hwinfo() 11518 bp->mf_ext_config = cfg; bnx2x_get_cnic_mac_hwinfo() 11521 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { bnx2x_get_cnic_mac_hwinfo() 11523 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo() 11528 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { bnx2x_get_cnic_mac_hwinfo() 11530 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo() 11541 if (IS_MF_FCOE_AFEX(bp)) bnx2x_get_cnic_mac_hwinfo() 11542 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo() 11544 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo() 11546 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo() 11550 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo() 11552 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo() 11559 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; bnx2x_get_cnic_mac_hwinfo() 11565 bp->flags |= NO_FCOE_FLAG; bnx2x_get_cnic_mac_hwinfo() 11566 eth_zero_addr(bp->fip_mac); bnx2x_get_cnic_mac_hwinfo() 11570 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_get_mac_hwinfo() argument 11573 int func = BP_ABS_FUNC(bp); bnx2x_get_mac_hwinfo() 11574 int port = BP_PORT(bp); bnx2x_get_mac_hwinfo() 11577 eth_zero_addr(bp->dev->dev_addr); bnx2x_get_mac_hwinfo() 11579 if (BP_NOMCP(bp)) { bnx2x_get_mac_hwinfo() 11581 eth_hw_addr_random(bp->dev); bnx2x_get_mac_hwinfo() 11582 } else if (IS_MF(bp)) { bnx2x_get_mac_hwinfo() 11583 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); bnx2x_get_mac_hwinfo() 11584 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); bnx2x_get_mac_hwinfo() 11587 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); bnx2x_get_mac_hwinfo() 11589 if (CNIC_SUPPORT(bp)) bnx2x_get_mac_hwinfo() 11590 bnx2x_get_cnic_mac_hwinfo(bp); bnx2x_get_mac_hwinfo() 11593 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); bnx2x_get_mac_hwinfo() 11594 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); bnx2x_get_mac_hwinfo() 11595 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); bnx2x_get_mac_hwinfo() 11597 if (CNIC_SUPPORT(bp)) bnx2x_get_mac_hwinfo() 11598 bnx2x_get_cnic_mac_hwinfo(bp); bnx2x_get_mac_hwinfo() 11601 if (!BP_NOMCP(bp)) { bnx2x_get_mac_hwinfo() 11603 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); bnx2x_get_mac_hwinfo() 11604 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); bnx2x_get_mac_hwinfo() 11605 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); bnx2x_get_mac_hwinfo() 11606 bp->flags |= HAS_PHYS_PORT_ID; bnx2x_get_mac_hwinfo() 11609 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_mac_hwinfo() 11611 if (!is_valid_ether_addr(bp->dev->dev_addr)) bnx2x_get_mac_hwinfo() 11612 dev_err(&bp->pdev->dev, bnx2x_get_mac_hwinfo() 11615 bp->dev->dev_addr); bnx2x_get_mac_hwinfo() 11618 static bool bnx2x_get_dropless_info(struct bnx2x *bp) bnx2x_get_dropless_info() argument 11623 if (IS_VF(bp)) bnx2x_get_dropless_info() 11626 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { bnx2x_get_dropless_info() 11628 tmp = BP_ABS_FUNC(bp); bnx2x_get_dropless_info() 11629 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); bnx2x_get_dropless_info() 11633 tmp = BP_PORT(bp); bnx2x_get_dropless_info() 11634 cfg = SHMEM_RD(bp, bnx2x_get_dropless_info() 11641 static void validate_set_si_mode(struct bnx2x *bp) validate_set_si_mode() argument 11643 u8 func = BP_ABS_FUNC(bp); validate_set_si_mode() 11646 val = MF_CFG_RD(bp, 
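/*
 * The mac_upper/mac_lower word pairs read above feed
 * bnx2x_set_mac_buf(): the upper word carries bytes 0-1 of the MAC and
 * the lower word bytes 2-5. A sketch of that packing (byte order as I
 * understand the helper; treat as illustrative):
 */
static void mac_from_words(unsigned char *mac, unsigned int lo,
			   unsigned int hi)
{
	mac[0] = (hi >> 8) & 0xff;
	mac[1] = hi & 0xff;
	mac[2] = (lo >> 24) & 0xff;
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}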
func_mf_config[func].mac_upper); validate_set_si_mode() 11650 bp->mf_mode = MULTI_FUNCTION_SI; validate_set_si_mode() 11651 bp->mf_config[BP_VN(bp)] = validate_set_si_mode() 11652 MF_CFG_RD(bp, func_mf_config[func].config); validate_set_si_mode() 11657 static int bnx2x_get_hwinfo(struct bnx2x *bp) bnx2x_get_hwinfo() argument 11659 int /*abs*/func = BP_ABS_FUNC(bp); bnx2x_get_hwinfo() 11665 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { bnx2x_get_hwinfo() 11666 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo() 11671 bnx2x_get_common_hwinfo(bp); bnx2x_get_hwinfo() 11676 if (CHIP_IS_E1x(bp)) { bnx2x_get_hwinfo() 11677 bp->common.int_block = INT_BLOCK_HC; bnx2x_get_hwinfo() 11679 bp->igu_dsb_id = DEF_SB_IGU_ID; bnx2x_get_hwinfo() 11680 bp->igu_base_sb = 0; bnx2x_get_hwinfo() 11682 bp->common.int_block = INT_BLOCK_IGU; bnx2x_get_hwinfo() 11685 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_get_hwinfo() 11687 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); bnx2x_get_hwinfo() 11695 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); bnx2x_get_hwinfo() 11696 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); bnx2x_get_hwinfo() 11698 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { bnx2x_get_hwinfo() 11703 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { bnx2x_get_hwinfo() 11704 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo() 11706 bnx2x_release_hw_lock(bp, bnx2x_get_hwinfo() 11714 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; bnx2x_get_hwinfo() 11718 rc = bnx2x_get_igu_cam_info(bp); bnx2x_get_hwinfo() 11719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_get_hwinfo() 11729 if (CHIP_IS_E1x(bp)) bnx2x_get_hwinfo() 11730 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); bnx2x_get_hwinfo() 11736 bp->base_fw_ndsb = bp->igu_base_sb; bnx2x_get_hwinfo() 11739 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, bnx2x_get_hwinfo() 11740 bp->igu_sb_cnt, bp->base_fw_ndsb); bnx2x_get_hwinfo() 11746 bp->mf_ov = 0; bnx2x_get_hwinfo() 11747 bp->mf_mode = 0; bnx2x_get_hwinfo() 11748 bp->mf_sub_mode = 0; bnx2x_get_hwinfo() 11749 vn = BP_VN(bp); bnx2x_get_hwinfo() 11751 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { bnx2x_get_hwinfo() 11753 bp->common.shmem2_base, SHMEM2_RD(bp, size), bnx2x_get_hwinfo() 11756 if (SHMEM2_HAS(bp, mf_cfg_addr)) bnx2x_get_hwinfo() 11757 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); bnx2x_get_hwinfo() 11759 bp->common.mf_cfg_base = bp->common.shmem_base + bnx2x_get_hwinfo() 11770 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { bnx2x_get_hwinfo() 11772 val = SHMEM_RD(bp, bnx2x_get_hwinfo() 11778 validate_set_si_mode(bp); bnx2x_get_hwinfo() 11781 if ((!CHIP_IS_E1x(bp)) && bnx2x_get_hwinfo() 11782 (MF_CFG_RD(bp, func_mf_config[func]. bnx2x_get_hwinfo() 11784 (SHMEM2_HAS(bp, bnx2x_get_hwinfo() 11786 bp->mf_mode = MULTI_FUNCTION_AFEX; bnx2x_get_hwinfo() 11787 bp->mf_config[vn] = MF_CFG_RD(bp, bnx2x_get_hwinfo() 11795 val = MF_CFG_RD(bp, bnx2x_get_hwinfo() 11800 bp->mf_mode = MULTI_FUNCTION_SD; bnx2x_get_hwinfo() 11801 bp->mf_config[vn] = MF_CFG_RD(bp, bnx2x_get_hwinfo() 11807 bp->mf_mode = MULTI_FUNCTION_SD; bnx2x_get_hwinfo() 11808 bp->mf_sub_mode = SUB_MF_MODE_UFP; bnx2x_get_hwinfo() 11809 bp->mf_config[vn] = bnx2x_get_hwinfo() 11810 MF_CFG_RD(bp, bnx2x_get_hwinfo() 11814 bp->mf_config[vn] = 0; bnx2x_get_hwinfo() 11817 val2 = SHMEM_RD(bp, bnx2x_get_hwinfo() 11822 validate_set_si_mode(bp); bnx2x_get_hwinfo() 11823 bp->mf_sub_mode = bnx2x_get_hwinfo() 11828 bp->mf_config[vn] = 0; bnx2x_get_hwinfo() 11835 bp->mf_config[vn] = 0; bnx2x_get_hwinfo() 11841 IS_MF(bp) ? 
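/*
 * The IGU_REG_RESET_MEMORIES wait above is a classic bounded poll:
 * re-read the register until it clears or the retry budget runs out.
 * Generic form, with read_reg()/wait_us() standing in for
 * REG_RD()/usleep_range():
 */
static int poll_until_clear(unsigned int (*read_reg)(void),
			    void (*wait_us)(unsigned int), int tries)
{
	while (tries-- > 0 && read_reg())
		wait_us(1000);			/* ~1 ms between polls */
	return read_reg() ? -1 : 0;		/* -1: timed out */
}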
"multi" : "single"); bnx2x_get_hwinfo() 11843 switch (bp->mf_mode) { bnx2x_get_hwinfo() 11845 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_get_hwinfo() 11848 bp->mf_ov = val; bnx2x_get_hwinfo() 11849 bp->path_has_ovlan = true; bnx2x_get_hwinfo() 11852 func, bp->mf_ov, bp->mf_ov); bnx2x_get_hwinfo() 11853 } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { bnx2x_get_hwinfo() 11854 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo() 11857 bp->path_has_ovlan = true; bnx2x_get_hwinfo() 11859 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo() 11874 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo() 11887 if (CHIP_MODE_IS_4_PORT(bp) && bnx2x_get_hwinfo() 11888 !bp->path_has_ovlan && bnx2x_get_hwinfo() 11889 !IS_MF(bp) && bnx2x_get_hwinfo() 11890 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { bnx2x_get_hwinfo() 11891 u8 other_port = !BP_PORT(bp); bnx2x_get_hwinfo() 11892 u8 other_func = BP_PATH(bp) + 2*other_port; bnx2x_get_hwinfo() 11893 val = MF_CFG_RD(bp, bnx2x_get_hwinfo() 11896 bp->path_has_ovlan = true; bnx2x_get_hwinfo() 11901 if (CHIP_IS_E1H(bp) && IS_MF(bp)) bnx2x_get_hwinfo() 11902 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); bnx2x_get_hwinfo() 11905 bnx2x_get_port_hwinfo(bp); bnx2x_get_hwinfo() 11908 bnx2x_get_mac_hwinfo(bp); bnx2x_get_hwinfo() 11910 bnx2x_get_cnic_info(bp); bnx2x_get_hwinfo() 11915 static void bnx2x_read_fwinfo(struct bnx2x *bp) bnx2x_read_fwinfo() argument 11925 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); bnx2x_read_fwinfo() 11926 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); bnx2x_read_fwinfo() 11951 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, bnx2x_read_fwinfo() 11988 memcpy(bp->fw_ver, &vpd_data[rodi], len); bnx2x_read_fwinfo() 11989 bp->fw_ver[len] = ' '; bnx2x_read_fwinfo() 12000 static void bnx2x_set_modes_bitmap(struct bnx2x *bp) bnx2x_set_modes_bitmap() argument 12004 if (CHIP_REV_IS_FPGA(bp)) bnx2x_set_modes_bitmap() 12006 else if (CHIP_REV_IS_EMUL(bp)) bnx2x_set_modes_bitmap() 12011 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_set_modes_bitmap() 12016 if (CHIP_IS_E2(bp)) bnx2x_set_modes_bitmap() 12018 else if (CHIP_IS_E3(bp)) { bnx2x_set_modes_bitmap() 12020 if (CHIP_REV(bp) == CHIP_REV_Ax) bnx2x_set_modes_bitmap() 12022 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ bnx2x_set_modes_bitmap() 12026 if (IS_MF(bp)) { bnx2x_set_modes_bitmap() 12028 switch (bp->mf_mode) { bnx2x_set_modes_bitmap() 12047 INIT_MODE_FLAGS(bp) = flags; bnx2x_set_modes_bitmap() 12050 static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_init_bp() argument 12055 mutex_init(&bp->port.phy_mutex); bnx2x_init_bp() 12056 mutex_init(&bp->fw_mb_mutex); bnx2x_init_bp() 12057 mutex_init(&bp->drv_info_mutex); bnx2x_init_bp() 12058 sema_init(&bp->stats_lock, 1); bnx2x_init_bp() 12059 bp->drv_info_mng_owner = false; bnx2x_init_bp() 12061 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); bnx2x_init_bp() 12062 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); bnx2x_init_bp() 12063 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); bnx2x_init_bp() 12064 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); bnx2x_init_bp() 12065 if (IS_PF(bp)) { bnx2x_init_bp() 12066 rc = bnx2x_get_hwinfo(bp); bnx2x_init_bp() 12070 eth_zero_addr(bp->dev->dev_addr); bnx2x_init_bp() 12073 bnx2x_set_modes_bitmap(bp); bnx2x_init_bp() 12075 rc = bnx2x_alloc_mem_bp(bp); bnx2x_init_bp() 12079 bnx2x_read_fwinfo(bp); bnx2x_init_bp() 12081 func = BP_FUNC(bp); bnx2x_init_bp() 12084 if (IS_PF(bp) && !BP_NOMCP(bp)) { bnx2x_init_bp() 12086 bp->fw_seq = bnx2x_init_bp() 12087 SHMEM_RD(bp, 
func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_init_bp() 12089 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); bnx2x_init_bp() 12091 rc = bnx2x_prev_unload(bp); bnx2x_init_bp() 12093 bnx2x_free_mem_bp(bp); bnx2x_init_bp() 12098 if (CHIP_REV_IS_FPGA(bp)) bnx2x_init_bp() 12099 dev_err(&bp->pdev->dev, "FPGA detected\n"); bnx2x_init_bp() 12101 if (BP_NOMCP(bp) && (func == 0)) bnx2x_init_bp() 12102 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bnx2x_init_bp() 12104 bp->disable_tpa = disable_tpa; bnx2x_init_bp() 12105 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); bnx2x_init_bp() 12107 bp->disable_tpa |= is_kdump_kernel(); bnx2x_init_bp() 12110 if (bp->disable_tpa) { bnx2x_init_bp() 12111 bp->dev->hw_features &= ~NETIF_F_LRO; bnx2x_init_bp() 12112 bp->dev->features &= ~NETIF_F_LRO; bnx2x_init_bp() 12115 if (CHIP_IS_E1(bp)) bnx2x_init_bp() 12116 bp->dropless_fc = 0; bnx2x_init_bp() 12118 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); bnx2x_init_bp() 12120 bp->mrrs = mrrs; bnx2x_init_bp() 12122 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; bnx2x_init_bp() 12123 if (IS_VF(bp)) bnx2x_init_bp() 12124 bp->rx_ring_size = MAX_RX_AVAIL; bnx2x_init_bp() 12127 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; bnx2x_init_bp() 12128 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; bnx2x_init_bp() 12130 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; bnx2x_init_bp() 12132 init_timer(&bp->timer); bnx2x_init_bp() 12133 bp->timer.expires = jiffies + bp->current_interval; bnx2x_init_bp() 12134 bp->timer.data = (unsigned long) bp; bnx2x_init_bp() 12135 bp->timer.function = bnx2x_timer; bnx2x_init_bp() 12137 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && bnx2x_init_bp() 12138 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && bnx2x_init_bp() 12139 SHMEM2_RD(bp, dcbx_lldp_params_offset) && bnx2x_init_bp() 12140 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { bnx2x_init_bp() 12141 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_init_bp() 12142 bnx2x_dcbx_init_params(bp); bnx2x_init_bp() 12144 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); bnx2x_init_bp() 12147 if (CHIP_IS_E1x(bp)) bnx2x_init_bp() 12148 bp->cnic_base_cl_id = FP_SB_MAX_E1x; bnx2x_init_bp() 12150 bp->cnic_base_cl_id = FP_SB_MAX_E2; bnx2x_init_bp() 12153 if (IS_VF(bp)) bnx2x_init_bp() 12154 bp->max_cos = 1; bnx2x_init_bp() 12155 else if (CHIP_IS_E1x(bp)) bnx2x_init_bp() 12156 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; bnx2x_init_bp() 12157 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) bnx2x_init_bp() 12158 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; bnx2x_init_bp() 12159 else if (CHIP_IS_E3B0(bp)) bnx2x_init_bp() 12160 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; bnx2x_init_bp() 12163 CHIP_NUM(bp), CHIP_REV(bp)); bnx2x_init_bp() 12164 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); bnx2x_init_bp() 12170 if (IS_VF(bp)) bnx2x_init_bp() 12171 bp->min_msix_vec_cnt = 1; bnx2x_init_bp() 12172 else if (CNIC_SUPPORT(bp)) bnx2x_init_bp() 12173 bp->min_msix_vec_cnt = 3; bnx2x_init_bp() 12175 bp->min_msix_vec_cnt = 2; bnx2x_init_bp() 12176 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); bnx2x_init_bp() 12178 bp->dump_preset_idx = 1; bnx2x_init_bp() 12180 if (CHIP_IS_E3B0(bp)) bnx2x_init_bp() 12181 bp->flags |= PTP_SUPPORTED; bnx2x_init_bp() 12197 struct bnx2x *bp = netdev_priv(dev); bnx2x_open() local 12200 bp->stats_init = true; bnx2x_open() 12204 bnx2x_set_power_state(bp, PCI_D0); bnx2x_open() 12212 if (IS_PF(bp)) { bnx2x_open() 12213 int other_engine = BP_PATH(bp) ? 
0 : 1; bnx2x_open() 12217 other_load_status = bnx2x_get_load_status(bp, other_engine); bnx2x_open() 12218 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); bnx2x_open() 12219 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || bnx2x_open() 12220 bnx2x_chk_parity_attn(bp, &global, true)) { bnx2x_open() 12228 bnx2x_set_reset_global(bp); bnx2x_open() 12237 bnx2x_trylock_leader_lock(bp) && bnx2x_open() 12238 !bnx2x_leader_reset(bp)) { bnx2x_open() 12239 netdev_info(bp->dev, bnx2x_open() 12245 bnx2x_set_power_state(bp, PCI_D3hot); bnx2x_open() 12246 bp->recovery_state = BNX2X_RECOVERY_FAILED; bnx2x_open() 12256 bp->recovery_state = BNX2X_RECOVERY_DONE; bnx2x_open() 12257 rc = bnx2x_nic_load(bp, LOAD_OPEN); bnx2x_open() 12266 struct bnx2x *bp = netdev_priv(dev); bnx2x_close() local 12269 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_close() 12274 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, bnx2x_init_mcast_macs_list() argument 12277 int mc_count = netdev_mc_count(bp->dev); bnx2x_init_mcast_macs_list() 12287 netdev_for_each_mc_addr(ha, bp->dev) { bnx2x_init_mcast_macs_list() 12312 * @bp: driver handle 12316 static int bnx2x_set_uc_list(struct bnx2x *bp) bnx2x_set_uc_list() argument 12319 struct net_device *dev = bp->dev; bnx2x_set_uc_list() 12321 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; bnx2x_set_uc_list() 12325 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); bnx2x_set_uc_list() 12332 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, netdev_for_each_uc_addr() 12350 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 12354 static int bnx2x_set_mc_list(struct bnx2x *bp) bnx2x_set_mc_list() argument 12356 struct net_device *dev = bp->dev; bnx2x_set_mc_list() 12360 rparam.mcast_obj = &bp->mcast_obj; bnx2x_set_mc_list() 12363 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); bnx2x_set_mc_list() 12371 rc = bnx2x_init_mcast_macs_list(bp, &rparam); bnx2x_set_mc_list() 12379 rc = bnx2x_config_mcast(bp, &rparam, bnx2x_set_mc_list() 12391 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ bnx2x_set_rx_mode() 12394 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rx_mode() local 12396 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_set_rx_mode() 12397 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); bnx2x_set_rx_mode() 12401 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, bnx2x_set_rx_mode() 12406 void bnx2x_set_rx_mode_inner(struct bnx2x *bp) bnx2x_set_rx_mode_inner() argument 12410 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); bnx2x_set_rx_mode_inner() 12412 netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12414 if (bp->dev->flags & IFF_PROMISC) { bnx2x_set_rx_mode_inner() 12416 } else if ((bp->dev->flags & IFF_ALLMULTI) || bnx2x_set_rx_mode_inner() 12417 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && bnx2x_set_rx_mode_inner() 12418 CHIP_IS_E1(bp))) { bnx2x_set_rx_mode_inner() 12421 if (IS_PF(bp)) { bnx2x_set_rx_mode_inner() 12423 if (bnx2x_set_mc_list(bp) < 0) bnx2x_set_rx_mode_inner() 12427 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12428 if (bnx2x_set_uc_list(bp) < 0) bnx2x_set_rx_mode_inner() 12430 netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12435 bnx2x_schedule_sp_rtnl(bp, bnx2x_set_rx_mode_inner() 12440 bp->rx_mode = rx_mode; bnx2x_set_rx_mode_inner() 12442 if (IS_MF_ISCSI_ONLY(bp)) bnx2x_set_rx_mode_inner() 12443 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_set_rx_mode_inner() 12446 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 
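/*
 * Shape of the unicast-filter update in bnx2x_set_uc_list() above:
 * flush the whole UC MAC list first, then re-add every address
 * currently on the netdev (a final NULL-MAC call just waits for
 * completion). Sketch with hypothetical callbacks:
 */
static int reprogram_uc_list(int (*del_all)(void),
			     int (*add_one)(const unsigned char *mac),
			     const unsigned char (*macs)[6], int n)
{
	int i, rc = del_all();			/* clear previous list */

	for (i = 0; !rc && i < n; i++)
		rc = add_one(macs[i]);		/* re-add current list */
	return rc;
}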
bnx2x_set_rx_mode_inner() 12447 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); bnx2x_set_rx_mode_inner() 12448 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12452 if (IS_PF(bp)) { bnx2x_set_rx_mode_inner() 12453 bnx2x_set_storm_rx_mode(bp); bnx2x_set_rx_mode_inner() 12454 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12460 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner() 12461 bnx2x_vfpf_storm_rx_mode(bp); bnx2x_set_rx_mode_inner() 12469 struct bnx2x *bp = netdev_priv(netdev); bnx2x_mdio_read() local 12479 bnx2x_acquire_phy_lock(bp); bnx2x_mdio_read() 12480 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); bnx2x_mdio_read() 12481 bnx2x_release_phy_lock(bp); bnx2x_mdio_read() 12493 struct bnx2x *bp = netdev_priv(netdev); bnx2x_mdio_write() local 12503 bnx2x_acquire_phy_lock(bp); bnx2x_mdio_write() 12504 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); bnx2x_mdio_write() 12505 bnx2x_release_phy_lock(bp); bnx2x_mdio_write() 12512 struct bnx2x *bp = netdev_priv(dev); bnx2x_ioctl() local 12520 return bnx2x_hwtstamp_ioctl(bp, ifr); bnx2x_ioctl() 12524 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); bnx2x_ioctl() 12531 struct bnx2x *bp = netdev_priv(dev); poll_bnx2x() local 12534 for_each_eth_queue(bp, i) { for_each_eth_queue() 12535 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 12536 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); for_each_eth_queue() 12543 struct bnx2x *bp = netdev_priv(dev); bnx2x_validate_addr() local 12546 if (IS_VF(bp)) bnx2x_validate_addr() 12547 bnx2x_sample_bulletin(bp); bnx2x_validate_addr() 12559 struct bnx2x *bp = netdev_priv(netdev); bnx2x_get_phys_port_id() local 12561 if (!(bp->flags & HAS_PHYS_PORT_ID)) bnx2x_get_phys_port_id() 12564 ppid->id_len = sizeof(bp->phys_port_id); bnx2x_get_phys_port_id() 12565 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); bnx2x_get_phys_port_id() 12612 static int bnx2x_set_coherency_mask(struct bnx2x *bp) bnx2x_set_coherency_mask() argument 12614 struct device *dev = &bp->pdev->dev; bnx2x_set_coherency_mask() 12625 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) bnx2x_disable_pcie_error_reporting() argument 12627 if (bp->flags & AER_ENABLED) { bnx2x_disable_pcie_error_reporting() 12628 pci_disable_pcie_error_reporting(bp->pdev); bnx2x_disable_pcie_error_reporting() 12629 bp->flags &= ~AER_ENABLED; bnx2x_disable_pcie_error_reporting() 12633 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, bnx2x_init_dev() argument 12644 bp->dev = dev; bnx2x_init_dev() 12645 bp->pdev = pdev; bnx2x_init_dev() 12649 dev_err(&bp->pdev->dev, bnx2x_init_dev() 12655 dev_err(&bp->pdev->dev, bnx2x_init_dev() 12661 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { bnx2x_init_dev() 12662 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); bnx2x_init_dev() 12678 dev_err(&bp->pdev->dev, bnx2x_init_dev() 12687 if (IS_PF(bp)) { bnx2x_init_dev() 12689 dev_err(&bp->pdev->dev, bnx2x_init_dev() 12697 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); bnx2x_init_dev() 12702 rc = bnx2x_set_coherency_mask(bp); bnx2x_init_dev() 12712 bp->regview = pci_ioremap_bar(pdev, 0); bnx2x_init_dev() 12713 if (!bp->regview) { bnx2x_init_dev() 12714 dev_err(&bp->pdev->dev, bnx2x_init_dev() 12726 bp->pf_num = PCI_FUNC(pdev->devfn); bnx2x_init_dev() 12729 pci_read_config_dword(bp->pdev, bnx2x_init_dev() 12731 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> bnx2x_init_dev() 12734 BNX2X_DEV_INFO("me reg PF num: 
%d\n", bp->pf_num); bnx2x_init_dev() 12737 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_init_dev() 12746 bp->flags |= AER_ENABLED; bnx2x_init_dev() 12754 if (IS_PF(bp)) { bnx2x_init_dev() 12755 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); bnx2x_init_dev() 12756 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); bnx2x_init_dev() 12757 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); bnx2x_init_dev() 12758 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); bnx2x_init_dev() 12761 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); bnx2x_init_dev() 12762 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); bnx2x_init_dev() 12763 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); bnx2x_init_dev() 12764 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); bnx2x_init_dev() 12772 REG_WR(bp, bnx2x_init_dev() 12779 bnx2x_set_ethtool_ops(bp, dev); bnx2x_init_dev() 12812 bp->mdio.prtad = MDIO_PRTAD_NONE; bnx2x_init_dev() 12813 bp->mdio.mmds = 0; bnx2x_init_dev() 12814 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; bnx2x_init_dev() 12815 bp->mdio.dev = dev; bnx2x_init_dev() 12816 bp->mdio.mdio_read = bnx2x_mdio_read; bnx2x_init_dev() 12817 bp->mdio.mdio_write = bnx2x_mdio_write; bnx2x_init_dev() 12832 static int bnx2x_check_firmware(struct bnx2x *bp) bnx2x_check_firmware() argument 12834 const struct firmware *firmware = bp->firmware; bnx2x_check_firmware() 12956 bp->arr = kmalloc(len, GFP_KERNEL); \ 12957 if (!bp->arr) \ 12959 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 12960 (u8 *)bp->arr, len); \ 12963 static int bnx2x_init_firmware(struct bnx2x *bp) bnx2x_init_firmware() argument 12969 if (bp->firmware) bnx2x_init_firmware() 12972 if (CHIP_IS_E1(bp)) bnx2x_init_firmware() 12974 else if (CHIP_IS_E1H(bp)) bnx2x_init_firmware() 12976 else if (!CHIP_IS_E1x(bp)) bnx2x_init_firmware() 12984 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); bnx2x_init_firmware() 12991 rc = bnx2x_check_firmware(bp); bnx2x_init_firmware() 12997 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; bnx2x_init_firmware() 13011 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13013 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13015 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13017 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13019 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13021 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13023 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13025 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware() 13033 kfree(bp->init_ops_offsets); bnx2x_init_firmware() 13035 kfree(bp->init_ops); bnx2x_init_firmware() 13037 kfree(bp->init_data); bnx2x_init_firmware() 13039 release_firmware(bp->firmware); bnx2x_init_firmware() 13040 bp->firmware = NULL; bnx2x_init_firmware() 13045 static void bnx2x_release_firmware(struct bnx2x *bp) bnx2x_release_firmware() argument 13047 kfree(bp->init_ops_offsets); bnx2x_release_firmware() 13048 kfree(bp->init_ops); bnx2x_release_firmware() 13049 kfree(bp->init_data); bnx2x_release_firmware() 13050 release_firmware(bp->firmware); bnx2x_release_firmware() 13051 bp->firmware = NULL; bnx2x_release_firmware() 13071 void bnx2x__init_func_obj(struct bnx2x *bp) bnx2x__init_func_obj() argument 13074 bnx2x_setup_dmae(bp); bnx2x__init_func_obj() 13076 bnx2x_init_func_obj(bp, &bp->func_obj, bnx2x__init_func_obj() 13077 bnx2x_sp(bp, func_rdata), bnx2x__init_func_obj() 13078 bnx2x_sp_mapping(bp, 
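/*
 * The firmware parsing above locates each section as data +
 * be32_to_cpu(offset) from {offset, len} pairs in the file header.
 * Model with an explicit bounds check (be32() stands in for
 * be32_to_cpu() on a byte pointer):
 */
static unsigned int be32(const unsigned char *p)
{
	return ((unsigned int)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static const unsigned char *fw_section(const unsigned char *data,
				       unsigned int data_len,
				       const unsigned char *hdr_entry)
{
	unsigned int off = be32(hdr_entry);	/* section offset */
	unsigned int len = be32(hdr_entry + 4);	/* section length */

	return (off <= data_len && len <= data_len - off) ? data + off : 0;
}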
func_rdata), bnx2x__init_func_obj() 13079 bnx2x_sp(bp, func_afex_rdata), bnx2x__init_func_obj() 13080 bnx2x_sp_mapping(bp, func_afex_rdata), bnx2x__init_func_obj() 13085 static int bnx2x_set_qm_cid_count(struct bnx2x *bp) bnx2x_set_qm_cid_count() argument 13087 int cid_count = BNX2X_L2_MAX_CID(bp); bnx2x_set_qm_cid_count() 13089 if (IS_SRIOV(bp)) bnx2x_set_qm_cid_count() 13092 if (CNIC_SUPPORT(bp)) bnx2x_set_qm_cid_count() 13192 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, bnx2x_send_update_drift_ramrod() argument 13203 func_params.f_obj = &bp->func_obj; bnx2x_send_update_drift_ramrod() 13214 return bnx2x_func_state_change(bp, &func_params); bnx2x_send_update_drift_ramrod() 13219 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_adjfreq() local 13227 if (!netif_running(bp->dev)) { bnx2x_ptp_adjfreq() 13272 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, bnx2x_ptp_adjfreq() 13287 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_adjtime() local 13291 timecounter_adjtime(&bp->timecounter, delta); bnx2x_ptp_adjtime() 13298 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_gettime() local 13301 ns = timecounter_read(&bp->timecounter); bnx2x_ptp_gettime() 13313 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_settime() local 13321 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); bnx2x_ptp_settime() 13330 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_enable() local 13336 static void bnx2x_register_phc(struct bnx2x *bp) bnx2x_register_phc() argument 13339 bp->ptp_clock_info.owner = THIS_MODULE; bnx2x_register_phc() 13340 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); bnx2x_register_phc() 13341 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ bnx2x_register_phc() 13342 bp->ptp_clock_info.n_alarm = 0; bnx2x_register_phc() 13343 bp->ptp_clock_info.n_ext_ts = 0; bnx2x_register_phc() 13344 bp->ptp_clock_info.n_per_out = 0; bnx2x_register_phc() 13345 bp->ptp_clock_info.pps = 0; bnx2x_register_phc() 13346 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; bnx2x_register_phc() 13347 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; bnx2x_register_phc() 13348 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; bnx2x_register_phc() 13349 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; bnx2x_register_phc() 13350 bp->ptp_clock_info.enable = bnx2x_ptp_enable; bnx2x_register_phc() 13352 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); bnx2x_register_phc() 13353 if (IS_ERR(bp->ptp_clock)) { bnx2x_register_phc() 13354 bp->ptp_clock = NULL; bnx2x_register_phc() 13363 struct bnx2x *bp; bnx2x_init_one() local 13388 * initialization of bp->max_cos based on the chip versions AND chip bnx2x_init_one() 13417 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); bnx2x_init_one() 13421 bp = netdev_priv(dev); bnx2x_init_one() 13423 bp->flags = 0; bnx2x_init_one() 13425 bp->flags |= IS_VF_FLAG; bnx2x_init_one() 13427 bp->igu_sb_cnt = max_non_def_sbs; bnx2x_init_one() 13428 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; bnx2x_init_one() 13429 bp->msg_enable = debug; bnx2x_init_one() 13430 bp->cnic_support = cnic_cnt; bnx2x_init_one() 13431 bp->cnic_probe = bnx2x_cnic_probe; bnx2x_init_one() 13435 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); bnx2x_init_one() 13442 IS_PF(bp) ? 
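/*
 * The PHC callbacks above lean on the kernel timecounter: a
 * free-running hardware cycle count is extended to nanoseconds by
 * accumulating scaled deltas since the previous read. Plain-C model
 * of that mechanism (mult/shift chosen by the driver's cyclecounter):
 */
struct tc_model {
	unsigned long long last_cyc, nsec;
	unsigned int mult, shift;		/* cycles -> ns scaling */
};

static unsigned long long tc_read(struct tc_model *tc,
				  unsigned long long cyc)
{
	unsigned long long delta = cyc - tc->last_cyc;

	tc->last_cyc = cyc;
	tc->nsec += (delta * tc->mult) >> tc->shift;
	return tc->nsec;
}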
"physical" : "virtual"); bnx2x_init_one() 13443 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); bnx2x_init_one() 13448 rc = bnx2x_init_bp(bp); bnx2x_init_one() 13452 /* Map doorbells here as we need the real value of bp->max_cos which bnx2x_init_one() 13456 if (IS_VF(bp)) { bnx2x_init_one() 13457 bp->doorbells = bnx2x_vf_doorbells(bp); bnx2x_init_one() 13458 rc = bnx2x_vf_pci_alloc(bp); bnx2x_init_one() 13462 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); bnx2x_init_one() 13464 dev_err(&bp->pdev->dev, bnx2x_init_one() 13469 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), bnx2x_init_one() 13472 if (!bp->doorbells) { bnx2x_init_one() 13473 dev_err(&bp->pdev->dev, bnx2x_init_one() 13479 if (IS_VF(bp)) { bnx2x_init_one() 13480 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); bnx2x_init_one() 13486 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); bnx2x_init_one() 13491 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); bnx2x_init_one() 13492 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); bnx2x_init_one() 13495 if (CHIP_IS_E1x(bp)) bnx2x_init_one() 13496 bp->flags |= NO_FCOE_FLAG; bnx2x_init_one() 13498 /* Set bp->num_queues for MSI-X mode*/ bnx2x_init_one() 13499 bnx2x_set_num_queues(bp); bnx2x_init_one() 13504 rc = bnx2x_set_int_mode(bp); bnx2x_init_one() 13519 if (!NO_FCOE(bp)) { bnx2x_init_one() 13522 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); bnx2x_init_one() 13525 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || bnx2x_init_one() 13533 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), bnx2x_init_one() 13539 dev->base_addr, bp->pdev->irq, dev->dev_addr); bnx2x_init_one() 13541 bnx2x_register_phc(bp); bnx2x_init_one() 13546 bnx2x_disable_pcie_error_reporting(bp); bnx2x_init_one() 13548 if (bp->regview) bnx2x_init_one() 13549 iounmap(bp->regview); bnx2x_init_one() 13551 if (IS_PF(bp) && bp->doorbells) bnx2x_init_one() 13552 iounmap(bp->doorbells); bnx2x_init_one() 13566 struct bnx2x *bp, __bnx2x_remove() 13569 if (bp->ptp_clock) { __bnx2x_remove() 13570 ptp_clock_unregister(bp->ptp_clock); __bnx2x_remove() 13571 bp->ptp_clock = NULL; __bnx2x_remove() 13575 if (!NO_FCOE(bp)) { __bnx2x_remove() 13577 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); __bnx2x_remove() 13583 bnx2x_dcbnl_update_applist(bp, true); __bnx2x_remove() 13586 if (IS_PF(bp) && __bnx2x_remove() 13587 !BP_NOMCP(bp) && __bnx2x_remove() 13588 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) __bnx2x_remove() 13589 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); __bnx2x_remove() 13600 bnx2x_iov_remove_one(bp); __bnx2x_remove() 13603 if (IS_PF(bp)) { __bnx2x_remove() 13604 bnx2x_set_power_state(bp, PCI_D0); __bnx2x_remove() 13609 bnx2x_reset_endianity(bp); __bnx2x_remove() 13613 bnx2x_disable_msi(bp); __bnx2x_remove() 13616 if (IS_PF(bp)) __bnx2x_remove() 13617 bnx2x_set_power_state(bp, PCI_D3hot); __bnx2x_remove() 13620 cancel_delayed_work_sync(&bp->sp_rtnl_task); __bnx2x_remove() 13623 if (IS_VF(bp)) __bnx2x_remove() 13624 bnx2x_vfpf_release(bp); __bnx2x_remove() 13628 pci_wake_from_d3(pdev, bp->wol); __bnx2x_remove() 13632 bnx2x_disable_pcie_error_reporting(bp); __bnx2x_remove() 13634 if (bp->regview) __bnx2x_remove() 13635 iounmap(bp->regview); __bnx2x_remove() 13640 if (IS_PF(bp)) { __bnx2x_remove() 13641 if (bp->doorbells) __bnx2x_remove() 13642 iounmap(bp->doorbells); __bnx2x_remove() 13644 bnx2x_release_firmware(bp); __bnx2x_remove() 13646 bnx2x_vf_pci_dealloc(bp); __bnx2x_remove() 13648 bnx2x_free_mem_bp(bp); __bnx2x_remove() 
13662 struct bnx2x *bp; bnx2x_remove_one() local 13668 bp = netdev_priv(dev); bnx2x_remove_one() 13670 __bnx2x_remove(pdev, dev, bp, true); bnx2x_remove_one() 13673 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_eeh_nic_unload() argument 13675 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bnx2x_eeh_nic_unload() 13677 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_eeh_nic_unload() 13679 if (CNIC_LOADED(bp)) bnx2x_eeh_nic_unload() 13680 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); bnx2x_eeh_nic_unload() 13683 bnx2x_tx_disable(bp); bnx2x_eeh_nic_unload() 13685 bnx2x_del_all_napi(bp); bnx2x_eeh_nic_unload() 13686 if (CNIC_LOADED(bp)) bnx2x_eeh_nic_unload() 13687 bnx2x_del_all_napi_cnic(bp); bnx2x_eeh_nic_unload() 13688 netdev_reset_tc(bp->dev); bnx2x_eeh_nic_unload() 13690 del_timer_sync(&bp->timer); bnx2x_eeh_nic_unload() 13691 cancel_delayed_work_sync(&bp->sp_task); bnx2x_eeh_nic_unload() 13692 cancel_delayed_work_sync(&bp->period_task); bnx2x_eeh_nic_unload() 13694 if (!down_timeout(&bp->stats_lock, HZ / 10)) { bnx2x_eeh_nic_unload() 13695 bp->stats_state = STATS_STATE_DISABLED; bnx2x_eeh_nic_unload() 13696 up(&bp->stats_lock); bnx2x_eeh_nic_unload() 13699 bnx2x_save_statistics(bp); bnx2x_eeh_nic_unload() 13701 netif_carrier_off(bp->dev); bnx2x_eeh_nic_unload() 13718 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_error_detected() local 13732 bnx2x_eeh_nic_unload(bp); bnx2x_io_error_detected() 13734 bnx2x_prev_path_mark_eeh(bp); bnx2x_io_error_detected() 13753 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_slot_reset() local 13770 bnx2x_set_power_state(bp, PCI_D0); bnx2x_io_slot_reset() 13776 bnx2x_init_shmem(bp); bnx2x_io_slot_reset() 13778 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { bnx2x_io_slot_reset() 13781 v = SHMEM2_RD(bp, bnx2x_io_slot_reset() 13782 drv_capabilities_flag[BP_FW_MB_IDX(bp)]); bnx2x_io_slot_reset() 13783 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], bnx2x_io_slot_reset() 13786 bnx2x_drain_tx_queues(bp); bnx2x_io_slot_reset() 13787 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); bnx2x_io_slot_reset() 13788 bnx2x_netif_stop(bp, 1); bnx2x_io_slot_reset() 13789 bnx2x_free_irq(bp); bnx2x_io_slot_reset() 13792 bnx2x_send_unload_done(bp, true); bnx2x_io_slot_reset() 13794 bp->sp_state = 0; bnx2x_io_slot_reset() 13795 bp->port.pmf = 0; bnx2x_io_slot_reset() 13797 bnx2x_prev_unload(bp); bnx2x_io_slot_reset() 13802 bnx2x_squeeze_objects(bp); bnx2x_io_slot_reset() 13803 bnx2x_free_skbs(bp); bnx2x_io_slot_reset() 13804 for_each_rx_queue(bp, i) bnx2x_io_slot_reset() 13805 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); bnx2x_io_slot_reset() 13806 bnx2x_free_fp_mem(bp); bnx2x_io_slot_reset() 13807 bnx2x_free_mem(bp); bnx2x_io_slot_reset() 13809 bp->state = BNX2X_STATE_CLOSED; bnx2x_io_slot_reset() 13815 if (bp->flags & AER_ENABLED) { bnx2x_io_slot_reset() 13835 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_resume() local 13837 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_io_resume() 13838 netdev_err(bp->dev, "Handling parity error recovery. 
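/*
 * bnx2x_eeh_nic_unload() above only *tries* to take the stats
 * semaphore (down_timeout(&bp->stats_lock, HZ / 10)): EEH teardown
 * must not block forever, so on timeout it simply proceeds. The
 * pattern, with try_lock_ms() as a hypothetical stand-in:
 */
static void stop_stats_best_effort(int (*try_lock_ms)(int),
				   void (*unlock)(void), int *stats_state)
{
	if (!try_lock_ms(100)) {	/* 0 == acquired, like down_timeout */
		*stats_state = 0;	/* i.e. STATS_STATE_DISABLED */
		unlock();
	}
	/* on timeout: skip the state change rather than stall recovery */
}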
Try again later\n"); bnx2x_io_resume() 13844 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_io_resume() 13848 bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_io_resume() 13864 struct bnx2x *bp; bnx2x_shutdown() local 13869 bp = netdev_priv(dev); bnx2x_shutdown() 13870 if (!bp) bnx2x_shutdown() 13881 __bnx2x_remove(pdev, dev, bp, false); bnx2x_shutdown() 13943 void bnx2x_notify_link_changed(struct bnx2x *bp) bnx2x_notify_link_changed() argument 13945 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); bnx2x_notify_link_changed() 13954 * @bp: driver handle 13960 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) bnx2x_set_iscsi_eth_mac_addr() argument 13965 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, bnx2x_set_iscsi_eth_mac_addr() 13966 &bp->iscsi_l2_mac_obj, true, bnx2x_set_iscsi_eth_mac_addr() 13971 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) bnx2x_cnic_sp_post() argument 13977 if (unlikely(bp->panic)) bnx2x_cnic_sp_post() 13981 spin_lock_bh(&bp->spq_lock); bnx2x_cnic_sp_post() 13982 BUG_ON(bp->cnic_spq_pending < count); bnx2x_cnic_sp_post() 13983 bp->cnic_spq_pending -= count; bnx2x_cnic_sp_post() 13985 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { bnx2x_cnic_sp_post() 13986 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) bnx2x_cnic_sp_post() 13989 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) bnx2x_cnic_sp_post() 13997 cxt_index = BNX2X_ISCSI_ETH_CID(bp) / bnx2x_cnic_sp_post() 13999 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - bnx2x_cnic_sp_post() 14001 bnx2x_set_ctx_validation(bp, bnx2x_cnic_sp_post() 14002 &bp->context[cxt_index]. bnx2x_cnic_sp_post() 14004 BNX2X_ISCSI_ETH_CID(bp)); bnx2x_cnic_sp_post() 14015 if (!atomic_read(&bp->cq_spq_left)) bnx2x_cnic_sp_post() 14018 atomic_dec(&bp->cq_spq_left); bnx2x_cnic_sp_post() 14020 if (!atomic_read(&bp->eq_spq_left)) bnx2x_cnic_sp_post() 14023 atomic_dec(&bp->eq_spq_left); bnx2x_cnic_sp_post() 14026 if (bp->cnic_spq_pending >= bnx2x_cnic_sp_post() 14027 bp->cnic_eth_dev.max_kwqe_pending) bnx2x_cnic_sp_post() 14030 bp->cnic_spq_pending++; bnx2x_cnic_sp_post() 14037 spe = bnx2x_sp_get_next(bp); bnx2x_cnic_sp_post() 14038 *spe = *bp->cnic_kwq_cons; bnx2x_cnic_sp_post() 14041 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); bnx2x_cnic_sp_post() 14043 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) bnx2x_cnic_sp_post() 14044 bp->cnic_kwq_cons = bp->cnic_kwq; bnx2x_cnic_sp_post() 14046 bp->cnic_kwq_cons++; bnx2x_cnic_sp_post() 14048 bnx2x_sp_prod_update(bp); bnx2x_cnic_sp_post() 14049 spin_unlock_bh(&bp->spq_lock); bnx2x_cnic_sp_post() 14055 struct bnx2x *bp = netdev_priv(dev); bnx2x_cnic_sp_queue() local 14059 if (unlikely(bp->panic)) { bnx2x_cnic_sp_queue() 14065 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && bnx2x_cnic_sp_queue() 14066 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { bnx2x_cnic_sp_queue() 14071 spin_lock_bh(&bp->spq_lock); bnx2x_cnic_sp_queue() 14076 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) bnx2x_cnic_sp_queue() 14079 *bp->cnic_kwq_prod = *spe; bnx2x_cnic_sp_queue() 14081 bp->cnic_kwq_pending++; bnx2x_cnic_sp_queue() 14087 bp->cnic_kwq_pending); bnx2x_cnic_sp_queue() 14089 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) bnx2x_cnic_sp_queue() 14090 bp->cnic_kwq_prod = bp->cnic_kwq; bnx2x_cnic_sp_queue() 14092 bp->cnic_kwq_prod++; bnx2x_cnic_sp_queue() 14095 spin_unlock_bh(&bp->spq_lock); bnx2x_cnic_sp_queue() 14097 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) bnx2x_cnic_sp_queue() 14098 
bnx2x_cnic_sp_post(bp, 0); bnx2x_cnic_sp_queue() 14103 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) bnx2x_cnic_ctl_send() argument 14108 mutex_lock(&bp->cnic_mutex); bnx2x_cnic_ctl_send() 14109 c_ops = rcu_dereference_protected(bp->cnic_ops, bnx2x_cnic_ctl_send() 14110 lockdep_is_held(&bp->cnic_mutex)); bnx2x_cnic_ctl_send() 14112 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); bnx2x_cnic_ctl_send() 14113 mutex_unlock(&bp->cnic_mutex); bnx2x_cnic_ctl_send() 14118 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) bnx2x_cnic_ctl_send_bh() argument 14124 c_ops = rcu_dereference(bp->cnic_ops); bnx2x_cnic_ctl_send_bh() 14126 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); bnx2x_cnic_ctl_send_bh() 14135 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) bnx2x_cnic_notify() argument 14141 return bnx2x_cnic_ctl_send(bp, &ctl); bnx2x_cnic_notify() 14144 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_cfc_comp() argument 14153 bnx2x_cnic_ctl_send_bh(bp, &ctl); bnx2x_cnic_cfc_comp() 14154 bnx2x_cnic_sp_post(bp, 0); bnx2x_cnic_cfc_comp() 14162 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) bnx2x_set_iscsi_eth_rx_mode() argument 14165 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); bnx2x_set_iscsi_eth_rx_mode() 14181 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode() 14186 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode() 14188 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) bnx2x_set_iscsi_eth_rx_mode() 14189 set_bit(sched_state, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode() 14192 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, bnx2x_set_iscsi_eth_rx_mode() 14199 struct bnx2x *bp = netdev_priv(dev); bnx2x_drv_ctl() local 14207 bnx2x_ilt_wr(bp, index, addr); bnx2x_drv_ctl() 14214 bnx2x_cnic_sp_post(bp, count); bnx2x_drv_ctl() 14220 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_drv_ctl() 14224 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, bnx2x_drv_ctl() 14226 cp->iscsi_l2_cid, BP_FUNC(bp), bnx2x_drv_ctl() 14227 bnx2x_sp(bp, mac_rdata), bnx2x_drv_ctl() 14228 bnx2x_sp_mapping(bp, mac_rdata), bnx2x_drv_ctl() 14230 &bp->sp_state, BNX2X_OBJ_TYPE_RX, bnx2x_drv_ctl() 14231 &bp->macs_pool); bnx2x_drv_ctl() 14234 rc = bnx2x_set_iscsi_eth_mac_addr(bp); bnx2x_drv_ctl() 14244 bnx2x_set_iscsi_eth_rx_mode(bp, true); bnx2x_drv_ctl() 14251 if (!bnx2x_wait_sp_comp(bp, sp_bits)) bnx2x_drv_ctl() 14263 bnx2x_set_iscsi_eth_rx_mode(bp, false); bnx2x_drv_ctl() 14270 if (!bnx2x_wait_sp_comp(bp, sp_bits)) bnx2x_drv_ctl() 14277 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, bnx2x_drv_ctl() 14285 atomic_add(count, &bp->cq_spq_left); bnx2x_drv_ctl() 14292 if (CHIP_IS_E3(bp)) { bnx2x_drv_ctl() 14293 int idx = BP_FW_MB_IDX(bp); bnx2x_drv_ctl() 14294 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); bnx2x_drv_ctl() 14295 int path = BP_PATH(bp); bnx2x_drv_ctl() 14296 int port = BP_PORT(bp); bnx2x_drv_ctl() 14306 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); bnx2x_drv_ctl() 14309 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || bnx2x_drv_ctl() 14310 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) bnx2x_drv_ctl() 14314 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); bnx2x_drv_ctl() 14323 REG_WR(bp, scratch_offset + i, bnx2x_drv_ctl() 14326 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_drv_ctl() 14333 if (CHIP_IS_E3(bp)) { bnx2x_drv_ctl() 14334 int idx = BP_FW_MB_IDX(bp); bnx2x_drv_ctl() 14337 cap = 
SHMEM2_RD(bp, drv_capabilities_flag[idx]); bnx2x_drv_ctl() 14342 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); bnx2x_drv_ctl() 14344 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_drv_ctl() 14356 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) bnx2x_setup_cnic_irq_info() argument 14358 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_setup_cnic_irq_info() 14360 if (bp->flags & USING_MSIX_FLAG) { bnx2x_setup_cnic_irq_info() 14363 cp->irq_arr[0].vector = bp->msix_table[1].vector; bnx2x_setup_cnic_irq_info() 14368 if (!CHIP_IS_E1x(bp)) bnx2x_setup_cnic_irq_info() 14369 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; bnx2x_setup_cnic_irq_info() 14371 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; bnx2x_setup_cnic_irq_info() 14373 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); bnx2x_setup_cnic_irq_info() 14374 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); bnx2x_setup_cnic_irq_info() 14375 cp->irq_arr[1].status_blk = bp->def_status_blk; bnx2x_setup_cnic_irq_info() 14382 void bnx2x_setup_cnic_info(struct bnx2x *bp) bnx2x_setup_cnic_info() argument 14384 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_setup_cnic_info() 14386 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_setup_cnic_info() 14387 bnx2x_cid_ilt_lines(bp); bnx2x_setup_cnic_info() 14388 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; bnx2x_setup_cnic_info() 14389 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); bnx2x_setup_cnic_info() 14390 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); bnx2x_setup_cnic_info() 14392 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", bnx2x_setup_cnic_info() 14393 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, bnx2x_setup_cnic_info() 14396 if (NO_ISCSI_OOO(bp)) bnx2x_setup_cnic_info() 14403 struct bnx2x *bp = netdev_priv(dev); bnx2x_register_cnic() local 14404 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_register_cnic() 14414 if (!CNIC_SUPPORT(bp)) { bnx2x_register_cnic() 14419 if (!CNIC_LOADED(bp)) { bnx2x_register_cnic() 14420 rc = bnx2x_load_cnic(bp); bnx2x_register_cnic() 14427 bp->cnic_enabled = true; bnx2x_register_cnic() 14429 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); bnx2x_register_cnic() 14430 if (!bp->cnic_kwq) bnx2x_register_cnic() 14433 bp->cnic_kwq_cons = bp->cnic_kwq; bnx2x_register_cnic() 14434 bp->cnic_kwq_prod = bp->cnic_kwq; bnx2x_register_cnic() 14435 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; bnx2x_register_cnic() 14437 bp->cnic_spq_pending = 0; bnx2x_register_cnic() 14438 bp->cnic_kwq_pending = 0; bnx2x_register_cnic() 14440 bp->cnic_data = data; bnx2x_register_cnic() 14444 cp->iro_arr = bp->iro_arr; bnx2x_register_cnic() 14446 bnx2x_setup_cnic_irq_info(bp); bnx2x_register_cnic() 14448 rcu_assign_pointer(bp->cnic_ops, ops); bnx2x_register_cnic() 14451 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_register_cnic() 14458 struct bnx2x *bp = netdev_priv(dev); bnx2x_unregister_cnic() local 14459 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_unregister_cnic() 14461 mutex_lock(&bp->cnic_mutex); bnx2x_unregister_cnic() 14463 RCU_INIT_POINTER(bp->cnic_ops, NULL); bnx2x_unregister_cnic() 14464 mutex_unlock(&bp->cnic_mutex); bnx2x_unregister_cnic() 14466 bp->cnic_enabled = false; bnx2x_unregister_cnic() 14467 kfree(bp->cnic_kwq); bnx2x_unregister_cnic() 14468 bp->cnic_kwq = NULL; bnx2x_unregister_cnic() 14475 struct bnx2x *bp = netdev_priv(dev); bnx2x_cnic_probe() local 14476 struct 
cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_cnic_probe() 14482 if (NO_ISCSI(bp) && NO_FCOE(bp)) bnx2x_cnic_probe() 14486 cp->chip_id = CHIP_ID(bp); bnx2x_cnic_probe() 14487 cp->pdev = bp->pdev; bnx2x_cnic_probe() 14488 cp->io_base = bp->regview; bnx2x_cnic_probe() 14489 cp->io_base2 = bp->doorbells; bnx2x_cnic_probe() 14492 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cnic_probe() 14493 bnx2x_cid_ilt_lines(bp); bnx2x_cnic_probe() 14495 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; bnx2x_cnic_probe() 14500 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); bnx2x_cnic_probe() 14502 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); bnx2x_cnic_probe() 14503 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); bnx2x_cnic_probe() 14505 if (NO_ISCSI_OOO(bp)) bnx2x_cnic_probe() 14508 if (NO_ISCSI(bp)) bnx2x_cnic_probe() 14511 if (NO_FCOE(bp)) bnx2x_cnic_probe() 14525 struct bnx2x *bp = fp->bp; bnx2x_rx_ustorm_prods_offset() local 14528 if (IS_VF(bp)) bnx2x_rx_ustorm_prods_offset() 14529 return bnx2x_vf_ustorm_prods_offset(bp, fp); bnx2x_rx_ustorm_prods_offset() 14530 else if (!CHIP_IS_E1x(bp)) bnx2x_rx_ustorm_prods_offset() 14533 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); bnx2x_rx_ustorm_prods_offset() 14543 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) bnx2x_pretend_func() argument 14547 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) bnx2x_pretend_func() 14551 pretend_reg = bnx2x_get_pretend_reg(bp); bnx2x_pretend_func() 14552 REG_WR(bp, pretend_reg, pretend_func_val); bnx2x_pretend_func() 14553 REG_RD(bp, pretend_reg); bnx2x_pretend_func() 14559 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); bnx2x_ptp_task() local 14560 int port = BP_PORT(bp); bnx2x_ptp_task() 14566 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_ptp_task() 14570 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : bnx2x_ptp_task() 14573 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : bnx2x_ptp_task() 14576 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_ptp_task() 14578 ns = timecounter_cyc2time(&bp->timecounter, timestamp); bnx2x_ptp_task() 14582 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); bnx2x_ptp_task() 14583 dev_kfree_skb_any(bp->ptp_tx_skb); bnx2x_ptp_task() 14584 bp->ptp_tx_skb = NULL; bnx2x_ptp_task() 14591 schedule_work(&bp->ptp_task); bnx2x_ptp_task() 14595 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) bnx2x_set_rx_ts() argument 14597 int port = BP_PORT(bp); bnx2x_set_rx_ts() 14600 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : bnx2x_set_rx_ts() 14603 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : bnx2x_set_rx_ts() 14607 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : bnx2x_set_rx_ts() 14610 ns = timecounter_cyc2time(&bp->timecounter, timestamp); bnx2x_set_rx_ts() 14621 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); bnx2x_cyclecounter_read() local 14622 int port = BP_PORT(bp); bnx2x_cyclecounter_read() 14626 REG_RD_DMAE(bp, port ? 
NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : bnx2x_cyclecounter_read() 14636 static void bnx2x_init_cyclecounter(struct bnx2x *bp) bnx2x_init_cyclecounter() argument 14638 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); bnx2x_init_cyclecounter() 14639 bp->cyclecounter.read = bnx2x_cyclecounter_read; bnx2x_init_cyclecounter() 14640 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); bnx2x_init_cyclecounter() 14641 bp->cyclecounter.shift = 1; bnx2x_init_cyclecounter() 14642 bp->cyclecounter.mult = 1; bnx2x_init_cyclecounter() 14645 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) bnx2x_send_reset_timesync_ramrod() argument 14655 func_params.f_obj = &bp->func_obj; bnx2x_send_reset_timesync_ramrod() 14662 return bnx2x_func_state_change(bp, &func_params); bnx2x_send_reset_timesync_ramrod() 14665 static int bnx2x_enable_ptp_packets(struct bnx2x *bp) bnx2x_enable_ptp_packets() argument 14680 for_each_eth_queue(bp, i) { for_each_eth_queue() 14681 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 14684 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; for_each_eth_queue() 14687 rc = bnx2x_queue_state_change(bp, &q_params); for_each_eth_queue() 14697 int bnx2x_configure_ptp_filters(struct bnx2x *bp) bnx2x_configure_ptp_filters() argument 14699 int port = BP_PORT(bp); bnx2x_configure_ptp_filters() 14702 if (!bp->hwtstamp_ioctl_called) bnx2x_configure_ptp_filters() 14705 switch (bp->tx_type) { bnx2x_configure_ptp_filters() 14707 bp->flags |= TX_TIMESTAMPING_EN; bnx2x_configure_ptp_filters() 14708 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters() 14710 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters() 14718 switch (bp->rx_filter) { bnx2x_configure_ptp_filters() 14723 bp->rx_filter = HWTSTAMP_FILTER_NONE; bnx2x_configure_ptp_filters() 14728 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; bnx2x_configure_ptp_filters() 14730 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters() 14732 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters() 14738 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; bnx2x_configure_ptp_filters() 14740 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters() 14742 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters() 14748 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; bnx2x_configure_ptp_filters() 14750 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters() 14752 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters() 14759 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; bnx2x_configure_ptp_filters() 14761 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters() 14763 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters() 14769 rc = bnx2x_enable_ptp_packets(bp); bnx2x_configure_ptp_filters() 14774 REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_configure_ptp_filters() 14780 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) bnx2x_hwtstamp_ioctl() argument 14798 bp->hwtstamp_ioctl_called = 1; bnx2x_hwtstamp_ioctl() 14799 bp->tx_type = config.tx_type; bnx2x_hwtstamp_ioctl() 14800 bp->rx_filter = config.rx_filter; bnx2x_hwtstamp_ioctl() 14802 rc = bnx2x_configure_ptp_filters(bp); bnx2x_hwtstamp_ioctl() 14806 config.rx_filter = bp->rx_filter; bnx2x_hwtstamp_ioctl() 14813 static int bnx2x_configure_ptp(struct bnx2x *bp) bnx2x_configure_ptp() argument 14815 int rc, port = BP_PORT(bp); bnx2x_configure_ptp() 14819 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp() 14821 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp() 14823 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_configure_ptp() 14825 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_configure_ptp() 14829 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_configure_ptp() 14833 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : bnx2x_configure_ptp() 14839 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); bnx2x_configure_ptp() 14842 rc = bnx2x_send_reset_timesync_ramrod(bp); bnx2x_configure_ptp() 14849 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : bnx2x_configure_ptp() 14851 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_configure_ptp() 14858 void bnx2x_init_ptp(struct bnx2x *bp) bnx2x_init_ptp() argument 14863 rc = bnx2x_configure_ptp(bp); bnx2x_init_ptp() 14870 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); bnx2x_init_ptp() 14876 if (!bp->timecounter_init_done) { bnx2x_init_ptp() 14877 bnx2x_init_cyclecounter(bp); bnx2x_init_ptp() 14878 timecounter_init(&bp->timecounter, &bp->cyclecounter, bnx2x_init_ptp() 14880 bp->timecounter_init_done = 1; bnx2x_init_ptp() 5392 bnx2x_cid_to_q_obj( struct bnx2x *bp, u32 cid) bnx2x_cid_to_q_obj() argument 13564 __bnx2x_remove(struct pci_dev *pdev, struct net_device *dev, struct bnx2x *bp, bool remove_netdev) __bnx2x_remove() argument
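The cyclecounter/timecounter pairing set up in bnx2x_init_cyclecounter() and bnx2x_init_ptp() above is a stock kernel pattern: the driver supplies a read callback for the free-running hardware counter plus mask/mult/shift scaling, and the timecounter layer converts raw cycle values to nanoseconds (timecounter_cyc2time() in bnx2x_ptp_task() and bnx2x_set_rx_ts()). A minimal sketch, assuming a hypothetical my_dev context in place of struct bnx2x and a stubbed counter read:

#include <linux/timecounter.h>
#include <linux/ktime.h>
#include <linux/string.h>

struct my_dev {		/* hypothetical stand-in for struct bnx2x */
	struct cyclecounter cyclecounter;
	struct timecounter timecounter;
};

static u64 my_cyclecounter_read(const struct cyclecounter *cc)
{
	/* Read the free-running 64-bit hardware counter here; bnx2x
	 * does this with REG_RD_DMAE on NIG_REG_TIMESYNC_GEN_REG. */
	return 0;
}

static void my_init_timecounter(struct my_dev *dev)
{
	memset(&dev->cyclecounter, 0, sizeof(dev->cyclecounter));
	dev->cyclecounter.read = my_cyclecounter_read;
	dev->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	dev->cyclecounter.mult = 1;	/* same scaling the driver picks */
	dev->cyclecounter.shift = 1;

	/* Anchor the counter to wall-clock "now", as bnx2x_init_ptp() does. */
	timecounter_init(&dev->timecounter, &dev->cyclecounter,
			 ktime_to_ns(ktime_get_real()));
}

After this, a raw hardware timestamp becomes nanoseconds with ns = timecounter_cyc2time(&dev->timecounter, raw), which is how the Tx and Rx paths above convert the NIG buffer values before handing them to the stack.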
|
H A D | bnx2x_stats.c | 42 static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) bnx2x_get_port_stats_dma_len() argument 47 if (SHMEM2_HAS(bp, sizeof_port_stats)) { bnx2x_get_port_stats_dma_len() 48 u32 size = SHMEM2_RD(bp, sizeof_port_stats); bnx2x_get_port_stats_dma_len() 64 if (bp->flags & BC_SUPPORTS_PFC_STATS) { bnx2x_get_port_stats_dma_len() 82 static void bnx2x_dp_stats(struct bnx2x *bp) bnx2x_dp_stats() argument 94 bp->fw_stats_req->hdr.cmd_num, bnx2x_dp_stats() 95 bp->fw_stats_req->hdr.reserved0, bnx2x_dp_stats() 96 bp->fw_stats_req->hdr.drv_stats_counter, bnx2x_dp_stats() 97 bp->fw_stats_req->hdr.reserved1, bnx2x_dp_stats() 98 bp->fw_stats_req->hdr.stats_counters_addrs.hi, bnx2x_dp_stats() 99 bp->fw_stats_req->hdr.stats_counters_addrs.lo); bnx2x_dp_stats() 101 for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { bnx2x_dp_stats() 109 i, bp->fw_stats_req->query[i].kind, bnx2x_dp_stats() 110 bp->fw_stats_req->query[i].index, bnx2x_dp_stats() 111 bp->fw_stats_req->query[i].funcID, bnx2x_dp_stats() 112 bp->fw_stats_req->query[i].reserved, bnx2x_dp_stats() 113 bp->fw_stats_req->query[i].address.hi, bnx2x_dp_stats() 114 bp->fw_stats_req->query[i].address.lo); bnx2x_dp_stats() 121 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be 124 static void bnx2x_storm_stats_post(struct bnx2x *bp) bnx2x_storm_stats_post() argument 128 if (bp->stats_pending) bnx2x_storm_stats_post() 131 bp->fw_stats_req->hdr.drv_stats_counter = bnx2x_storm_stats_post() 132 cpu_to_le16(bp->stats_counter++); bnx2x_storm_stats_post() 135 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); bnx2x_storm_stats_post() 138 bnx2x_iov_adjust_stats_req(bp); bnx2x_storm_stats_post() 139 bnx2x_dp_stats(bp); bnx2x_storm_stats_post() 142 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, bnx2x_storm_stats_post() 143 U64_HI(bp->fw_stats_req_mapping), bnx2x_storm_stats_post() 144 U64_LO(bp->fw_stats_req_mapping), bnx2x_storm_stats_post() 147 bp->stats_pending = 1; bnx2x_storm_stats_post() 150 static void bnx2x_hw_stats_post(struct bnx2x *bp) bnx2x_hw_stats_post() argument 152 struct dmae_command *dmae = &bp->stats_dmae; bnx2x_hw_stats_post() 153 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_hw_stats_post() 156 if (CHIP_REV_IS_SLOW(bp)) bnx2x_hw_stats_post() 160 if (bp->func_stx) bnx2x_hw_stats_post() 161 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, bnx2x_hw_stats_post() 162 sizeof(bp->func_stats)); bnx2x_hw_stats_post() 165 if (bp->executer_idx) { bnx2x_hw_stats_post() 166 int loader_idx = PMF_DMAE_C(bp); bnx2x_hw_stats_post() 167 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_hw_stats_post() 173 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); bnx2x_hw_stats_post() 174 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); bnx2x_hw_stats_post() 180 if (CHIP_IS_E1(bp)) bnx2x_hw_stats_post() 187 bnx2x_post_dmae(bp, dmae, loader_idx); bnx2x_hw_stats_post() 189 } else if (bp->func_stx) { bnx2x_hw_stats_post() 191 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); bnx2x_hw_stats_post() 195 static void bnx2x_stats_comp(struct bnx2x *bp) bnx2x_stats_comp() argument 197 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_comp() 216 static void bnx2x_stats_pmf_update(struct bnx2x *bp) bnx2x_stats_pmf_update() argument 220 int loader_idx = PMF_DMAE_C(bp); bnx2x_stats_pmf_update() 221 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_pmf_update() 224 if (!bp->port.pmf || !bp->port.port_stx) { bnx2x_stats_pmf_update() 229 bp->executer_idx = 0; 
bnx2x_stats_pmf_update() 231 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); bnx2x_stats_pmf_update() 233 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_stats_pmf_update() 235 dmae->src_addr_lo = bp->port.port_stx >> 2; bnx2x_stats_pmf_update() 237 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_stats_pmf_update() 238 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_stats_pmf_update() 244 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_stats_pmf_update() 246 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; bnx2x_stats_pmf_update() 248 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + bnx2x_stats_pmf_update() 250 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + bnx2x_stats_pmf_update() 252 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; bnx2x_stats_pmf_update() 254 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_stats_pmf_update() 255 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_stats_pmf_update() 259 bnx2x_hw_stats_post(bp); bnx2x_stats_pmf_update() 260 bnx2x_stats_comp(bp); bnx2x_stats_pmf_update() 263 static void bnx2x_port_stats_init(struct bnx2x *bp) bnx2x_port_stats_init() argument 266 int port = BP_PORT(bp); bnx2x_port_stats_init() 268 int loader_idx = PMF_DMAE_C(bp); bnx2x_port_stats_init() 270 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_init() 273 if (!bp->link_vars.link_up || !bp->port.pmf) { bnx2x_port_stats_init() 278 bp->executer_idx = 0; bnx2x_port_stats_init() 281 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_port_stats_init() 284 if (bp->port.port_stx) { bnx2x_port_stats_init() 286 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 288 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_init() 289 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_init() 290 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_init() 292 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_init() 298 if (bp->func_stx) { bnx2x_port_stats_init() 300 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 302 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_init() 303 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_init() 304 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_port_stats_init() 313 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, bnx2x_port_stats_init() 317 if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { bnx2x_port_stats_init() 321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init() 327 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init() 334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init() 341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init() 349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 354 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init() 356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init() 367 switch (bp->link_vars.mac_type) { bnx2x_port_stats_init() 374 if (CHIP_IS_E1x(bp)) { bnx2x_port_stats_init() 403 tx_len = 
sizeof(bp->slowpath-> bnx2x_port_stats_init() 405 rx_len = sizeof(bp->slowpath-> bnx2x_port_stats_init() 411 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 416 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init() 417 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init() 423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 428 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); bnx2x_port_stats_init() 430 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); bnx2x_port_stats_init() 438 if (!CHIP_IS_E3(bp)) { bnx2x_port_stats_init() 439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init() 446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init() 453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 458 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init() 460 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init() 468 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init() 469 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, bnx2x_port_stats_init() 474 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); bnx2x_port_stats_init() 475 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); bnx2x_port_stats_init() 478 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_init() 479 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_init() 485 static void bnx2x_func_stats_init(struct bnx2x *bp) bnx2x_func_stats_init() argument 487 struct dmae_command *dmae = &bp->stats_dmae; bnx2x_func_stats_init() 488 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_func_stats_init() 491 if (!bp->func_stx) { bnx2x_func_stats_init() 496 bp->executer_idx = 0; bnx2x_func_stats_init() 499 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_func_stats_init() 501 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_func_stats_init() 502 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_func_stats_init() 503 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_func_stats_init() 506 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_func_stats_init() 507 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_func_stats_init() 514 static void bnx2x_stats_start(struct bnx2x *bp) bnx2x_stats_start() argument 516 if (IS_PF(bp)) { bnx2x_stats_start() 517 if (bp->port.pmf) bnx2x_stats_start() 518 bnx2x_port_stats_init(bp); bnx2x_stats_start() 520 else if (bp->func_stx) bnx2x_stats_start() 521 bnx2x_func_stats_init(bp); bnx2x_stats_start() 523 bnx2x_hw_stats_post(bp); bnx2x_stats_start() 524 bnx2x_storm_stats_post(bp); bnx2x_stats_start() 528 static void bnx2x_stats_pmf_start(struct bnx2x *bp) bnx2x_stats_pmf_start() argument 530 bnx2x_stats_comp(bp); bnx2x_stats_pmf_start() 531 bnx2x_stats_pmf_update(bp); bnx2x_stats_pmf_start() 532 bnx2x_stats_start(bp); bnx2x_stats_pmf_start() 535 static void bnx2x_stats_restart(struct bnx2x *bp) bnx2x_stats_restart() argument 540 if (IS_VF(bp)) bnx2x_stats_restart() 543 bnx2x_stats_comp(bp); bnx2x_stats_restart() 544 bnx2x_stats_start(bp); bnx2x_stats_restart() 547 static void bnx2x_bmac_stats_update(struct bnx2x *bp) bnx2x_bmac_stats_update() argument 549 struct host_port_stats *pstats 
= bnx2x_sp(bp, port_stats); bnx2x_bmac_stats_update() 550 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_bmac_stats_update() 556 if (CHIP_IS_E1x(bp)) { bnx2x_bmac_stats_update() 557 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); bnx2x_bmac_stats_update() 592 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); bnx2x_bmac_stats_update() 653 static void bnx2x_mstat_stats_update(struct bnx2x *bp) bnx2x_mstat_stats_update() argument 655 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_mstat_stats_update() 656 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_mstat_stats_update() 658 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); bnx2x_mstat_stats_update() 743 static void bnx2x_emac_stats_update(struct bnx2x *bp) bnx2x_emac_stats_update() argument 745 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); bnx2x_emac_stats_update() 746 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_emac_stats_update() 747 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_emac_stats_update() 800 static int bnx2x_hw_stats_update(struct bnx2x *bp) bnx2x_hw_stats_update() argument 802 struct nig_stats *new = bnx2x_sp(bp, nig_stats); bnx2x_hw_stats_update() 803 struct nig_stats *old = &(bp->port.old_nig_stats); bnx2x_hw_stats_update() 804 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_hw_stats_update() 805 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_hw_stats_update() 811 switch (bp->link_vars.mac_type) { bnx2x_hw_stats_update() 813 bnx2x_bmac_stats_update(bp); bnx2x_hw_stats_update() 817 bnx2x_emac_stats_update(bp); bnx2x_hw_stats_update() 822 bnx2x_mstat_stats_update(bp); bnx2x_hw_stats_update() 839 if (!CHIP_IS_E3(bp)) { bnx2x_hw_stats_update() 855 if (CHIP_IS_E3(bp)) { bnx2x_hw_stats_update() 856 u32 lpi_reg = BP_PORT(bp) ? 
MISC_REG_CPMU_LP_SM_ENT_CNT_P1 bnx2x_hw_stats_update() 858 estats->eee_tx_lpi += REG_RD(bp, lpi_reg); bnx2x_hw_stats_update() 861 if (!BP_NOMCP(bp)) { bnx2x_hw_stats_update() 863 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); bnx2x_hw_stats_update() 874 static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) bnx2x_storm_stats_validate_counters() argument 876 struct stats_counter *counters = &bp->fw_stats_data->storm_counters; bnx2x_storm_stats_validate_counters() 881 cur_stats_counter = bp->stats_counter - 1; bnx2x_storm_stats_validate_counters() 887 le16_to_cpu(counters->xstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters() 894 le16_to_cpu(counters->ustats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters() 901 le16_to_cpu(counters->cstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters() 908 le16_to_cpu(counters->tstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters() 914 static int bnx2x_storm_stats_update(struct bnx2x *bp) bnx2x_storm_stats_update() argument 917 &bp->fw_stats_data->port.tstorm_port_statistics; bnx2x_storm_stats_update() 919 &bp->fw_stats_data->pf.tstorm_pf_statistics; bnx2x_storm_stats_update() 920 struct host_func_stats *fstats = &bp->func_stats; bnx2x_storm_stats_update() 921 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_storm_stats_update() 922 struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; bnx2x_storm_stats_update() 926 if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) bnx2x_storm_stats_update() 932 for_each_eth_queue(bp, i) { for_each_eth_queue() 933 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 935 &bp->fw_stats_data->queue_stats[i]. for_each_eth_queue() 938 &bnx2x_fp_stats(bp, fp)->old_tclient; for_each_eth_queue() 940 &bp->fw_stats_data->queue_stats[i]. for_each_eth_queue() 943 &bnx2x_fp_stats(bp, fp)->old_uclient; for_each_eth_queue() 945 &bp->fw_stats_data->queue_stats[i]. 
for_each_eth_queue() 948 &bnx2x_fp_stats(bp, fp)->old_xclient; for_each_eth_queue() 950 &bnx2x_fp_stats(bp, fp)->eth_q_stats; for_each_eth_queue() 952 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; for_each_eth_queue() 1097 if (bp->port.pmf) { 1098 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; 1107 bp->stats_pending = 0; 1112 static void bnx2x_net_stats_update(struct bnx2x *bp) bnx2x_net_stats_update() argument 1114 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_net_stats_update() 1115 struct net_device_stats *nstats = &bp->dev->stats; bnx2x_net_stats_update() 1134 for_each_rx_queue(bp, i) { for_each_rx_queue() 1136 &bp->fp_stats[i].old_tclient; for_each_rx_queue() 1139 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; 1182 static void bnx2x_drv_stats_update(struct bnx2x *bp) bnx2x_drv_stats_update() argument 1184 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_drv_stats_update() 1187 for_each_queue(bp, i) { for_each_queue() 1188 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; for_each_queue() 1190 &bp->fp_stats[i].eth_q_stats_old; for_each_queue() 1200 static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) bnx2x_edebug_stats_stopped() argument 1204 if (SHMEM2_HAS(bp, edebug_driver_if[1])) { bnx2x_edebug_stats_stopped() 1205 val = SHMEM2_RD(bp, edebug_driver_if[1]); bnx2x_edebug_stats_stopped() 1214 static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_stats_update() argument 1216 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_update() 1218 if (bnx2x_edebug_stats_stopped(bp)) bnx2x_stats_update() 1221 if (IS_PF(bp)) { bnx2x_stats_update() 1225 if (bp->port.pmf) bnx2x_stats_update() 1226 bnx2x_hw_stats_update(bp); bnx2x_stats_update() 1228 if (bnx2x_storm_stats_update(bp)) { bnx2x_stats_update() 1229 if (bp->stats_pending++ == 3) { bnx2x_stats_update() 1239 bnx2x_storm_stats_update(bp); bnx2x_stats_update() 1242 bnx2x_net_stats_update(bp); bnx2x_stats_update() 1243 bnx2x_drv_stats_update(bp); bnx2x_stats_update() 1246 if (IS_VF(bp)) bnx2x_stats_update() 1249 if (netif_msg_timer(bp)) { bnx2x_stats_update() 1250 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_stats_update() 1252 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", bnx2x_stats_update() 1256 bnx2x_hw_stats_post(bp); bnx2x_stats_update() 1257 bnx2x_storm_stats_post(bp); bnx2x_stats_update() 1260 static void bnx2x_port_stats_stop(struct bnx2x *bp) bnx2x_port_stats_stop() argument 1264 int loader_idx = PMF_DMAE_C(bp); bnx2x_port_stats_stop() 1265 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_stop() 1267 bp->executer_idx = 0; bnx2x_port_stats_stop() 1269 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); bnx2x_port_stats_stop() 1271 if (bp->port.port_stx) { bnx2x_port_stats_stop() 1273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_stop() 1274 if (bp->func_stx) bnx2x_port_stats_stop() 1281 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_stop() 1282 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_stop() 1283 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_stop() 1285 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_stop() 1286 if (bp->func_stx) { bnx2x_port_stats_stop() 1292 U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop() 1294 U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop() 1301 if (bp->func_stx) { bnx2x_port_stats_stop() 1303 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 
bnx2x_port_stats_stop() 1306 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_stop() 1307 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_stop() 1308 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_port_stats_stop() 1311 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop() 1312 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop() 1319 static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_stats_stop() argument 1323 bnx2x_stats_comp(bp); bnx2x_stats_stop() 1325 if (bp->port.pmf) bnx2x_stats_stop() 1326 update = (bnx2x_hw_stats_update(bp) == 0); bnx2x_stats_stop() 1328 update |= (bnx2x_storm_stats_update(bp) == 0); bnx2x_stats_stop() 1331 bnx2x_net_stats_update(bp); bnx2x_stats_stop() 1333 if (bp->port.pmf) bnx2x_stats_stop() 1334 bnx2x_port_stats_stop(bp); bnx2x_stats_stop() 1336 bnx2x_hw_stats_post(bp); bnx2x_stats_stop() 1337 bnx2x_stats_comp(bp); bnx2x_stats_stop() 1341 static void bnx2x_stats_do_nothing(struct bnx2x *bp) bnx2x_stats_do_nothing() argument 1346 void (*action)(struct bnx2x *bp); 1364 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) bnx2x_stats_handle() argument 1366 enum bnx2x_stats_state state = bp->stats_state; bnx2x_stats_handle() 1368 if (unlikely(bp->panic)) bnx2x_stats_handle() 1375 if (down_trylock(&bp->stats_lock)) { bnx2x_stats_handle() 1381 if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { bnx2x_stats_handle() 1388 bnx2x_stats_stm[state][event].action(bp); bnx2x_stats_handle() 1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state; bnx2x_stats_handle() 1391 up(&bp->stats_lock); bnx2x_stats_handle() 1393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) bnx2x_stats_handle() 1395 state, event, bp->stats_state); bnx2x_stats_handle() 1398 static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_port_stats_base_init() argument 1401 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_base_init() 1404 if (!bp->port.pmf || !bp->port.port_stx) { bnx2x_port_stats_base_init() 1409 bp->executer_idx = 0; bnx2x_port_stats_base_init() 1411 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_base_init() 1412 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_port_stats_base_init() 1414 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_base_init() 1415 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_base_init() 1416 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_base_init() 1418 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_base_init() 1419 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_base_init() 1420 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_base_init() 1424 bnx2x_hw_stats_post(bp); bnx2x_port_stats_base_init() 1425 bnx2x_stats_comp(bp); bnx2x_port_stats_base_init() 1432 static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) bnx2x_prep_fw_stats_req() argument 1436 struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; bnx2x_prep_fw_stats_req() 1441 stats_hdr->cmd_num = bp->fw_stats_num; bnx2x_prep_fw_stats_req() 1450 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req() 1461 memset(&bp->fw_stats_data->storm_counters, 0xff, bnx2x_prep_fw_stats_req() 1465 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req() 1468 cur_query_entry = 
&bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; bnx2x_prep_fw_stats_req() 1472 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req() 1474 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req() 1479 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req() 1482 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; bnx2x_prep_fw_stats_req() 1486 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req() 1487 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req() 1492 if (!NO_FCOE(bp)) { bnx2x_prep_fw_stats_req() 1493 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req() 1497 &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; bnx2x_prep_fw_stats_req() 1501 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req() 1502 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req() 1510 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req() 1516 if (!NO_FCOE(bp)) bnx2x_prep_fw_stats_req() 1521 for_each_eth_queue(bp, i) { for_each_eth_queue() 1523 &bp->fw_stats_req-> for_each_eth_queue() 1527 cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); for_each_eth_queue() 1528 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); for_each_eth_queue() 1538 if (!NO_FCOE(bp)) { 1540 &bp->fw_stats_req-> 1544 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); 1545 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1553 void bnx2x_memset_stats(struct bnx2x *bp) bnx2x_memset_stats() argument 1558 for_each_queue(bp, i) { for_each_queue() 1559 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; for_each_queue() 1567 if (bp->stats_init) { for_each_queue() 1575 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); 1577 if (bp->stats_init) { 1578 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); 1579 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); 1580 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); 1581 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); 1582 memset(&bp->func_stats, 0, sizeof(bp->func_stats)); 1585 bp->stats_state = STATS_STATE_DISABLED; 1587 if (bp->port.pmf && bp->port.port_stx) 1588 bnx2x_port_stats_base_init(bp); 1591 bp->stats_init = false; 1594 void bnx2x_stats_init(struct bnx2x *bp) bnx2x_stats_init() argument 1596 int /*abs*/port = BP_PORT(bp); bnx2x_stats_init() 1597 int mb_idx = BP_FW_MB_IDX(bp); bnx2x_stats_init() 1599 if (IS_VF(bp)) { bnx2x_stats_init() 1600 bnx2x_memset_stats(bp); bnx2x_stats_init() 1604 bp->stats_pending = 0; bnx2x_stats_init() 1605 bp->executer_idx = 0; bnx2x_stats_init() 1606 bp->stats_counter = 0; bnx2x_stats_init() 1609 if (!BP_NOMCP(bp)) { bnx2x_stats_init() 1610 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); bnx2x_stats_init() 1611 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); bnx2x_stats_init() 1614 bp->port.port_stx = 0; bnx2x_stats_init() 1615 bp->func_stx = 0; bnx2x_stats_init() 1618 bp->port.port_stx, bp->func_stx); bnx2x_stats_init() 1621 if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) bnx2x_stats_init() 1622 bnx2x_stats_handle(bp, STATS_EVENT_PMF); bnx2x_stats_init() 1624 port = BP_PORT(bp); bnx2x_stats_init() 1626 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); bnx2x_stats_init() 1627 bp->port.old_nig_stats.brb_discard = bnx2x_stats_init() 1628 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); bnx2x_stats_init() 1629 bp->port.old_nig_stats.brb_truncate = bnx2x_stats_init() 1630 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); 
bnx2x_stats_init() 1631 if (!CHIP_IS_E3(bp)) { bnx2x_stats_init() 1632 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, bnx2x_stats_init() 1633 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); bnx2x_stats_init() 1634 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, bnx2x_stats_init() 1635 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); bnx2x_stats_init() 1639 bnx2x_prep_fw_stats_req(bp); bnx2x_stats_init() 1642 if (bp->stats_init) { bnx2x_stats_init() 1643 if (bp->func_stx) { bnx2x_stats_init() 1644 memset(bnx2x_sp(bp, func_stats), 0, bnx2x_stats_init() 1646 bnx2x_func_stats_init(bp); bnx2x_stats_init() 1647 bnx2x_hw_stats_post(bp); bnx2x_stats_init() 1648 bnx2x_stats_comp(bp); bnx2x_stats_init() 1652 bnx2x_memset_stats(bp); bnx2x_stats_init() 1655 void bnx2x_save_statistics(struct bnx2x *bp) bnx2x_save_statistics() argument 1658 struct net_device_stats *nstats = &bp->dev->stats; bnx2x_save_statistics() 1661 for_each_eth_queue(bp, i) { for_each_eth_queue() 1662 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 1664 &bnx2x_fp_stats(bp, fp)->eth_q_stats; for_each_eth_queue() 1666 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; for_each_eth_queue() 1685 bp->net_stats_old.rx_dropped = nstats->rx_dropped; 1688 if (bp->port.pmf && IS_MF(bp)) { 1689 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1690 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; 1698 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, bnx2x_afex_collect_stats() argument 1703 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_afex_collect_stats() 1705 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; bnx2x_afex_collect_stats() 1717 &bp->fw_stats_data->fcoe; bnx2x_afex_collect_stats() 1721 for_each_eth_queue(bp, i) { for_each_eth_queue() 1722 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; for_each_eth_queue() 1816 if (!NO_FCOE(bp)) { 1952 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { 1968 int bnx2x_stats_safe_exec(struct bnx2x *bp, bnx2x_stats_safe_exec() argument 1977 rc = down_timeout(&bp->stats_lock, HZ / 10); bnx2x_stats_safe_exec() 1983 bnx2x_stats_comp(bp); bnx2x_stats_safe_exec() 1984 while (bp->stats_pending && cnt--) bnx2x_stats_safe_exec() 1985 if (bnx2x_storm_stats_update(bp)) bnx2x_stats_safe_exec() 1987 if (bp->stats_pending) { bnx2x_stats_safe_exec() 1999 up(&bp->stats_lock); bnx2x_stats_safe_exec()
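bnx2x_stats_handle() (line 1364 above) drives a table-driven state machine: bnx2x_stats_stm[state][event] yields an action callback plus the next state, executed under bp->stats_lock. A self-contained sketch of the same dispatch shape, with illustrative states, events, and actions rather than the driver's real table:

#include <stdio.h>

enum st { ST_DISABLED, ST_ENABLED, ST_MAX };
enum ev { EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

struct ctx { enum st state; };

static void do_start(struct ctx *c)   { (void)c; puts("start stats"); }
static void do_update(struct ctx *c)  { (void)c; puts("update stats"); }
static void do_stop(struct ctx *c)    { (void)c; puts("stop stats"); }
static void do_nothing(struct ctx *c) { (void)c; }

static const struct {
	void (*action)(struct ctx *c);
	enum st next_state;
} stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = {
		[EV_LINK_UP] = { do_start,   ST_ENABLED  },
		[EV_UPDATE]  = { do_nothing, ST_DISABLED },
		[EV_STOP]    = { do_nothing, ST_DISABLED },
	},
	[ST_ENABLED] = {
		[EV_LINK_UP] = { do_nothing, ST_ENABLED  },
		[EV_UPDATE]  = { do_update,  ST_ENABLED  },
		[EV_STOP]    = { do_stop,    ST_DISABLED },
	},
};

/* Same two-step dispatch as bnx2x_stats_handle(): run the action for
 * (state, event), then move to the table's next state. */
static void handle(struct ctx *c, enum ev event)
{
	stm[c->state][event].action(c);
	c->state = stm[c->state][event].next_state;
}

int main(void)
{
	struct ctx c = { ST_DISABLED };

	handle(&c, EV_LINK_UP);	/* start */
	handle(&c, EV_UPDATE);	/* update */
	handle(&c, EV_STOP);	/* stop */
	return 0;
}

The driver adds locking on top of this shape: a non-blocking down_trylock() for the periodic UPDATE event so the timer never stalls, and down_timeout() for the rarer state-changing events, as seen at lines 1375-1391.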
|
H A D | bnx2x_init_ops.h | 20 #define BP_ILT(bp) NULL 24 #define BP_FUNC(bp) 0 28 #define BP_PORT(bp) 0 43 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); 44 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 45 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, 49 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, bnx2x_init_str_wr() argument 55 REG_WR(bp, addr + i*4, data[i]); bnx2x_init_str_wr() 58 static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, bnx2x_init_ind_wr() argument 64 bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); bnx2x_init_ind_wr() 67 static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, bnx2x_write_big_buf() argument 70 if (bp->dmae_ready) bnx2x_write_big_buf() 71 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); bnx2x_write_big_buf() 74 else if (wb && CHIP_IS_E1(bp)) bnx2x_write_big_buf() 75 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf() 79 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf() 82 static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, bnx2x_init_fill() argument 89 memset(GUNZIP_BUF(bp), (u8)fill, buf_len); bnx2x_init_fill() 94 bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); bnx2x_init_fill() 98 static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) bnx2x_write_big_buf_wb() argument 100 if (bp->dmae_ready) bnx2x_write_big_buf_wb() 101 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); bnx2x_write_big_buf_wb() 104 else if (CHIP_IS_E1(bp)) bnx2x_write_big_buf_wb() 105 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf_wb() 109 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf_wb() 112 static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, bnx2x_init_wr_64() argument 125 u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i; bnx2x_init_wr_64() 133 bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); bnx2x_init_wr_64() 151 static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, bnx2x_sel_blob() argument 155 data = INIT_TSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob() 158 data = INIT_CSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob() 161 data = INIT_USEM_INT_TABLE_DATA(bp); bnx2x_sel_blob() 164 data = INIT_XSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob() 167 data = INIT_TSEM_PRAM_DATA(bp); bnx2x_sel_blob() 170 data = INIT_CSEM_PRAM_DATA(bp); bnx2x_sel_blob() 173 data = INIT_USEM_PRAM_DATA(bp); bnx2x_sel_blob() 176 data = INIT_XSEM_PRAM_DATA(bp); bnx2x_sel_blob() 181 static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, bnx2x_init_wr_wb() argument 184 if (bp->dmae_ready) bnx2x_init_wr_wb() 185 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); bnx2x_init_wr_wb() 188 else if (CHIP_IS_E1(bp)) bnx2x_init_wr_wb() 189 bnx2x_init_ind_wr(bp, addr, data, len); bnx2x_init_wr_wb() 193 bnx2x_init_str_wr(bp, addr, data, len); bnx2x_init_wr_wb() 196 static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, bnx2x_wr_64() argument 203 REG_WR_DMAE_LEN(bp, reg, wb_write, 2); bnx2x_wr_64() 205 static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, bnx2x_init_wr_zp() argument 212 data = bnx2x_sel_blob(bp, addr, data) + blob_off*4; bnx2x_init_wr_zp() 214 rc = bnx2x_gunzip(bp, data, len); bnx2x_init_wr_zp() 219 len = GUNZIP_OUTLEN(bp); bnx2x_init_wr_zp() 221 ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32) bnx2x_init_wr_zp() 222 cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); bnx2x_init_wr_zp() 224 bnx2x_write_big_buf_wb(bp, addr, len); bnx2x_init_wr_zp() 227 static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 
stage) bnx2x_init_block() argument 230 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, bnx2x_init_block() 233 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, bnx2x_init_block() 243 data_base = INIT_DATA(bp); bnx2x_init_block() 247 op = (const union init_op *)&(INIT_OPS(bp)[op_idx]); bnx2x_init_block() 260 REG_RD(bp, addr); bnx2x_init_block() 263 REG_WR(bp, addr, op->write.val); bnx2x_init_block() 266 bnx2x_init_str_wr(bp, addr, data, len); bnx2x_init_block() 269 bnx2x_init_wr_wb(bp, addr, data, len); bnx2x_init_block() 272 bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); bnx2x_init_block() 275 bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); bnx2x_init_block() 278 bnx2x_init_wr_zp(bp, addr, len, bnx2x_init_block() 282 bnx2x_init_wr_64(bp, addr, data, len); bnx2x_init_block() 288 if ((INIT_MODE_FLAGS(bp) & bnx2x_init_block() 297 if ((INIT_MODE_FLAGS(bp) & bnx2x_init_block() 473 static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, bnx2x_init_pxp_arb() argument 488 if (CHIP_REV_IS_FPGA(bp)) { bnx2x_init_pxp_arb() 495 REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); bnx2x_init_pxp_arb() 496 REG_WR(bp, read_arb_addr[i].add, bnx2x_init_pxp_arb() 498 REG_WR(bp, read_arb_addr[i].ubound, bnx2x_init_pxp_arb() 506 REG_WR(bp, write_arb_addr[i].l, bnx2x_init_pxp_arb() 509 REG_WR(bp, write_arb_addr[i].add, bnx2x_init_pxp_arb() 512 REG_WR(bp, write_arb_addr[i].ubound, bnx2x_init_pxp_arb() 516 val = REG_RD(bp, write_arb_addr[i].l); bnx2x_init_pxp_arb() 517 REG_WR(bp, write_arb_addr[i].l, bnx2x_init_pxp_arb() 520 val = REG_RD(bp, write_arb_addr[i].add); bnx2x_init_pxp_arb() 521 REG_WR(bp, write_arb_addr[i].add, bnx2x_init_pxp_arb() 524 val = REG_RD(bp, write_arb_addr[i].ubound); bnx2x_init_pxp_arb() 525 REG_WR(bp, write_arb_addr[i].ubound, bnx2x_init_pxp_arb() 533 REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val); bnx2x_init_pxp_arb() 538 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); bnx2x_init_pxp_arb() 540 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); bnx2x_init_pxp_arb() 541 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order); bnx2x_init_pxp_arb() 542 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); bnx2x_init_pxp_arb() 543 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); bnx2x_init_pxp_arb() 545 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) bnx2x_init_pxp_arb() 546 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); bnx2x_init_pxp_arb() 548 if (CHIP_IS_E3(bp)) bnx2x_init_pxp_arb() 549 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); bnx2x_init_pxp_arb() 550 else if (CHIP_IS_E2(bp)) bnx2x_init_pxp_arb() 551 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); bnx2x_init_pxp_arb() 553 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); bnx2x_init_pxp_arb() 555 if (!CHIP_IS_E1(bp)) { bnx2x_init_pxp_arb() 562 if (!CHIP_IS_E1H(bp)) { bnx2x_init_pxp_arb() 565 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); bnx2x_init_pxp_arb() 568 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); bnx2x_init_pxp_arb() 571 REG_WR(bp, PXP2_REG_WR_HC_MPS, val); bnx2x_init_pxp_arb() 572 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); bnx2x_init_pxp_arb() 573 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); bnx2x_init_pxp_arb() 574 REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val); bnx2x_init_pxp_arb() 575 REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val); bnx2x_init_pxp_arb() 576 REG_WR(bp, PXP2_REG_WR_QM_MPS, val); bnx2x_init_pxp_arb() 577 REG_WR(bp, PXP2_REG_WR_TM_MPS, val); bnx2x_init_pxp_arb() 578 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); bnx2x_init_pxp_arb() 579 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); bnx2x_init_pxp_arb() 580 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); bnx2x_init_pxp_arb() 585 val = REG_RD(bp, 
PCIE_REG_PCIER_TL_HDR_FC_ST); bnx2x_init_pxp_arb() 588 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20); bnx2x_init_pxp_arb() 616 static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, bnx2x_ilt_line_mem_op() argument 631 static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, bnx2x_ilt_client_mem_op() argument 635 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_mem_op() 645 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i], bnx2x_ilt_client_mem_op() 651 static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop) bnx2x_ilt_mem_op_cnic() argument 655 if (CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_mem_op_cnic() 656 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); bnx2x_ilt_mem_op_cnic() 658 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); bnx2x_ilt_mem_op_cnic() 663 static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) bnx2x_ilt_mem_op() argument 665 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); bnx2x_ilt_mem_op() 667 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); bnx2x_ilt_mem_op() 668 if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_mem_op() 669 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); bnx2x_ilt_mem_op() 674 static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, bnx2x_ilt_line_wr() argument 679 if (CHIP_IS_E1(bp)) bnx2x_ilt_line_wr() 684 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); bnx2x_ilt_line_wr() 687 static void bnx2x_ilt_line_init_op(struct bnx2x *bp, bnx2x_ilt_line_init_op() argument 698 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping); bnx2x_ilt_line_init_op() 702 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping); bnx2x_ilt_line_init_op() 707 static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp, bnx2x_ilt_boundry_init_op() argument 718 if (CHIP_IS_E1(bp)) { bnx2x_ilt_boundry_init_op() 733 REG_WR(bp, start_reg + BP_FUNC(bp)*4, bnx2x_ilt_boundry_init_op() 755 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); bnx2x_ilt_boundry_init_op() 756 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end)); bnx2x_ilt_boundry_init_op() 760 static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, bnx2x_ilt_client_init_op_ilt() argument 771 bnx2x_ilt_line_init_op(bp, ilt, i, initop); bnx2x_ilt_client_init_op_ilt() 774 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop); bnx2x_ilt_client_init_op_ilt() 777 static void bnx2x_ilt_client_init_op(struct bnx2x *bp, bnx2x_ilt_client_init_op() argument 780 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_init_op() 782 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop); bnx2x_ilt_client_init_op() 785 static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, bnx2x_ilt_client_id_init_op() argument 788 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_id_init_op() 791 bnx2x_ilt_client_init_op(bp, ilt_cli, initop); bnx2x_ilt_client_id_init_op() 794 static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_op_cnic() argument 796 if (CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_init_op_cnic() 797 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); bnx2x_ilt_init_op_cnic() 798 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); bnx2x_ilt_init_op_cnic() 801 static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_op() argument 803 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); bnx2x_ilt_init_op() 804 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); bnx2x_ilt_init_op() 805 if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_init_op() 806 bnx2x_ilt_client_id_init_op(bp, 
ILT_CLIENT_SRC, initop); bnx2x_ilt_init_op() 809 static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, bnx2x_ilt_init_client_psz() argument 812 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_init_client_psz() 822 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12)); bnx2x_ilt_init_client_psz() 833 static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_page_size() argument 835 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU, bnx2x_ilt_init_page_size() 837 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM, bnx2x_ilt_init_page_size() 839 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC, bnx2x_ilt_init_page_size() 841 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM, bnx2x_ilt_init_page_size() 853 static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_init_cid_count() argument 856 int port = BP_PORT(bp); bnx2x_qm_init_cid_count() 863 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, bnx2x_qm_init_cid_count() 872 static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_set_ptr_table() argument 878 REG_WR(bp, base_reg + i*4, bnx2x_qm_set_ptr_table() 880 bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2); bnx2x_qm_set_ptr_table() 885 static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_init_ptr_table() argument 895 bnx2x_qm_set_ptr_table(bp, qm_cid_count, bnx2x_qm_init_ptr_table() 897 if (CHIP_IS_E1H(bp)) bnx2x_qm_init_ptr_table() 898 bnx2x_qm_set_ptr_table(bp, qm_cid_count, bnx2x_qm_init_ptr_table() 911 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, bnx2x_src_init_t2() argument 915 int port = BP_PORT(bp); bnx2x_src_init_t2() 923 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); bnx2x_src_init_t2() 925 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16, bnx2x_src_init_t2() 928 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16, bnx2x_src_init_t2()
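bnx2x_ilt_line_wr() (line 674 above) programs one ILT line by writing a 64-bit page mapping as two 32-bit halves through bnx2x_wr_64(). A simplified sketch of that split write, assuming a plain MMIO window in place of the driver's REG_WR_DMAE_LEN path and omitting the ILT_ADDR1/ILT_ADDR2 encoding:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: write one 64-bit ILT line at index abs_idx into a
 * memory-mapped register window at "base", low half first. */
static void my_ilt_line_wr(void __iomem *base, int abs_idx, dma_addr_t map)
{
	u32 off = abs_idx * 8;		/* one 64-bit ILT line per index */

	writel(lower_32_bits(map), base + off);
	writel(upper_32_bits(map), base + off + 4);
}

The real driver additionally selects the register block by chip revision (PXP2_REG_RQ_ONCHIP_AT on E1 versus PXP2_REG_RQ_ONCHIP_AT_B0 otherwise), which is the CHIP_IS_E1(bp) branch visible in bnx2x_ilt_line_wr() above.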
|
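The ILT (internal lookup table) helpers excerpted above program one host page per table line: bnx2x_ilt_line_wr() computes the line's register offset and hands the page's DMA address to bnx2x_wr_64(), which the hardware consumes as two consecutive 32-bit words. The sketch below is a minimal user-space illustration of that low/high split only; reg_wr(), the register offsets, and the per-line stride are illustrative stand-ins, not the driver's real PXP2 register map or its ILT_ADDR1/ILT_ADDR2 encoding.

/* Build with: cc -o ilt_demo ilt_demo.c */
#include <stdio.h>
#include <stdint.h>

/* Stub register file standing in for BAR0 MMIO; offsets are invented
 * for the demo, not taken from the hardware manual. */
static uint32_t regs[0x100];

static void reg_wr(uint32_t off, uint32_t val)   /* stand-in for REG_WR() */
{
        regs[off / 4] = val;
        printf("REG_WR(0x%02x) = 0x%08x\n", (unsigned)off, (unsigned)val);
}

/* Stand-in for bnx2x_wr_64(): a 64-bit value is programmed as two
 * consecutive 32-bit writes, low word first. */
static void wr_64(uint32_t reg, uint32_t val_lo, uint32_t val_hi)
{
        reg_wr(reg, val_lo);
        reg_wr(reg + 4, val_hi);
}

/* Hypothetical ILT line write: one line maps one host page, and the
 * page's DMA address is split into the two halves wr_64() expects. */
static void ilt_line_wr(int abs_idx, uint64_t page_mapping)
{
        uint32_t reg = 0x40 + abs_idx * 8;       /* illustrative stride */

        wr_64(reg, (uint32_t)page_mapping, (uint32_t)(page_mapping >> 32));
}

int main(void)
{
        ilt_line_wr(0, 0x00000001fee1f000ULL);   /* fake 4K-aligned address */
        return 0;
}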
H A D | bnx2x_cmn.h | 37 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ 53 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 62 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 80 * @bp: driver handle 85 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); 90 * @bp: driver handle 93 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); 98 * @bp: driver handle 104 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 110 * @bp: driver handle 116 void bnx2x__init_func_obj(struct bnx2x *bp); 121 * @bp: driver handle 126 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 132 * @bp: driver handle 134 int bnx2x_setup_leading(struct bnx2x *bp); 139 * @bp: driver handle 145 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); 150 * @bp: driver handle 153 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); 158 * @bp: driver handle 160 void bnx2x_link_set(struct bnx2x *bp); 166 * @bp: driver handle 168 void bnx2x_force_link_reset(struct bnx2x *bp); 173 * @bp: driver handle 178 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); 183 * @bp: driver handle 185 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox 188 void bnx2x_drv_pulse(struct bnx2x *bp); 193 * @bp: driver handle 200 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 204 void bnx2x_pf_disable(struct bnx2x *bp); 205 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); 210 * @bp: driver handle 212 void bnx2x__link_status_update(struct bnx2x *bp); 217 * @bp: driver handle 219 void bnx2x_link_report(struct bnx2x *bp); 222 void __bnx2x_link_report(struct bnx2x *bp); 227 * @bp: driver handle 231 u16 bnx2x_get_mf_speed(struct bnx2x *bp); 252 * @bp: driver handle 255 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); 260 * @bp: driver handle 262 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 267 * @bp: driver handle 269 void bnx2x_setup_cnic_info(struct bnx2x *bp); 274 * @bp: driver handle 276 void bnx2x_int_enable(struct bnx2x *bp); 281 * @bp: driver handle 287 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 292 * @bp: driver handle 300 void bnx2x_nic_init_cnic(struct bnx2x *bp); 305 * @bp: driver handle 312 void bnx2x_pre_irq_nic_init(struct bnx2x *bp); 317 * @bp: driver handle 325 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code); 329 * @bp: driver handle 331 int bnx2x_alloc_mem_cnic(struct bnx2x *bp); 335 * @bp: driver handle 337 int bnx2x_alloc_mem(struct bnx2x *bp); 342 * @bp: driver handle 344 void bnx2x_free_mem_cnic(struct bnx2x *bp); 348 * @bp: driver handle 350 void bnx2x_free_mem(struct bnx2x *bp); 355 * @bp: driver handle 357 void bnx2x_set_num_queues(struct bnx2x *bp); 362 * @bp: driver handle 370 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link); 375 * @bp: driver handle 378 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); 383 * @bp: driver handle 386 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); 391 * @bp: driver handle 393 int bnx2x_release_leader_lock(struct bnx2x *bp); 398 * @bp: driver handle 403 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); 411 * If bp->state is OPEN, should be called with 414 void bnx2x_set_rx_mode_inner(struct bnx2x *bp); 417 void bnx2x_set_pf_load(struct bnx2x *bp); 418 bool bnx2x_clear_pf_load(struct bnx2x *bp); 419 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); 420 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); 421 void 
bnx2x_set_reset_in_progress(struct bnx2x *bp); 422 void bnx2x_set_reset_global(struct bnx2x *bp); 423 void bnx2x_disable_close_the_gate(struct bnx2x *bp); 424 int bnx2x_init_hw_func_cnic(struct bnx2x *bp); 437 * @bp: driver handle 439 void bnx2x_ilt_set_info(struct bnx2x *bp); 445 * @bp: driver handle 447 void bnx2x_ilt_set_info_cnic(struct bnx2x *bp); 452 * @bp: driver handle 454 void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem); 459 * @bp: driver handle 464 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 469 * @bp: driver handle 472 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); 474 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); 477 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); 480 int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 497 static inline void bnx2x_update_rx_prod(struct bnx2x *bp, bnx2x_update_rx_prod() argument 520 REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, bnx2x_update_rx_prod() 536 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); 543 void bnx2x_free_irq(struct bnx2x *bp); 545 void bnx2x_free_fp_mem(struct bnx2x *bp); 546 void bnx2x_init_rx_rings(struct bnx2x *bp); 547 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp); 548 void bnx2x_free_skbs(struct bnx2x *bp); 549 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 550 void bnx2x_netif_start(struct bnx2x *bp); 551 int bnx2x_load_cnic(struct bnx2x *bp); 556 * @bp: driver handle 561 int bnx2x_enable_msix(struct bnx2x *bp); 566 * @bp: driver handle 568 int bnx2x_enable_msi(struct bnx2x *bp); 580 * @bp: driver handle 582 int bnx2x_alloc_mem_bp(struct bnx2x *bp); 587 * @bp: driver handle 589 void bnx2x_free_mem_bp(struct bnx2x *bp); 631 static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, bnx2x_igu_ack_sb_gen() argument 645 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); bnx2x_igu_ack_sb_gen() 652 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, bnx2x_hc_ack_sb() argument 655 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + bnx2x_hc_ack_sb() 666 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); bnx2x_hc_ack_sb() 673 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, bnx2x_ack_sb() argument 676 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_ack_sb() 677 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); bnx2x_ack_sb() 681 if (CHIP_INT_MODE_IS_BC(bp)) bnx2x_ack_sb() 683 else if (igu_sb_id != bp->igu_dsb_id) bnx2x_ack_sb() 689 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update); bnx2x_ack_sb() 693 static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) bnx2x_hc_ack_int() argument 695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + bnx2x_hc_ack_int() 697 u32 result = REG_RD(bp, hc_addr); bnx2x_hc_ack_int() 703 static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) bnx2x_igu_ack_int() argument 706 u32 result = REG_RD(bp, igu_addr); bnx2x_igu_ack_int() 715 static inline u16 bnx2x_ack_int(struct bnx2x *bp) bnx2x_ack_int() argument 718 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_ack_int() 719 return bnx2x_hc_ack_int(bp); bnx2x_ack_int() 721 return bnx2x_igu_ack_int(bp); bnx2x_ack_int() 731 static inline u16 bnx2x_tx_avail(struct bnx2x *bp, bnx2x_tx_avail() argument 788 * @bp: driver handle 790 static inline void bnx2x_tx_disable(struct bnx2x *bp) bnx2x_tx_disable() argument 792 netif_tx_disable(bp->dev); bnx2x_tx_disable() 793 netif_carrier_off(bp->dev); bnx2x_tx_disable() 796 static inline void bnx2x_free_rx_sge(struct bnx2x *bp, bnx2x_free_rx_sge() argument 
807 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), bnx2x_free_rx_sge() 816 static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) bnx2x_del_all_napi_cnic() argument 820 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic() 821 napi_hash_del(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic() 822 netif_napi_del(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic() 826 static inline void bnx2x_del_all_napi(struct bnx2x *bp) bnx2x_del_all_napi() argument 830 for_each_eth_queue(bp, i) { for_each_eth_queue() 831 napi_hash_del(&bnx2x_fp(bp, i, napi)); for_each_eth_queue() 832 netif_napi_del(&bnx2x_fp(bp, i, napi)); for_each_eth_queue() 836 int bnx2x_set_int_mode(struct bnx2x *bp); 838 static inline void bnx2x_disable_msi(struct bnx2x *bp) bnx2x_disable_msi() argument 840 if (bp->flags & USING_MSIX_FLAG) { bnx2x_disable_msi() 841 pci_disable_msix(bp->pdev); bnx2x_disable_msi() 842 bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG); bnx2x_disable_msi() 843 } else if (bp->flags & USING_MSI_FLAG) { bnx2x_disable_msi() 844 pci_disable_msi(bp->pdev); bnx2x_disable_msi() 845 bp->flags &= ~USING_MSI_FLAG; bnx2x_disable_msi() 897 static inline int func_by_vn(struct bnx2x *bp, int vn) func_by_vn() argument 899 return 2 * vn + BP_PORT(bp); func_by_vn() 902 static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) bnx2x_config_rss_eth() argument 904 return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); bnx2x_config_rss_eth() 910 * @bp: driver handle 914 static inline int bnx2x_func_start(struct bnx2x *bp) bnx2x_func_start() argument 923 func_params.f_obj = &bp->func_obj; bnx2x_func_start() 927 start_params->mf_mode = bp->mf_mode; bnx2x_func_start() 928 start_params->sd_vlan_tag = bp->mf_ov; bnx2x_func_start() 930 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) bnx2x_func_start() 939 if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { bnx2x_func_start() 945 return bnx2x_func_state_change(bp, &func_params); bnx2x_func_start() 967 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, bnx2x_free_rx_sge_range() argument 976 bnx2x_free_rx_sge(bp, fp, i); bnx2x_free_rx_sge_range() 1001 struct bnx2x *bp = fp->bp; bnx2x_stats_id() local 1002 if (!CHIP_IS_E1x(bp)) { bnx2x_stats_id() 1005 return bp->cnic_base_cl_id + (bp->pf_num >> 1); bnx2x_stats_id() 1008 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; bnx2x_stats_id() 1014 struct bnx2x *bp = fp->bp; bnx2x_init_vlan_mac_fp_objs() local 1017 bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, bnx2x_init_vlan_mac_fp_objs() 1018 fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_init_vlan_mac_fp_objs() 1019 bnx2x_sp_mapping(bp, mac_rdata), bnx2x_init_vlan_mac_fp_objs() 1021 &bp->sp_state, obj_type, bnx2x_init_vlan_mac_fp_objs() 1022 &bp->macs_pool); bnx2x_init_vlan_mac_fp_objs() 1028 * @bp: driver handle 1033 static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) bnx2x_get_path_func_num() argument 1038 if (CHIP_IS_E1(bp)) bnx2x_get_path_func_num() 1044 if (CHIP_REV_IS_SLOW(bp)) { bnx2x_get_path_func_num() 1045 if (IS_MF(bp)) bnx2x_get_path_func_num() 1052 MF_CFG_RD(bp, bnx2x_get_path_func_num() 1053 func_mf_config[BP_PORT(bp) + 2 * i]. 
bnx2x_get_path_func_num() 1065 static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_bp_objs() argument 1068 bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); bnx2x_init_bp_objs() 1071 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, bnx2x_init_bp_objs() 1072 BP_FUNC(bp), BP_FUNC(bp), bnx2x_init_bp_objs() 1073 bnx2x_sp(bp, mcast_rdata), bnx2x_init_bp_objs() 1074 bnx2x_sp_mapping(bp, mcast_rdata), bnx2x_init_bp_objs() 1075 BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, bnx2x_init_bp_objs() 1079 bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_init_bp_objs() 1080 bnx2x_get_path_func_num(bp)); bnx2x_init_bp_objs() 1082 bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, bnx2x_init_bp_objs() 1083 bnx2x_get_path_func_num(bp)); bnx2x_init_bp_objs() 1086 bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, bnx2x_init_bp_objs() 1087 bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), bnx2x_init_bp_objs() 1088 bnx2x_sp(bp, rss_rdata), bnx2x_init_bp_objs() 1089 bnx2x_sp_mapping(bp, rss_rdata), bnx2x_init_bp_objs() 1090 BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, bnx2x_init_bp_objs() 1096 if (CHIP_IS_E1x(fp->bp)) bnx2x_fp_qzone_id() 1097 return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; bnx2x_fp_qzone_id() 1102 static inline void bnx2x_init_txdata(struct bnx2x *bp, bnx2x_init_txdata() argument 1111 txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size; bnx2x_init_txdata() 1117 static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) bnx2x_cnic_eth_cl_id() argument 1119 return bp->cnic_base_cl_id + cl_idx + bnx2x_cnic_eth_cl_id() 1120 (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; bnx2x_cnic_eth_cl_id() 1123 static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) bnx2x_cnic_fw_sb_id() argument 1126 return bp->base_fw_ndsb; bnx2x_cnic_fw_sb_id() 1129 static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) bnx2x_cnic_igu_sb_id() argument 1131 return bp->igu_base_sb; bnx2x_cnic_igu_sb_id() 1134 static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, bnx2x_clean_tx_queue() argument 1158 int bnx2x_get_link_cfg_idx(struct bnx2x *bp); 1160 static inline void __storm_memset_struct(struct bnx2x *bp, __storm_memset_struct() argument 1165 REG_WR(bp, addr + (i * 4), data[i]); __storm_memset_struct() 1171 * @bp: driver handle 1174 static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) bnx2x_wait_sp_comp() argument 1180 netif_addr_lock_bh(bp->dev); bnx2x_wait_sp_comp() 1181 if (!(bp->sp_state & mask)) { bnx2x_wait_sp_comp() 1182 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp() 1185 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp() 1192 netif_addr_lock_bh(bp->dev); bnx2x_wait_sp_comp() 1193 if (bp->sp_state & mask) { bnx2x_wait_sp_comp() 1195 bp->sp_state, mask); bnx2x_wait_sp_comp() 1196 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp() 1199 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp() 1207 * @bp: driver handle 1211 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 1214 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, 1216 void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1217 void bnx2x_release_phy_lock(struct bnx2x *bp); 1222 * @bp: driver handle 1226 static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) bnx2x_extract_max_cfg() argument 1254 * @bp: driver handle 1257 void bnx2x_get_iscsi_info(struct bnx2x *bp); 1262 * @bp: driver handle 1265 static inline void bnx2x_link_sync_notify(struct bnx2x *bp) bnx2x_link_sync_notify() 
argument 1271 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_link_sync_notify() 1272 if (vn == BP_VN(bp)) bnx2x_link_sync_notify() 1275 func = func_by_vn(bp, vn); bnx2x_link_sync_notify() 1276 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + bnx2x_link_sync_notify() 1284 * @bp: driver handle 1289 static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) bnx2x_update_drv_flags() argument 1291 if (SHMEM2_HAS(bp, drv_flags)) { bnx2x_update_drv_flags() 1293 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); bnx2x_update_drv_flags() 1294 drv_flags = SHMEM2_RD(bp, drv_flags); bnx2x_update_drv_flags() 1301 SHMEM2_WR(bp, drv_flags, drv_flags); bnx2x_update_drv_flags() 1303 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); bnx2x_update_drv_flags() 1312 * @bp: driver handle 1317 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); 1319 int bnx2x_drain_tx_queues(struct bnx2x *bp); 1320 void bnx2x_squeeze_objects(struct bnx2x *bp);
|
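bnx2x_cmn.h above declares bnx2x_tx_avail() alongside the producer/consumer helpers (bnx2x_update_rx_prod(), the ack_sb family). The usual pattern behind such a helper is free-running u16 indices whose unsigned difference yields the number of in-flight descriptors even after the indices wrap. The following is a minimal sketch of that accounting with an illustrative ring size and names, not the driver's exact arithmetic, which also reserves guard slots:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 512   /* illustrative; any size up to 65536 works */

/* Free-slot count for a prod/cons ring with free-running u16 indices:
 * unsigned subtraction wraps mod 65536, so no branches are needed. */
static uint16_t tx_avail(uint16_t prod, uint16_t cons)
{
        uint16_t used = (uint16_t)(prod - cons);   /* in-flight descriptors */

        assert(used <= RING_SIZE);
        return RING_SIZE - used;
}

int main(void)
{
        /* Indices that have wrapped past 65535 still give the right answer. */
        printf("%u\n", tx_avail(10, 5));           /* 507 */
        printf("%u\n", tx_avail(3, 65534));        /* also 512 - 5 = 507 */
        return 0;
}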
H A D | bnx2x_cmn.c | 34 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); 35 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); 36 static int bnx2x_alloc_fp_mem(struct bnx2x *bp); 39 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) bnx2x_add_all_napi_cnic() argument 44 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic() 45 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), for_each_rx_queue_cnic() 47 napi_hash_add(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic() 51 static void bnx2x_add_all_napi(struct bnx2x *bp) bnx2x_add_all_napi() argument 56 for_each_eth_queue(bp, i) { for_each_eth_queue() 57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), for_each_eth_queue() 59 napi_hash_add(&bnx2x_fp(bp, i, napi)); for_each_eth_queue() 63 static int bnx2x_calc_num_queues(struct bnx2x *bp) bnx2x_calc_num_queues() argument 71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); bnx2x_calc_num_queues() 78 * @bp: driver handle 82 * Makes sure the contents of the bp->fp[to].napi is kept 88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) bnx2x_move_fp() argument 90 struct bnx2x_fastpath *from_fp = &bp->fp[from]; bnx2x_move_fp() 91 struct bnx2x_fastpath *to_fp = &bp->fp[to]; bnx2x_move_fp() 92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; bnx2x_move_fp() 93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; bnx2x_move_fp() 94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; bnx2x_move_fp() 95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; bnx2x_move_fp() 123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; bnx2x_move_fp() 124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * bnx2x_move_fp() 125 (bp)->max_cos; bnx2x_move_fp() 126 if (from == FCOE_IDX(bp)) { bnx2x_move_fp() 131 memcpy(&bp->bnx2x_txq[new_txdata_index], bnx2x_move_fp() 132 &bp->bnx2x_txq[old_txdata_index], bnx2x_move_fp() 134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; bnx2x_move_fp() 140 * @bp: driver handle 145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) bnx2x_fill_fw_str() argument 147 if (IS_PF(bp)) { bnx2x_fill_fw_str() 151 bnx2x_get_ext_phy_fw_version(&bp->link_params, bnx2x_fill_fw_str() 153 strlcpy(buf, bp->fw_ver, buf_len); bnx2x_fill_fw_str() 154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), bnx2x_fill_fw_str() 156 (bp->common.bc_ver & 0xff0000) >> 16, bnx2x_fill_fw_str() 157 (bp->common.bc_ver & 0xff00) >> 8, bnx2x_fill_fw_str() 158 (bp->common.bc_ver & 0xff), bnx2x_fill_fw_str() 161 bnx2x_vf_fill_fw_str(bp, buf, buf_len); bnx2x_fill_fw_str() 168 * @bp: driver handle 171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) bnx2x_shrink_eth_fp() argument 173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_shrink_eth_fp() 178 for (cos = 1; cos < bp->max_cos; cos++) { bnx2x_shrink_eth_fp() 180 struct bnx2x_fastpath *fp = &bp->fp[i]; bnx2x_shrink_eth_fp() 183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], bnx2x_shrink_eth_fp() 185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; bnx2x_shrink_eth_fp() 195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, bnx2x_free_tx_pkt() argument 246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), bnx2x_free_tx_pkt() 254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), bnx2x_free_tx_pkt() 274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) bnx2x_tx_int() argument 281 if (unlikely(bp->panic)) bnx2x_tx_int() 285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); bnx2x_tx_int() 298 
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, bnx2x_tx_int() 334 (bp->state == BNX2X_STATE_OPEN) && bnx2x_tx_int() 335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) bnx2x_tx_int() 356 struct bnx2x *bp = fp->bp; bnx2x_update_sge_prod() local 408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp, bnx2x_get_rxhash() argument 413 if ((bp->dev->features & NETIF_F_RXHASH) && bnx2x_get_rxhash() 432 struct bnx2x *bp = fp->bp; bnx2x_tpa_start() local 445 mapping = dma_map_single(&bp->pdev->dev, bnx2x_tpa_start() 454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_tpa_start() 478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); bnx2x_tpa_start() 544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_alloc_rx_sge() argument 557 mapping = dma_map_page(&bp->pdev->dev, page, 0, bnx2x_alloc_rx_sge() 559 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_alloc_rx_sge() 574 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_fill_frag_skb() argument 626 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); bnx2x_fill_frag_skb() 628 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_fill_frag_skb() 633 dma_unmap_page(&bp->pdev->dev, bnx2x_fill_frag_skb() 684 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) bnx2x_gro_ip_csum() argument 696 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) bnx2x_gro_ipv6_csum() argument 708 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_gro_csum() argument 712 gro_func(bp, skb); bnx2x_gro_csum() 717 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_gro_receive() argument 724 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); bnx2x_gro_receive() 727 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); bnx2x_gro_receive() 739 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_tpa_stop() argument 765 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bnx2x_tpa_stop() 784 skb->protocol = eth_type_trans(skb, bp->dev); bnx2x_tpa_stop() 787 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, bnx2x_tpa_stop() 791 bnx2x_gro_receive(bp, fp, skb); bnx2x_tpa_stop() 809 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; bnx2x_tpa_stop() 812 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_alloc_rx_data() argument 824 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, bnx2x_alloc_rx_data() 827 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_alloc_rx_data() 868 struct bnx2x *bp = fp->bp; bnx2x_rx_int() local 876 if (unlikely(bp->panic)) bnx2x_rx_int() 906 if (unlikely(bp->panic)) bnx2x_rx_int() 985 bnx2x_tpa_stop(bp, fp, tpa_info, pages, bnx2x_rx_int() 988 if (bp->panic) bnx2x_rx_int() 998 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2x_rx_int() 1009 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; bnx2x_rx_int() 1016 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && bnx2x_rx_int() 1022 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_rx_int() 1028 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, bnx2x_rx_int() 1030 dma_unmap_single(&bp->pdev->dev, bnx2x_rx_int() 1037 bnx2x_fp_qstats(bp, fp)-> bnx2x_rx_int() 1045 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_rx_int() 1053 skb->protocol = eth_type_trans(skb, bp->dev); bnx2x_rx_int() 1056 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); bnx2x_rx_int() 1061 if (bp->dev->features & NETIF_F_RXCSUM) bnx2x_rx_int() 1063 
bnx2x_fp_qstats(bp, fp)); bnx2x_rx_int() 1070 bnx2x_set_rx_ts(bp, skb); bnx2x_rx_int() 1111 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, bnx2x_rx_int() 1123 struct bnx2x *bp = fp->bp; bnx2x_msix_fp_int() local 1130 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bnx2x_msix_fp_int() 1133 if (unlikely(bp->panic)) bnx2x_msix_fp_int() 1142 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); bnx2x_msix_fp_int() 1148 void bnx2x_acquire_phy_lock(struct bnx2x *bp) bnx2x_acquire_phy_lock() argument 1150 mutex_lock(&bp->port.phy_mutex); bnx2x_acquire_phy_lock() 1152 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); bnx2x_acquire_phy_lock() 1155 void bnx2x_release_phy_lock(struct bnx2x *bp) bnx2x_release_phy_lock() argument 1157 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); bnx2x_release_phy_lock() 1159 mutex_unlock(&bp->port.phy_mutex); bnx2x_release_phy_lock() 1163 u16 bnx2x_get_mf_speed(struct bnx2x *bp) bnx2x_get_mf_speed() argument 1165 u16 line_speed = bp->link_vars.line_speed; bnx2x_get_mf_speed() 1166 if (IS_MF(bp)) { bnx2x_get_mf_speed() 1167 u16 maxCfg = bnx2x_extract_max_cfg(bp, bnx2x_get_mf_speed() 1168 bp->mf_config[BP_VN(bp)]); bnx2x_get_mf_speed() 1173 if (IS_MF_SI(bp)) bnx2x_get_mf_speed() 1189 * @bp: driver handle 1194 static void bnx2x_fill_report_data(struct bnx2x *bp, bnx2x_fill_report_data() argument 1199 if (IS_PF(bp)) { bnx2x_fill_report_data() 1201 data->line_speed = bnx2x_get_mf_speed(bp); bnx2x_fill_report_data() 1204 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) bnx2x_fill_report_data() 1208 if (!BNX2X_NUM_ETH_QUEUES(bp)) bnx2x_fill_report_data() 1213 if (bp->link_vars.duplex == DUPLEX_FULL) bnx2x_fill_report_data() 1218 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) bnx2x_fill_report_data() 1223 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) bnx2x_fill_report_data() 1227 *data = bp->vf_link_vars; bnx2x_fill_report_data() 1234 * @bp: driver handle 1241 void bnx2x_link_report(struct bnx2x *bp) bnx2x_link_report() argument 1243 bnx2x_acquire_phy_lock(bp); bnx2x_link_report() 1244 __bnx2x_link_report(bp); bnx2x_link_report() 1245 bnx2x_release_phy_lock(bp); bnx2x_link_report() 1251 * @bp: driver handle 1256 void __bnx2x_link_report(struct bnx2x *bp) __bnx2x_link_report() argument 1261 if (IS_PF(bp) && !CHIP_IS_E1(bp)) __bnx2x_link_report() 1262 bnx2x_read_mf_cfg(bp); __bnx2x_link_report() 1265 bnx2x_fill_report_data(bp, &cur_data); __bnx2x_link_report() 1268 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || __bnx2x_link_report() 1270 &bp->last_reported_link.link_report_flags) && __bnx2x_link_report() 1275 bp->link_cnt++; __bnx2x_link_report() 1280 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); __bnx2x_link_report() 1283 if (IS_PF(bp)) __bnx2x_link_report() 1284 bnx2x_iov_link_update(bp); __bnx2x_link_report() 1288 netif_carrier_off(bp->dev); __bnx2x_link_report() 1289 netdev_err(bp->dev, "NIC Link is Down\n"); __bnx2x_link_report() 1295 netif_carrier_on(bp->dev); __bnx2x_link_report() 1321 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", __bnx2x_link_report() 1344 static void bnx2x_free_tpa_pool(struct bnx2x *bp, bnx2x_free_tpa_pool() argument 1359 dma_unmap_single(&bp->pdev->dev, bnx2x_free_tpa_pool() 1367 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) bnx2x_init_rx_rings_cnic() argument 1371 for_each_rx_queue_cnic(bp, j) { for_each_rx_queue_cnic() 1372 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_rx_queue_cnic() 1381 bnx2x_update_rx_prod(bp, fp, 
fp->rx_bd_prod, fp->rx_comp_prod, for_each_rx_queue_cnic() 1386 void bnx2x_init_rx_rings(struct bnx2x *bp) bnx2x_init_rx_rings() argument 1388 int func = BP_FUNC(bp); bnx2x_init_rx_rings() 1393 for_each_eth_queue(bp, j) { for_each_eth_queue() 1394 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue() 1397 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); for_each_eth_queue() 1401 for (i = 0; i < MAX_AGG_QS(bp); i++) { for_each_eth_queue() 1412 bnx2x_free_tpa_pool(bp, fp, i); for_each_eth_queue() 1430 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, for_each_eth_queue() 1437 bnx2x_free_rx_sge_range(bp, fp, for_each_eth_queue() 1439 bnx2x_free_tpa_pool(bp, fp, for_each_eth_queue() 1440 MAX_AGG_QS(bp)); for_each_eth_queue() 1452 for_each_eth_queue(bp, j) { for_each_eth_queue() 1453 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue() 1462 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, for_each_eth_queue() 1468 if (CHIP_IS_E1(bp)) { for_each_eth_queue() 1469 REG_WR(bp, BAR_USTRORM_INTMEM + for_each_eth_queue() 1472 REG_WR(bp, BAR_USTRORM_INTMEM + for_each_eth_queue() 1482 struct bnx2x *bp = fp->bp; bnx2x_free_tx_skbs_queue() local 1492 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), for_each_cos_in_tx_queue() 1498 netdev_get_tx_queue(bp->dev, for_each_cos_in_tx_queue() 1503 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) bnx2x_free_tx_skbs_cnic() argument 1507 for_each_tx_queue_cnic(bp, i) { for_each_tx_queue_cnic() 1508 bnx2x_free_tx_skbs_queue(&bp->fp[i]); for_each_tx_queue_cnic() 1512 static void bnx2x_free_tx_skbs(struct bnx2x *bp) bnx2x_free_tx_skbs() argument 1516 for_each_eth_queue(bp, i) { for_each_eth_queue() 1517 bnx2x_free_tx_skbs_queue(&bp->fp[i]); for_each_eth_queue() 1523 struct bnx2x *bp = fp->bp; bnx2x_free_rx_bds() local 1536 dma_unmap_single(&bp->pdev->dev, bnx2x_free_rx_bds() 1545 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) bnx2x_free_rx_skbs_cnic() argument 1549 for_each_rx_queue_cnic(bp, j) { for_each_rx_queue_cnic() 1550 bnx2x_free_rx_bds(&bp->fp[j]); for_each_rx_queue_cnic() 1554 static void bnx2x_free_rx_skbs(struct bnx2x *bp) bnx2x_free_rx_skbs() argument 1558 for_each_eth_queue(bp, j) { for_each_eth_queue() 1559 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue() 1564 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); for_each_eth_queue() 1568 static void bnx2x_free_skbs_cnic(struct bnx2x *bp) bnx2x_free_skbs_cnic() argument 1570 bnx2x_free_tx_skbs_cnic(bp); bnx2x_free_skbs_cnic() 1571 bnx2x_free_rx_skbs_cnic(bp); bnx2x_free_skbs_cnic() 1574 void bnx2x_free_skbs(struct bnx2x *bp) bnx2x_free_skbs() argument 1576 bnx2x_free_tx_skbs(bp); bnx2x_free_skbs() 1577 bnx2x_free_rx_skbs(bp); bnx2x_free_skbs() 1580 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) bnx2x_update_max_mf_config() argument 1583 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; bnx2x_update_max_mf_config() 1585 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { bnx2x_update_max_mf_config() 1593 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); bnx2x_update_max_mf_config() 1600 * @bp: driver handle 1603 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) bnx2x_free_msix_irqs() argument 1611 if (IS_PF(bp)) { bnx2x_free_msix_irqs() 1612 free_irq(bp->msix_table[offset].vector, bp->dev); bnx2x_free_msix_irqs() 1614 bp->msix_table[offset].vector); bnx2x_free_msix_irqs() 1618 if (CNIC_SUPPORT(bp)) { bnx2x_free_msix_irqs() 1624 for_each_eth_queue(bp, i) { for_each_eth_queue() 1628 i, bp->msix_table[offset].vector); for_each_eth_queue() 1630 
free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); for_each_eth_queue() 1634 void bnx2x_free_irq(struct bnx2x *bp) bnx2x_free_irq() argument 1636 if (bp->flags & USING_MSIX_FLAG && bnx2x_free_irq() 1637 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { bnx2x_free_irq() 1638 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); bnx2x_free_irq() 1641 if (IS_PF(bp)) bnx2x_free_irq() 1644 bnx2x_free_msix_irqs(bp, nvecs); bnx2x_free_irq() 1646 free_irq(bp->dev->irq, bp->dev); bnx2x_free_irq() 1650 int bnx2x_enable_msix(struct bnx2x *bp) bnx2x_enable_msix() argument 1655 if (IS_PF(bp)) { bnx2x_enable_msix() 1656 bp->msix_table[msix_vec].entry = msix_vec; bnx2x_enable_msix() 1658 bp->msix_table[0].entry); bnx2x_enable_msix() 1663 if (CNIC_SUPPORT(bp)) { bnx2x_enable_msix() 1664 bp->msix_table[msix_vec].entry = msix_vec; bnx2x_enable_msix() 1666 msix_vec, bp->msix_table[msix_vec].entry); bnx2x_enable_msix() 1671 for_each_eth_queue(bp, i) { for_each_eth_queue() 1672 bp->msix_table[msix_vec].entry = msix_vec; for_each_eth_queue() 1681 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1682 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); 1689 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); 1697 bp->flags |= USING_SINGLE_MSIX_FLAG; 1700 bp->num_ethernet_queues = 1; 1701 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 1714 bp->num_ethernet_queues -= diff; 1715 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 1718 bp->num_queues); 1721 bp->flags |= USING_MSIX_FLAG; 1728 bp->flags |= DISABLE_MSI_FLAG; 1733 static int bnx2x_req_msix_irqs(struct bnx2x *bp) bnx2x_req_msix_irqs() argument 1738 if (IS_PF(bp)) { bnx2x_req_msix_irqs() 1739 rc = request_irq(bp->msix_table[offset++].vector, bnx2x_req_msix_irqs() 1741 bp->dev->name, bp->dev); bnx2x_req_msix_irqs() 1748 if (CNIC_SUPPORT(bp)) bnx2x_req_msix_irqs() 1751 for_each_eth_queue(bp, i) { for_each_eth_queue() 1752 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 1754 bp->dev->name, i); for_each_eth_queue() 1756 rc = request_irq(bp->msix_table[offset].vector, for_each_eth_queue() 1760 bp->msix_table[offset].vector, rc); for_each_eth_queue() 1761 bnx2x_free_msix_irqs(bp, offset); for_each_eth_queue() 1768 i = BNX2X_NUM_ETH_QUEUES(bp); 1769 if (IS_PF(bp)) { 1770 offset = 1 + CNIC_SUPPORT(bp); 1771 netdev_info(bp->dev, 1773 bp->msix_table[0].vector, 1774 0, bp->msix_table[offset].vector, 1775 i - 1, bp->msix_table[offset + i - 1].vector); 1777 offset = CNIC_SUPPORT(bp); 1778 netdev_info(bp->dev, 1780 0, bp->msix_table[offset].vector, 1781 i - 1, bp->msix_table[offset + i - 1].vector); 1786 int bnx2x_enable_msi(struct bnx2x *bp) bnx2x_enable_msi() argument 1790 rc = pci_enable_msi(bp->pdev); bnx2x_enable_msi() 1795 bp->flags |= USING_MSI_FLAG; bnx2x_enable_msi() 1800 static int bnx2x_req_irq(struct bnx2x *bp) bnx2x_req_irq() argument 1805 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) bnx2x_req_irq() 1810 if (bp->flags & USING_MSIX_FLAG) bnx2x_req_irq() 1811 irq = bp->msix_table[0].vector; bnx2x_req_irq() 1813 irq = bp->pdev->irq; bnx2x_req_irq() 1815 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); bnx2x_req_irq() 1818 static int bnx2x_setup_irqs(struct bnx2x *bp) bnx2x_setup_irqs() argument 1821 if (bp->flags & USING_MSIX_FLAG && bnx2x_setup_irqs() 1822 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { bnx2x_setup_irqs() 1823 rc = bnx2x_req_msix_irqs(bp); bnx2x_setup_irqs() 1827 rc = bnx2x_req_irq(bp); bnx2x_setup_irqs() 1832 if (bp->flags & USING_MSI_FLAG) { bnx2x_setup_irqs() 1833 
bp->dev->irq = bp->pdev->irq; bnx2x_setup_irqs() 1834 netdev_info(bp->dev, "using MSI IRQ %d\n", bnx2x_setup_irqs() 1835 bp->dev->irq); bnx2x_setup_irqs() 1837 if (bp->flags & USING_MSIX_FLAG) { bnx2x_setup_irqs() 1838 bp->dev->irq = bp->msix_table[0].vector; bnx2x_setup_irqs() 1839 netdev_info(bp->dev, "using MSIX IRQ %d\n", bnx2x_setup_irqs() 1840 bp->dev->irq); bnx2x_setup_irqs() 1847 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) bnx2x_napi_enable_cnic() argument 1851 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic() 1852 bnx2x_fp_busy_poll_init(&bp->fp[i]); for_each_rx_queue_cnic() 1853 napi_enable(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic() 1857 static void bnx2x_napi_enable(struct bnx2x *bp) bnx2x_napi_enable() argument 1861 for_each_eth_queue(bp, i) { for_each_eth_queue() 1862 bnx2x_fp_busy_poll_init(&bp->fp[i]); for_each_eth_queue() 1863 napi_enable(&bnx2x_fp(bp, i, napi)); for_each_eth_queue() 1867 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) bnx2x_napi_disable_cnic() argument 1871 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic() 1872 napi_disable(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic() 1873 while (!bnx2x_fp_ll_disable(&bp->fp[i])) for_each_rx_queue_cnic() 1878 static void bnx2x_napi_disable(struct bnx2x *bp) bnx2x_napi_disable() argument 1882 for_each_eth_queue(bp, i) { for_each_eth_queue() 1883 napi_disable(&bnx2x_fp(bp, i, napi)); for_each_eth_queue() 1884 while (!bnx2x_fp_ll_disable(&bp->fp[i])) for_each_eth_queue() 1889 void bnx2x_netif_start(struct bnx2x *bp) bnx2x_netif_start() argument 1891 if (netif_running(bp->dev)) { bnx2x_netif_start() 1892 bnx2x_napi_enable(bp); bnx2x_netif_start() 1893 if (CNIC_LOADED(bp)) bnx2x_netif_start() 1894 bnx2x_napi_enable_cnic(bp); bnx2x_netif_start() 1895 bnx2x_int_enable(bp); bnx2x_netif_start() 1896 if (bp->state == BNX2X_STATE_OPEN) bnx2x_netif_start() 1897 netif_tx_wake_all_queues(bp->dev); bnx2x_netif_start() 1901 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_netif_stop() argument 1903 bnx2x_int_disable_sync(bp, disable_hw); bnx2x_netif_stop() 1904 bnx2x_napi_disable(bp); bnx2x_netif_stop() 1905 if (CNIC_LOADED(bp)) bnx2x_netif_stop() 1906 bnx2x_napi_disable_cnic(bp); bnx2x_netif_stop() 1912 struct bnx2x *bp = netdev_priv(dev); bnx2x_select_queue() local 1914 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { bnx2x_select_queue() 1928 return bnx2x_fcoe_tx(bp, txq_index); bnx2x_select_queue() 1932 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); bnx2x_select_queue() 1935 void bnx2x_set_num_queues(struct bnx2x *bp) bnx2x_set_num_queues() argument 1938 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); bnx2x_set_num_queues() 1941 if (IS_MF_STORAGE_ONLY(bp)) bnx2x_set_num_queues() 1942 bp->num_ethernet_queues = 1; bnx2x_set_num_queues() 1945 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ bnx2x_set_num_queues() 1946 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_set_num_queues() 1948 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); bnx2x_set_num_queues() 1954 * @bp: Driver handle 1958 * bp->max_cos. 
1973 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) bnx2x_set_real_num_queues() argument 1977 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; bnx2x_set_real_num_queues() 1978 rx = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_set_real_num_queues() 1981 if (include_cnic && !NO_FCOE(bp)) { bnx2x_set_real_num_queues() 1986 rc = netif_set_real_num_tx_queues(bp->dev, tx); bnx2x_set_real_num_queues() 1991 rc = netif_set_real_num_rx_queues(bp->dev, rx); bnx2x_set_real_num_queues() 2003 static void bnx2x_set_rx_buf_size(struct bnx2x *bp) bnx2x_set_rx_buf_size() argument 2007 for_each_queue(bp, i) { for_each_queue() 2008 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_queue() 2021 mtu = bp->dev->mtu; for_each_queue() 2035 static int bnx2x_init_rss(struct bnx2x *bp) bnx2x_init_rss() argument 2038 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_init_rss() 2043 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) bnx2x_init_rss() 2044 bp->rss_conf_obj.ind_table[i] = bnx2x_init_rss() 2045 bp->fp->cl_id + bnx2x_init_rss() 2056 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); bnx2x_init_rss() 2059 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, bnx2x_rss() argument 2067 * if (!is_eth_multi(bp)) bnx2x_rss() 2068 * bp->multi_mode = ETH_RSS_MODE_DISABLED; bnx2x_rss() 2088 if (!CHIP_IS_E1x(bp)) bnx2x_rss() 2106 if (IS_PF(bp)) bnx2x_rss() 2107 return bnx2x_config_rss(bp, &params); bnx2x_rss() 2109 return bnx2x_vfpf_config_rss(bp, &params); bnx2x_rss() 2112 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) bnx2x_init_hw() argument 2119 func_params.f_obj = &bp->func_obj; bnx2x_init_hw() 2124 return bnx2x_func_state_change(bp, &func_params); bnx2x_init_hw() 2131 void bnx2x_squeeze_objects(struct bnx2x *bp) bnx2x_squeeze_objects() argument 2136 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; bnx2x_squeeze_objects() 2147 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, bnx2x_squeeze_objects() 2155 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, bnx2x_squeeze_objects() 2161 rparam.mcast_obj = &bp->mcast_obj; bnx2x_squeeze_objects() 2168 netif_addr_lock_bh(bp->dev); bnx2x_squeeze_objects() 2169 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); bnx2x_squeeze_objects() 2175 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_squeeze_objects() 2180 netif_addr_unlock_bh(bp->dev); bnx2x_squeeze_objects() 2184 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_squeeze_objects() 2186 netif_addr_unlock_bh(bp->dev); bnx2x_squeeze_objects() 2190 #define LOAD_ERROR_EXIT(bp, label) \ 2192 (bp)->state = BNX2X_STATE_ERROR; \ 2196 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2198 bp->cnic_loaded = false; \ 2202 #define LOAD_ERROR_EXIT(bp, label) \ 2204 (bp)->state = BNX2X_STATE_ERROR; \ 2205 (bp)->panic = 1; \ 2208 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2210 bp->cnic_loaded = false; \ 2211 (bp)->panic = 1; \ 2216 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) bnx2x_free_fw_stats_mem() argument 2218 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bnx2x_free_fw_stats_mem() 2219 bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_free_fw_stats_mem() 2223 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) bnx2x_alloc_fw_stats_mem() argument 2226 int is_fcoe_stats = NO_FCOE(bp) ?
0 : 1; bnx2x_alloc_fw_stats_mem() 2229 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; bnx2x_alloc_fw_stats_mem() 2236 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; bnx2x_alloc_fw_stats_mem() 2239 * the VFs themselves. We don't include them in the bp->fw_stats_num as bnx2x_alloc_fw_stats_mem() 2243 if (IS_SRIOV(bp)) bnx2x_alloc_fw_stats_mem() 2244 vf_headroom = bnx2x_vf_headroom(bp); bnx2x_alloc_fw_stats_mem() 2252 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + bnx2x_alloc_fw_stats_mem() 2253 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? bnx2x_alloc_fw_stats_mem() 2257 bp->fw_stats_num, vf_headroom, num_groups); bnx2x_alloc_fw_stats_mem() 2258 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + bnx2x_alloc_fw_stats_mem() 2269 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + bnx2x_alloc_fw_stats_mem() 2275 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, bnx2x_alloc_fw_stats_mem() 2276 bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_alloc_fw_stats_mem() 2277 if (!bp->fw_stats) bnx2x_alloc_fw_stats_mem() 2281 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; bnx2x_alloc_fw_stats_mem() 2282 bp->fw_stats_req_mapping = bp->fw_stats_mapping; bnx2x_alloc_fw_stats_mem() 2283 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) bnx2x_alloc_fw_stats_mem() 2284 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); bnx2x_alloc_fw_stats_mem() 2285 bp->fw_stats_data_mapping = bp->fw_stats_mapping + bnx2x_alloc_fw_stats_mem() 2286 bp->fw_stats_req_sz; bnx2x_alloc_fw_stats_mem() 2289 U64_HI(bp->fw_stats_req_mapping), bnx2x_alloc_fw_stats_mem() 2290 U64_LO(bp->fw_stats_req_mapping)); bnx2x_alloc_fw_stats_mem() 2292 U64_HI(bp->fw_stats_data_mapping), bnx2x_alloc_fw_stats_mem() 2293 U64_LO(bp->fw_stats_data_mapping)); bnx2x_alloc_fw_stats_mem() 2297 bnx2x_free_fw_stats_mem(bp); bnx2x_alloc_fw_stats_mem() 2303 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) bnx2x_nic_load_request() argument 2308 bp->fw_seq = bnx2x_nic_load_request() 2309 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_nic_load_request() 2311 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); bnx2x_nic_load_request() 2314 bp->fw_drv_pulse_wr_seq = bnx2x_nic_load_request() 2315 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & bnx2x_nic_load_request() 2317 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); bnx2x_nic_load_request() 2321 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) bnx2x_nic_load_request() 2325 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); bnx2x_nic_load_request() 2347 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) bnx2x_compare_fw_ver() argument 2359 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); bnx2x_compare_fw_ver() 2379 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) bnx2x_nic_load_no_mcp() argument 2381 int path = BP_PATH(bp); bnx2x_nic_load_no_mcp() 2400 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) bnx2x_nic_load_pmf() argument 2405 bp->port.pmf = 1; bnx2x_nic_load_pmf() 2407 * writing to bp->port.pmf here and reading it from the bnx2x_nic_load_pmf() 2412 bp->port.pmf = 0; bnx2x_nic_load_pmf() 2415 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); bnx2x_nic_load_pmf() 2418 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) bnx2x_nic_load_afex_dcc() argument 2422 (bp->common.shmem2_base)) { bnx2x_nic_load_afex_dcc() 2423 if (SHMEM2_HAS(bp, dcc_support)) bnx2x_nic_load_afex_dcc() 2424 SHMEM2_WR(bp, dcc_support, 
bnx2x_nic_load_afex_dcc() 2427 if (SHMEM2_HAS(bp, afex_driver_support)) bnx2x_nic_load_afex_dcc() 2428 SHMEM2_WR(bp, afex_driver_support, bnx2x_nic_load_afex_dcc() 2433 bp->afex_def_vlan_tag = -1; bnx2x_nic_load_afex_dcc() 2439 * @bp: driver handle 2442 * Makes sure the contents of the bp->fp[index].napi is kept 2445 static void bnx2x_bz_fp(struct bnx2x *bp, int index) bnx2x_bz_fp() argument 2447 struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_bz_fp() 2461 fp->bp = bp; bnx2x_bz_fp() 2464 fp->max_cos = bp->max_cos; bnx2x_bz_fp() 2471 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; bnx2x_bz_fp() 2474 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * bnx2x_bz_fp() 2475 BNX2X_NUM_ETH_QUEUES(bp) + index]; bnx2x_bz_fp() 2480 if (bp->dev->features & NETIF_F_LRO) bnx2x_bz_fp() 2482 else if (bp->dev->features & NETIF_F_GRO && bnx2x_bz_fp() 2483 bnx2x_mtu_allows_gro(bp->dev->mtu)) bnx2x_bz_fp() 2488 /* We don't want TPA if it's disabled in bp bnx2x_bz_fp() 2491 if (bp->disable_tpa || IS_FCOE_FP(fp)) bnx2x_bz_fp() 2495 int bnx2x_load_cnic(struct bnx2x *bp) bnx2x_load_cnic() argument 2497 int i, rc, port = BP_PORT(bp); bnx2x_load_cnic() 2501 mutex_init(&bp->cnic_mutex); bnx2x_load_cnic() 2503 if (IS_PF(bp)) { bnx2x_load_cnic() 2504 rc = bnx2x_alloc_mem_cnic(bp); bnx2x_load_cnic() 2506 BNX2X_ERR("Unable to allocate bp memory for cnic\n"); bnx2x_load_cnic() 2507 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic() 2511 rc = bnx2x_alloc_fp_mem_cnic(bp); bnx2x_load_cnic() 2514 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic() 2518 rc = bnx2x_set_real_num_queues(bp, 1); bnx2x_load_cnic() 2521 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic() 2525 bnx2x_add_all_napi_cnic(bp); bnx2x_load_cnic() 2527 bnx2x_napi_enable_cnic(bp); bnx2x_load_cnic() 2529 rc = bnx2x_init_hw_func_cnic(bp); bnx2x_load_cnic() 2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); bnx2x_load_cnic() 2533 bnx2x_nic_init_cnic(bp); bnx2x_load_cnic() 2535 if (IS_PF(bp)) { bnx2x_load_cnic() 2537 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); bnx2x_load_cnic() 2540 for_each_cnic_queue(bp, i) { for_each_cnic_queue() 2541 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); for_each_cnic_queue() 2544 LOAD_ERROR_EXIT(bp, load_error_cnic2); for_each_cnic_queue() 2550 bnx2x_set_rx_mode_inner(bp); 2553 bnx2x_get_iscsi_info(bp); 2554 bnx2x_setup_cnic_irq_info(bp); 2555 bnx2x_setup_cnic_info(bp); 2556 bp->cnic_loaded = true; 2557 if (bp->state == BNX2X_STATE_OPEN) 2558 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2567 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 2570 bnx2x_napi_disable_cnic(bp); 2572 if (bnx2x_set_real_num_queues(bp, 0)) 2576 bnx2x_free_fp_mem_cnic(bp); 2577 bnx2x_free_mem_cnic(bp); 2583 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_nic_load() argument 2585 int port = BP_PORT(bp); bnx2x_nic_load() 2590 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); bnx2x_nic_load() 2593 if (unlikely(bp->panic)) { bnx2x_nic_load() 2599 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; bnx2x_nic_load() 2602 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); bnx2x_nic_load() 2604 &bp->last_reported_link.link_report_flags); bnx2x_nic_load() 2606 if (IS_PF(bp)) bnx2x_nic_load() 2608 bnx2x_ilt_set_info(bp); bnx2x_nic_load() 2612 * allocated only once, fp index, max_cos, bp pointer. 
bnx2x_nic_load() 2615 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); bnx2x_nic_load() 2616 for_each_queue(bp, i) bnx2x_nic_load() 2617 bnx2x_bz_fp(bp, i); bnx2x_nic_load() 2618 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + bnx2x_nic_load() 2619 bp->num_cnic_queues) * bnx2x_nic_load() 2622 bp->fcoe_init = false; bnx2x_nic_load() 2625 bnx2x_set_rx_buf_size(bp); bnx2x_nic_load() 2627 if (IS_PF(bp)) { bnx2x_nic_load() 2628 rc = bnx2x_alloc_mem(bp); bnx2x_nic_load() 2630 BNX2X_ERR("Unable to allocate bp memory\n"); bnx2x_nic_load() 2638 rc = bnx2x_alloc_fp_mem(bp); bnx2x_nic_load() 2641 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load() 2645 if (bnx2x_alloc_fw_stats_mem(bp)) bnx2x_nic_load() 2646 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load() 2649 if (IS_VF(bp)) { bnx2x_nic_load() 2650 rc = bnx2x_vfpf_init(bp); bnx2x_nic_load() 2652 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load() 2656 * bp->num_queues, bnx2x_set_real_num_queues() should always bnx2x_nic_load() 2659 rc = bnx2x_set_real_num_queues(bp, 0); bnx2x_nic_load() 2662 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load() 2669 bnx2x_setup_tc(bp->dev, bp->max_cos); bnx2x_nic_load() 2672 bnx2x_add_all_napi(bp); bnx2x_nic_load() 2674 bnx2x_napi_enable(bp); bnx2x_nic_load() 2676 if (IS_PF(bp)) { bnx2x_nic_load() 2678 bnx2x_set_pf_load(bp); bnx2x_nic_load() 2681 if (!BP_NOMCP(bp)) { bnx2x_nic_load() 2683 rc = bnx2x_nic_load_request(bp, &load_code); bnx2x_nic_load() 2685 LOAD_ERROR_EXIT(bp, load_error1); bnx2x_nic_load() 2688 rc = bnx2x_compare_fw_ver(bp, load_code, true); bnx2x_nic_load() 2690 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load() 2691 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load() 2694 load_code = bnx2x_nic_load_no_mcp(bp, port); bnx2x_nic_load() 2698 bnx2x_nic_load_pmf(bp, load_code); bnx2x_nic_load() 2701 bnx2x__init_func_obj(bp); bnx2x_nic_load() 2704 rc = bnx2x_init_hw(bp, load_code); bnx2x_nic_load() 2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load() 2708 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load() 2712 bnx2x_pre_irq_nic_init(bp); bnx2x_nic_load() 2715 rc = bnx2x_setup_irqs(bp); bnx2x_nic_load() 2718 if (IS_PF(bp)) bnx2x_nic_load() 2719 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load() 2720 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load() 2724 if (IS_PF(bp)) { bnx2x_nic_load() 2726 bnx2x_post_irq_nic_init(bp, load_code); bnx2x_nic_load() 2728 bnx2x_init_bp_objs(bp); bnx2x_nic_load() 2729 bnx2x_iov_nic_init(bp); bnx2x_nic_load() 2732 bp->afex_def_vlan_tag = -1; bnx2x_nic_load() 2733 bnx2x_nic_load_afex_dcc(bp, load_code); bnx2x_nic_load() 2734 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; bnx2x_nic_load() 2735 rc = bnx2x_func_start(bp); bnx2x_nic_load() 2738 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load() 2740 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load() 2744 if (!BP_NOMCP(bp)) { bnx2x_nic_load() 2745 load_code = bnx2x_fw_command(bp, bnx2x_nic_load() 2750 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load() 2755 bnx2x_update_coalesce(bp); bnx2x_nic_load() 2759 rc = bnx2x_setup_leading(bp); bnx2x_nic_load() 2762 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load() 2766 for_each_nondefault_eth_queue(bp, i) { for_each_nondefault_eth_queue() 2767 if (IS_PF(bp)) for_each_nondefault_eth_queue() 2768 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); for_each_nondefault_eth_queue() 2770 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); for_each_nondefault_eth_queue() 2773 LOAD_ERROR_EXIT(bp, load_error3); 
for_each_nondefault_eth_queue() 2778 rc = bnx2x_init_rss(bp); 2781 LOAD_ERROR_EXIT(bp, load_error3); 2785 bp->state = BNX2X_STATE_OPEN; 2788 if (IS_PF(bp)) 2789 rc = bnx2x_set_eth_mac(bp, true); 2791 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, 2795 LOAD_ERROR_EXIT(bp, load_error3); 2798 if (IS_PF(bp) && bp->pending_max) { 2799 bnx2x_update_max_mf_config(bp, bp->pending_max); 2800 bp->pending_max = 0; 2803 if (bp->port.pmf) { 2804 rc = bnx2x_initial_phy_init(bp, load_mode); 2806 LOAD_ERROR_EXIT(bp, load_error3); 2808 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; 2813 bnx2x_set_rx_mode_inner(bp); 2815 if (bp->flags & PTP_SUPPORTED) { 2816 bnx2x_init_ptp(bp); 2817 bnx2x_configure_ptp_filters(bp); 2823 netif_tx_wake_all_queues(bp->dev); 2827 netif_tx_start_all_queues(bp->dev); 2833 bp->state = BNX2X_STATE_DIAG; 2840 if (bp->port.pmf) 2841 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); 2843 bnx2x__link_status_update(bp); 2846 mod_timer(&bp->timer, jiffies + bp->current_interval); 2848 if (CNIC_ENABLED(bp)) 2849 bnx2x_load_cnic(bp); 2851 if (IS_PF(bp)) 2852 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 2854 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2857 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 2858 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 2864 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { 2866 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 2871 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2872 bnx2x_dcbx_init(bp, false); 2880 if (IS_PF(bp)) { 2881 bnx2x_int_disable_sync(bp, 1); 2884 bnx2x_squeeze_objects(bp); 2888 bnx2x_free_skbs(bp); 2889 for_each_rx_queue(bp, i) 2890 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2893 bnx2x_free_irq(bp); 2895 if (IS_PF(bp) && !BP_NOMCP(bp)) { 2896 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 2897 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 2900 bp->port.pmf = 0; 2902 bnx2x_napi_disable(bp); 2903 bnx2x_del_all_napi(bp); 2906 if (IS_PF(bp)) 2907 bnx2x_clear_pf_load(bp); 2909 bnx2x_free_fw_stats_mem(bp); 2910 bnx2x_free_fp_mem(bp); 2911 bnx2x_free_mem(bp); 2917 int bnx2x_drain_tx_queues(struct bnx2x *bp) bnx2x_drain_tx_queues() argument 2922 for_each_tx_queue(bp, i) { for_each_tx_queue() 2923 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_tx_queue() 2926 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); for_each_tx_queue() 2934 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_nic_unload() argument 2942 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { bnx2x_nic_unload() 2944 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); bnx2x_nic_unload() 2945 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], bnx2x_nic_unload() 2949 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && bnx2x_nic_unload() 2950 (bp->state == BNX2X_STATE_CLOSED || bnx2x_nic_unload() 2951 bp->state == BNX2X_STATE_ERROR)) { bnx2x_nic_unload() 2959 bp->recovery_state = BNX2X_RECOVERY_DONE; bnx2x_nic_unload() 2960 bp->is_leader = 0; bnx2x_nic_unload() 2961 bnx2x_release_leader_lock(bp); bnx2x_nic_unload() 2975 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) bnx2x_nic_unload() 2978 /* It's important to set the bp->state to the value different from bnx2x_nic_unload() 2982 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bnx2x_nic_unload() 2986 bnx2x_iov_channel_down(bp); bnx2x_nic_unload() 2988 if (CNIC_LOADED(bp)) bnx2x_nic_unload() 2989 bnx2x_cnic_notify(bp, 
CNIC_CTL_STOP_CMD); bnx2x_nic_unload() 2992 bnx2x_tx_disable(bp); bnx2x_nic_unload() 2993 netdev_reset_tc(bp->dev); bnx2x_nic_unload() 2995 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_nic_unload() 2997 del_timer_sync(&bp->timer); bnx2x_nic_unload() 2999 if (IS_PF(bp)) { bnx2x_nic_unload() 3001 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bnx2x_nic_unload() 3002 bnx2x_drv_pulse(bp); bnx2x_nic_unload() 3003 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_nic_unload() 3004 bnx2x_save_statistics(bp); bnx2x_nic_unload() 3008 bnx2x_drain_tx_queues(bp); bnx2x_nic_unload() 3013 if (IS_VF(bp)) bnx2x_nic_unload() 3014 bnx2x_vfpf_close_vf(bp); bnx2x_nic_unload() 3017 bnx2x_chip_cleanup(bp, unload_mode, keep_link); bnx2x_nic_unload() 3020 bnx2x_send_unload_req(bp, unload_mode); bnx2x_nic_unload() 3028 if (!CHIP_IS_E1x(bp)) bnx2x_nic_unload() 3029 bnx2x_pf_disable(bp); bnx2x_nic_unload() 3032 bnx2x_netif_stop(bp, 1); bnx2x_nic_unload() 3034 bnx2x_del_all_napi(bp); bnx2x_nic_unload() 3035 if (CNIC_LOADED(bp)) bnx2x_nic_unload() 3036 bnx2x_del_all_napi_cnic(bp); bnx2x_nic_unload() 3038 bnx2x_free_irq(bp); bnx2x_nic_unload() 3041 bnx2x_send_unload_done(bp, false); bnx2x_nic_unload() 3048 if (IS_PF(bp)) bnx2x_nic_unload() 3049 bnx2x_squeeze_objects(bp); bnx2x_nic_unload() 3052 bp->sp_state = 0; bnx2x_nic_unload() 3054 bp->port.pmf = 0; bnx2x_nic_unload() 3057 bp->sp_rtnl_state = 0; bnx2x_nic_unload() 3061 bnx2x_free_skbs(bp); bnx2x_nic_unload() 3062 if (CNIC_LOADED(bp)) bnx2x_nic_unload() 3063 bnx2x_free_skbs_cnic(bp); bnx2x_nic_unload() 3064 for_each_rx_queue(bp, i) bnx2x_nic_unload() 3065 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); bnx2x_nic_unload() 3067 bnx2x_free_fp_mem(bp); bnx2x_nic_unload() 3068 if (CNIC_LOADED(bp)) bnx2x_nic_unload() 3069 bnx2x_free_fp_mem_cnic(bp); bnx2x_nic_unload() 3071 if (IS_PF(bp)) { bnx2x_nic_unload() 3072 if (CNIC_LOADED(bp)) bnx2x_nic_unload() 3073 bnx2x_free_mem_cnic(bp); bnx2x_nic_unload() 3075 bnx2x_free_mem(bp); bnx2x_nic_unload() 3077 bp->state = BNX2X_STATE_CLOSED; bnx2x_nic_unload() 3078 bp->cnic_loaded = false; bnx2x_nic_unload() 3081 if (IS_PF(bp)) bnx2x_nic_unload() 3082 bnx2x_update_mng_version(bp); bnx2x_nic_unload() 3087 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { bnx2x_nic_unload() 3088 bnx2x_set_reset_in_progress(bp); bnx2x_nic_unload() 3092 bnx2x_set_reset_global(bp); bnx2x_nic_unload() 3098 if (IS_PF(bp) && bnx2x_nic_unload() 3099 !bnx2x_clear_pf_load(bp) && bnx2x_nic_unload() 3100 bnx2x_reset_is_done(bp, BP_PATH(bp))) bnx2x_nic_unload() 3101 bnx2x_disable_close_the_gate(bp); bnx2x_nic_unload() 3108 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) bnx2x_set_power_state() argument 3113 if (!bp->pdev->pm_cap) { bnx2x_set_power_state() 3118 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); bnx2x_set_power_state() 3122 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, bnx2x_set_power_state() 3134 if (atomic_read(&bp->pdev->enable_cnt) != 1) bnx2x_set_power_state() 3137 if (CHIP_REV_IS_SLOW(bp)) bnx2x_set_power_state() 3143 if (bp->wol) bnx2x_set_power_state() 3146 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, bnx2x_set_power_state() 3155 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); bnx2x_set_power_state() 3170 struct bnx2x *bp = fp->bp; bnx2x_poll() local 3174 if (unlikely(bp->panic)) { bnx2x_poll() 3184 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); for_each_cos_in_tx_queue() 3230 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 3247 struct bnx2x *bp = fp->bp; 
bnx2x_low_latency_recv() local 3250 if ((bp->state == BNX2X_STATE_CLOSED) || bnx2x_low_latency_recv() 3251 (bp->state == BNX2X_STATE_ERROR) || bnx2x_low_latency_recv() 3252 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) bnx2x_low_latency_recv() 3271 static u16 bnx2x_tx_split(struct bnx2x *bp, bnx2x_tx_split() argument 3330 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) bnx2x_xmit_type() argument 3348 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { bnx2x_xmit_type() 3381 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, bnx2x_pkt_req_lin() argument 3492 * @bp: driver handle 3499 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum_enc() argument 3526 * @bp: driver handle 3533 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum_e2() argument 3555 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_sbd_csum() argument 3571 * @bp: driver handle 3576 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum() argument 3719 struct bnx2x *bp = netdev_priv(dev); bnx2x_start_xmit() local 3733 u32 xmit_type = bnx2x_xmit_type(bp, skb); bnx2x_start_xmit() 3741 if (unlikely(bp->panic)) bnx2x_start_xmit() 3748 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); bnx2x_start_xmit() 3750 txdata = &bp->bnx2x_txq[txq_index]; bnx2x_start_xmit() 3761 if (unlikely(bnx2x_tx_avail(bp, txdata) < bnx2x_start_xmit() 3768 bnx2x_fp_qstats(bp, txdata->parent_fp); bnx2x_start_xmit() 3773 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; bnx2x_start_xmit() 3800 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { bnx2x_start_xmit() 3802 bp->lin_cnt++; bnx2x_start_xmit() 3812 mapping = dma_map_single(&bp->pdev->dev, skb->data, bnx2x_start_xmit() 3814 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_start_xmit() 3846 if (!(bp->flags & TX_TIMESTAMPING_EN)) { bnx2x_start_xmit() 3848 } else if (bp->ptp_tx_skb) { bnx2x_start_xmit() 3853 bp->ptp_tx_skb = skb_get(skb); bnx2x_start_xmit() 3854 bp->ptp_tx_start = jiffies; bnx2x_start_xmit() 3855 schedule_work(&bp->ptp_task); bnx2x_start_xmit() 3881 if (IS_VF(bp)) bnx2x_start_xmit() 3898 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); bnx2x_start_xmit() 3900 if (!CHIP_IS_E1x(bp)) { bnx2x_start_xmit() 3908 hlen = bnx2x_set_pbd_csum_enc(bp, skb, bnx2x_start_xmit() 3942 hlen = bnx2x_set_pbd_csum_e2(bp, skb, bnx2x_start_xmit() 3951 if (IS_VF(bp)) { bnx2x_start_xmit() 3963 if (bp->flags & TX_SWITCHING) bnx2x_start_xmit() 3988 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); bnx2x_start_xmit() 4019 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, bnx2x_start_xmit() 4023 if (!CHIP_IS_E1x(bp)) bnx2x_start_xmit() 4044 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, bnx2x_start_xmit() 4046 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_start_xmit() 4058 bnx2x_free_tx_pkt(bp, txdata, bnx2x_start_xmit() 4141 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); bnx2x_start_xmit() 4147 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { bnx2x_start_xmit() 4155 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; bnx2x_start_xmit() 4156 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) bnx2x_start_xmit() 4175 struct bnx2x *bp = netdev_priv(dev); bnx2x_setup_tc() local 4187 if (num_tc > bp->max_cos) { bnx2x_setup_tc() 4189 num_tc, bp->max_cos); bnx2x_setup_tc() 4201 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); bnx2x_setup_tc() 4204 prio, 
bp->prio_to_cos[prio]); bnx2x_setup_tc() 4218 for (cos = 0; cos < bp->max_cos; cos++) { bnx2x_setup_tc() 4219 count = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_setup_tc() 4220 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); bnx2x_setup_tc() 4234 struct bnx2x *bp = netdev_priv(dev); bnx2x_change_mac_addr() local 4242 if (IS_MF_STORAGE_ONLY(bp)) { bnx2x_change_mac_addr() 4248 rc = bnx2x_set_eth_mac(bp, false); bnx2x_change_mac_addr() 4256 rc = bnx2x_set_eth_mac(bp, true); bnx2x_change_mac_addr() 4261 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) bnx2x_free_fp_mem_at() argument 4263 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); bnx2x_free_fp_mem_at() 4264 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; bnx2x_free_fp_mem_at() 4274 if (!CHIP_IS_E1x(bp)) bnx2x_free_fp_mem_at() 4276 bnx2x_fp(bp, fp_index, bnx2x_free_fp_mem_at() 4281 bnx2x_fp(bp, fp_index, bnx2x_free_fp_mem_at() 4287 if (!skip_rx_queue(bp, fp_index)) { bnx2x_free_fp_mem_at() 4291 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); bnx2x_free_fp_mem_at() 4292 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), bnx2x_free_fp_mem_at() 4293 bnx2x_fp(bp, fp_index, rx_desc_mapping), bnx2x_free_fp_mem_at() 4296 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), bnx2x_free_fp_mem_at() 4297 bnx2x_fp(bp, fp_index, rx_comp_mapping), bnx2x_free_fp_mem_at() 4302 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); bnx2x_free_fp_mem_at() 4303 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), bnx2x_free_fp_mem_at() 4304 bnx2x_fp(bp, fp_index, rx_sge_mapping), bnx2x_free_fp_mem_at() 4309 if (!skip_tx_queue(bp, fp_index)) { bnx2x_free_fp_mem_at() 4327 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) bnx2x_free_fp_mem_cnic() argument 4330 for_each_cnic_queue(bp, i) bnx2x_free_fp_mem_cnic() 4331 bnx2x_free_fp_mem_at(bp, i); bnx2x_free_fp_mem_cnic() 4334 void bnx2x_free_fp_mem(struct bnx2x *bp) bnx2x_free_fp_mem() argument 4337 for_each_eth_queue(bp, i) bnx2x_free_fp_mem() 4338 bnx2x_free_fp_mem_at(bp, i); bnx2x_free_fp_mem() 4341 static void set_sb_shortcuts(struct bnx2x *bp, int index) set_sb_shortcuts() argument 4343 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); set_sb_shortcuts() 4344 if (!CHIP_IS_E1x(bp)) { set_sb_shortcuts() 4345 bnx2x_fp(bp, index, sb_index_values) = set_sb_shortcuts() 4347 bnx2x_fp(bp, index, sb_running_index) = set_sb_shortcuts() 4350 bnx2x_fp(bp, index, sb_index_values) = set_sb_shortcuts() 4352 bnx2x_fp(bp, index, sb_running_index) = set_sb_shortcuts() 4361 struct bnx2x *bp = fp->bp; bnx2x_alloc_rx_bds() local 4372 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { bnx2x_alloc_rx_bds() 4391 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; bnx2x_alloc_rx_bds() 4414 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) bnx2x_alloc_fp_mem_at() argument 4417 struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_alloc_fp_mem_at() 4422 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { bnx2x_alloc_fp_mem_at() 4424 bp->rx_ring_size = rx_ring_size; bnx2x_alloc_fp_mem_at() 4425 } else if (!bp->rx_ring_size) { bnx2x_alloc_fp_mem_at() 4426 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); bnx2x_alloc_fp_mem_at() 4428 if (CHIP_IS_E3(bp)) { bnx2x_alloc_fp_mem_at() 4429 u32 cfg = SHMEM_RD(bp, bnx2x_alloc_fp_mem_at() 4430 dev_info.port_hw_config[BP_PORT(bp)]. bnx2x_alloc_fp_mem_at() 4440 rx_ring_size = max_t(int, bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : bnx2x_alloc_fp_mem_at() 4443 bp->rx_ring_size = rx_ring_size; bnx2x_alloc_fp_mem_at() 4445 rx_ring_size = bp->rx_ring_size; bnx2x_alloc_fp_mem_at() 4450 sb = &bnx2x_fp(bp, index, status_blk); bnx2x_alloc_fp_mem_at() 4454 if (!CHIP_IS_E1x(bp)) { bnx2x_alloc_fp_mem_at() 4455 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), bnx2x_alloc_fp_mem_at() 4460 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), bnx2x_alloc_fp_mem_at() 4471 set_sb_shortcuts(bp, index); bnx2x_alloc_fp_mem_at() 4474 if (!skip_tx_queue(bp, index)) { bnx2x_alloc_fp_mem_at() 4496 if (!skip_rx_queue(bp, index)) { 4498 bnx2x_fp(bp, index, rx_buf_ring) = 4500 if (!bnx2x_fp(bp, index, rx_buf_ring)) 4502 bnx2x_fp(bp, index, rx_desc_ring) = 4503 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), 4505 if (!bnx2x_fp(bp, index, rx_desc_ring)) 4509 bnx2x_fp(bp, index, rx_comp_ring) = 4510 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), 4512 if (!bnx2x_fp(bp, index, rx_comp_ring)) 4516 bnx2x_fp(bp, index, rx_page_ring) = 4519 if (!bnx2x_fp(bp, index, rx_page_ring)) 4521 bnx2x_fp(bp, index, rx_sge_ring) = 4522 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), 4524 if (!bnx2x_fp(bp, index, rx_sge_ring)) 4551 bnx2x_free_fp_mem_at(bp, index); 4557 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) bnx2x_alloc_fp_mem_cnic() argument 4559 if (!NO_FCOE(bp)) bnx2x_alloc_fp_mem_cnic() 4561 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) bnx2x_alloc_fp_mem_cnic() 4570 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) bnx2x_alloc_fp_mem() argument 4579 if (bnx2x_alloc_fp_mem_at(bp, 0)) bnx2x_alloc_fp_mem() 4583 for_each_nondefault_eth_queue(bp, i) for_each_nondefault_eth_queue() 4584 if (bnx2x_alloc_fp_mem_at(bp, i)) for_each_nondefault_eth_queue() 4588 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { for_each_nondefault_eth_queue() 4589 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; for_each_nondefault_eth_queue() 4592 bnx2x_shrink_eth_fp(bp, delta); for_each_nondefault_eth_queue() 4593 if (CNIC_SUPPORT(bp)) for_each_nondefault_eth_queue() 4600 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); for_each_nondefault_eth_queue() 4601 bp->num_ethernet_queues -= delta; for_each_nondefault_eth_queue() 4602 bp->num_queues = bp->num_ethernet_queues + for_each_nondefault_eth_queue() 4603 bp->num_cnic_queues; for_each_nondefault_eth_queue() 4605 bp->num_queues + delta, bp->num_queues); for_each_nondefault_eth_queue() 4611 void bnx2x_free_mem_bp(struct bnx2x *bp) bnx2x_free_mem_bp() argument 4615 for (i = 0; i < bp->fp_array_size; i++) bnx2x_free_mem_bp() 4616 kfree(bp->fp[i].tpa_info); bnx2x_free_mem_bp() 4617 kfree(bp->fp); bnx2x_free_mem_bp() 4618 kfree(bp->sp_objs); bnx2x_free_mem_bp() 4619 kfree(bp->fp_stats); bnx2x_free_mem_bp() 4620 kfree(bp->bnx2x_txq); bnx2x_free_mem_bp() 4621 kfree(bp->msix_table); bnx2x_free_mem_bp() 4622 kfree(bp->ilt); bnx2x_free_mem_bp() 4625 int bnx2x_alloc_mem_bp(struct bnx2x *bp) bnx2x_alloc_mem_bp() argument 4638 msix_table_size = bp->igu_sb_cnt; bnx2x_alloc_mem_bp() 4639 if (IS_PF(bp)) bnx2x_alloc_mem_bp() 4644 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); bnx2x_alloc_mem_bp() 4645 bp->fp_array_size = fp_array_size; bnx2x_alloc_mem_bp() 4646 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); bnx2x_alloc_mem_bp() 4648 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); bnx2x_alloc_mem_bp() 4651 for (i = 0; i < bp->fp_array_size; i++) { bnx2x_alloc_mem_bp() 4659 bp->fp = fp; bnx2x_alloc_mem_bp() 4662 bp->sp_objs = 
kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), bnx2x_alloc_mem_bp() 4664 if (!bp->sp_objs) bnx2x_alloc_mem_bp() 4668 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), bnx2x_alloc_mem_bp() 4670 if (!bp->fp_stats) bnx2x_alloc_mem_bp() 4675 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); bnx2x_alloc_mem_bp() 4678 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), bnx2x_alloc_mem_bp() 4680 if (!bp->bnx2x_txq) bnx2x_alloc_mem_bp() 4687 bp->msix_table = tbl; bnx2x_alloc_mem_bp() 4693 bp->ilt = ilt; bnx2x_alloc_mem_bp() 4697 bnx2x_free_mem_bp(bp); bnx2x_alloc_mem_bp() 4703 struct bnx2x *bp = netdev_priv(dev); bnx2x_reload_if_running() local 4708 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_reload_if_running() 4709 return bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_reload_if_running() 4712 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) bnx2x_get_cur_phy_idx() argument 4715 if (bp->link_params.num_phys <= 1) bnx2x_get_cur_phy_idx() 4718 if (bp->link_vars.link_up) { bnx2x_get_cur_phy_idx() 4721 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && bnx2x_get_cur_phy_idx() 4722 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) bnx2x_get_cur_phy_idx() 4726 switch (bnx2x_phy_selection(&bp->link_params)) { bnx2x_get_cur_phy_idx() 4741 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) bnx2x_get_link_cfg_idx() argument 4743 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_link_cfg_idx() 4750 if (bp->link_params.multi_phy_config & bnx2x_get_link_cfg_idx() 4763 struct bnx2x *bp = netdev_priv(dev); bnx2x_fcoe_get_wwn() local 4764 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_fcoe_get_wwn() 4787 struct bnx2x *bp = netdev_priv(dev); bnx2x_change_mtu() local 4789 if (pci_num_vf(bp->pdev)) { bnx2x_change_mtu() 4794 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_change_mtu() 4817 struct bnx2x *bp = netdev_priv(dev); bnx2x_fix_features() local 4819 if (pci_num_vf(bp->pdev)) { bnx2x_fix_features() 4825 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { bnx2x_fix_features() 4847 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_features() local 4853 if (!pci_num_vf(bp->pdev)) { bnx2x_set_features() 4855 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { bnx2x_set_features() 4856 bp->link_params.loopback_mode = LOOPBACK_BMAC; bnx2x_set_features() 4860 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { bnx2x_set_features() 4861 bp->link_params.loopback_mode = LOOPBACK_NONE; bnx2x_set_features() 4872 if ((changes & NETIF_F_GRO) && bp->disable_tpa) bnx2x_set_features() 4879 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { bnx2x_set_features() 4892 struct bnx2x *bp = netdev_priv(dev); bnx2x_tx_timeout() local 4895 if (!bp->panic) bnx2x_tx_timeout() 4900 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); bnx2x_tx_timeout() 4906 struct bnx2x *bp; bnx2x_suspend() local 4912 bp = netdev_priv(dev); bnx2x_suspend() 4925 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_suspend() 4927 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); bnx2x_suspend() 4937 struct bnx2x *bp; bnx2x_resume() local 4944 bp = netdev_priv(dev); bnx2x_resume() 4946 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_resume() 4960 bnx2x_set_power_state(bp, PCI_D0); bnx2x_resume() 4963 rc = bnx2x_nic_load(bp, LOAD_OPEN); bnx2x_resume() 4970 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, bnx2x_set_ctx_validation() argument 4980 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), bnx2x_set_ctx_validation() 4984 
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), bnx2x_set_ctx_validation() 4988 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, storm_memset_hc_timeout() argument 4994 REG_WR8(bp, addr, ticks); storm_memset_hc_timeout() 5000 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, storm_memset_hc_disable() argument 5007 u8 flags = REG_RD8(bp, addr); storm_memset_hc_disable() 5011 REG_WR8(bp, addr, flags); storm_memset_hc_disable() 5017 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, bnx2x_update_coalesce_sb_index() argument 5020 int port = BP_PORT(bp); bnx2x_update_coalesce_sb_index() 5023 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); bnx2x_update_coalesce_sb_index() 5026 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); bnx2x_update_coalesce_sb_index() 5029 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, bnx2x_schedule_sp_rtnl() argument 5033 set_bit(flag, &bp->sp_rtnl_state); bnx2x_schedule_sp_rtnl() 5037 schedule_delayed_work(&bp->sp_rtnl_task, 0); bnx2x_schedule_sp_rtnl()
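Note on the bnx2x_start_xmit() fast path above: the queue is stopped when bnx2x_tx_avail() falls below MAX_DESC_PER_TX_PKT and availability is re-checked before waking it. That test works because the ring keeps free-running 16-bit producer/consumer indices, so unsigned subtraction yields the in-flight descriptor count even after the counters wrap. Below is a minimal user-space sketch of that accounting; RING_SIZE and all identifiers are illustrative stand-ins, not driver symbols.

/* Ring accounting with free-running 16-bit indices: (prod - cons)
 * in unsigned arithmetic stays correct across the 0xffff wrap. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u                  /* hypothetical ring size */

static uint16_t ring_avail(uint16_t prod, uint16_t cons)
{
	uint16_t used = (uint16_t)(prod - cons);  /* wraps correctly */
	return (uint16_t)(RING_SIZE - used);
}

int main(void)
{
	uint16_t prod = 0xfffe, cons = 0xfff0;    /* just below the wrap */

	assert(ring_avail(prod, cons) == RING_SIZE - 14);
	prod += 20;                               /* producer wraps past 0xffff */
	assert(ring_avail(prod, cons) == RING_SIZE - 34);
	printf("avail after wrap: %u\n", ring_avail(prod, cons));
	return 0;
}

Keeping the indices free-running, and masking only when indexing the descriptor array, is what lets a single subtraction distinguish a full ring from an empty one.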
|
H A D | bnx2x_ethtool.c | 190 static int bnx2x_get_port_type(struct bnx2x *bp) bnx2x_get_port_type() argument 193 u32 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_port_type() 194 switch (bp->link_params.phy[phy_idx].media_type) { bnx2x_get_port_type() 222 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_vf_settings() local 224 if (bp->state == BNX2X_STATE_OPEN) { bnx2x_get_vf_settings() 226 &bp->vf_link_vars.link_report_flags)) bnx2x_get_vf_settings() 231 ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); bnx2x_get_vf_settings() 258 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_settings() local 259 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_get_settings() 262 cmd->supported = bp->port.supported[cfg_idx] | bnx2x_get_settings() 263 (bp->port.supported[cfg_idx ^ 1] & bnx2x_get_settings() 265 cmd->advertising = bp->port.advertising[cfg_idx]; bnx2x_get_settings() 266 if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type == bnx2x_get_settings() 272 if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && bnx2x_get_settings() 273 !(bp->flags & MF_FUNC_DIS)) { bnx2x_get_settings() 274 cmd->duplex = bp->link_vars.duplex; bnx2x_get_settings() 276 if (IS_MF(bp) && !BP_NOMCP(bp)) bnx2x_get_settings() 277 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); bnx2x_get_settings() 279 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); bnx2x_get_settings() 285 cmd->port = bnx2x_get_port_type(bp); bnx2x_get_settings() 287 cmd->phy_address = bp->mdio.prtad; bnx2x_get_settings() 290 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) bnx2x_get_settings() 296 if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { bnx2x_get_settings() 297 u32 status = bp->link_vars.link_status; bnx2x_get_settings() 342 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_settings() local 346 if (IS_MF_SD(bp)) bnx2x_set_settings() 364 if (IS_MF_SI(bp)) { bnx2x_set_settings() 366 u32 line_speed = bp->link_vars.line_speed; bnx2x_set_settings() 372 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { bnx2x_set_settings() 387 if (bp->state != BNX2X_STATE_OPEN) bnx2x_set_settings() 389 bp->pending_max = part; bnx2x_set_settings() 391 bnx2x_update_max_mf_config(bp, part); bnx2x_set_settings() 396 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_settings() 397 old_multi_phy_config = bp->link_params.multi_phy_config; bnx2x_set_settings() 398 if (cmd->port != bnx2x_get_port_type(bp)) { bnx2x_set_settings() 401 if (!(bp->port.supported[0] & SUPPORTED_TP || bnx2x_set_settings() 402 bp->port.supported[1] & SUPPORTED_TP)) { bnx2x_set_settings() 407 bp->link_params.multi_phy_config &= bnx2x_set_settings() 409 if (bp->link_params.multi_phy_config & bnx2x_set_settings() 411 bp->link_params.multi_phy_config |= bnx2x_set_settings() 414 bp->link_params.multi_phy_config |= bnx2x_set_settings() 420 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || bnx2x_set_settings() 421 bp->port.supported[1] & SUPPORTED_FIBRE)) { bnx2x_set_settings() 426 bp->link_params.multi_phy_config &= bnx2x_set_settings() 428 if (bp->link_params.multi_phy_config & bnx2x_set_settings() 430 bp->link_params.multi_phy_config |= bnx2x_set_settings() 433 bp->link_params.multi_phy_config |= bnx2x_set_settings() 442 new_multi_phy_config = bp->link_params.multi_phy_config; bnx2x_set_settings() 444 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_settings() 446 bp->link_params.multi_phy_config = old_multi_phy_config; bnx2x_set_settings() 450 u32 an_supported_speed = bp->port.supported[cfg_idx]; bnx2x_set_settings() 451 if 
(bp->link_params.phy[EXT_PHY1].type == bnx2x_set_settings() 455 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { bnx2x_set_settings() 467 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; bnx2x_set_settings() 468 bp->link_params.req_duplex[cfg_idx] = cmd->duplex; bnx2x_set_settings() 469 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | bnx2x_set_settings() 473 bp->link_params.speed_cap_mask[cfg_idx] = 0; bnx2x_set_settings() 475 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 479 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 483 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 487 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 491 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 496 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 502 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 506 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings() 514 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings() 524 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings() 538 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings() 548 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings() 567 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings() 585 if (!(bp->port.supported[cfg_idx] bnx2x_set_settings() 602 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_set_settings() 603 if (!(bp->port.supported[cfg_idx] bnx2x_set_settings() 605 (bp->link_params.phy[phy_idx].media_type == bnx2x_set_settings() 621 bp->link_params.req_line_speed[cfg_idx] = speed; bnx2x_set_settings() 622 bp->link_params.req_duplex[cfg_idx] = cmd->duplex; bnx2x_set_settings() 623 bp->port.advertising[cfg_idx] = advertising; bnx2x_set_settings() 628 bp->link_params.req_line_speed[cfg_idx], bnx2x_set_settings() 629 bp->link_params.req_duplex[cfg_idx], bnx2x_set_settings() 630 bp->port.advertising[cfg_idx]); bnx2x_set_settings() 633 bp->link_params.multi_phy_config = new_multi_phy_config; bnx2x_set_settings() 635 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_settings() 636 bnx2x_link_set(bp); bnx2x_set_settings() 645 static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset) __bnx2x_get_preset_regs_len() argument 647 if (CHIP_IS_E1(bp)) __bnx2x_get_preset_regs_len() 649 else if (CHIP_IS_E1H(bp)) __bnx2x_get_preset_regs_len() 651 else if (CHIP_IS_E2(bp)) __bnx2x_get_preset_regs_len() 653 else if (CHIP_IS_E3A0(bp)) __bnx2x_get_preset_regs_len() 655 else if (CHIP_IS_E3B0(bp)) __bnx2x_get_preset_regs_len() 661 static int __bnx2x_get_regs_len(struct bnx2x *bp) __bnx2x_get_regs_len() argument 668 regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx); __bnx2x_get_regs_len() 675 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_regs_len() local 678 if (IS_VF(bp)) bnx2x_get_regs_len() 681 regdump_len = __bnx2x_get_regs_len(bp); bnx2x_get_regs_len() 698 static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) __bnx2x_get_page_addr_ar() argument 700 if (CHIP_IS_E2(bp)) __bnx2x_get_page_addr_ar() 702 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_addr_ar() 708 static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) __bnx2x_get_page_reg_num() argument 710 if (CHIP_IS_E2(bp)) __bnx2x_get_page_reg_num() 712 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_reg_num() 718 static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) __bnx2x_get_page_write_ar() argument 720 if (CHIP_IS_E2(bp)) __bnx2x_get_page_write_ar() 722 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_write_ar() 728 static u32 
__bnx2x_get_page_write_num(struct bnx2x *bp) __bnx2x_get_page_write_num() argument 730 if (CHIP_IS_E2(bp)) __bnx2x_get_page_write_num() 732 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_write_num() 738 static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) __bnx2x_get_page_read_ar() argument 740 if (CHIP_IS_E2(bp)) __bnx2x_get_page_read_ar() 742 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_read_ar() 748 static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) __bnx2x_get_page_read_num() argument 750 if (CHIP_IS_E2(bp)) __bnx2x_get_page_read_num() 752 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_read_num() 758 static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, bnx2x_is_reg_in_chip() argument 761 if (CHIP_IS_E1(bp)) bnx2x_is_reg_in_chip() 763 else if (CHIP_IS_E1H(bp)) bnx2x_is_reg_in_chip() 765 else if (CHIP_IS_E2(bp)) bnx2x_is_reg_in_chip() 767 else if (CHIP_IS_E3A0(bp)) bnx2x_is_reg_in_chip() 769 else if (CHIP_IS_E3B0(bp)) bnx2x_is_reg_in_chip() 775 static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, bnx2x_is_wreg_in_chip() argument 778 if (CHIP_IS_E1(bp)) bnx2x_is_wreg_in_chip() 780 else if (CHIP_IS_E1H(bp)) bnx2x_is_wreg_in_chip() 782 else if (CHIP_IS_E2(bp)) bnx2x_is_wreg_in_chip() 784 else if (CHIP_IS_E3A0(bp)) bnx2x_is_wreg_in_chip() 786 else if (CHIP_IS_E3B0(bp)) bnx2x_is_wreg_in_chip() 795 * @bp device handle 803 static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset) bnx2x_read_pages_regs() argument 808 const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); bnx2x_read_pages_regs() 810 int num_pages = __bnx2x_get_page_reg_num(bp); bnx2x_read_pages_regs() 812 const u32 *write_addr = __bnx2x_get_page_write_ar(bp); bnx2x_read_pages_regs() 814 int write_num = __bnx2x_get_page_write_num(bp); bnx2x_read_pages_regs() 816 const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); bnx2x_read_pages_regs() 818 int read_num = __bnx2x_get_page_read_num(bp); bnx2x_read_pages_regs() 823 REG_WR(bp, write_addr[j], page_addr[i]); bnx2x_read_pages_regs() 831 *p++ = REG_RD(bp, addr); bnx2x_read_pages_regs() 839 static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) __bnx2x_get_preset_regs() argument 844 if (CHIP_IS_E1(bp)) __bnx2x_get_preset_regs() 846 else if (CHIP_IS_E1H(bp)) __bnx2x_get_preset_regs() 848 else if (CHIP_IS_E2(bp)) __bnx2x_get_preset_regs() 850 else if (CHIP_IS_E3A0(bp)) __bnx2x_get_preset_regs() 852 else if (CHIP_IS_E3B0(bp)) __bnx2x_get_preset_regs() 857 if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) && __bnx2x_get_preset_regs() 860 *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4); __bnx2x_get_preset_regs() 866 if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) && __bnx2x_get_preset_regs() 869 *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); __bnx2x_get_preset_regs() 874 if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) && __bnx2x_get_preset_regs() 877 *p++ = REG_RD(bp, wreg_addr_p->addr + i*4); __bnx2x_get_preset_regs() 884 *p++ = REG_RD(bp, addr + j*4); __bnx2x_get_preset_regs() 890 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { __bnx2x_get_preset_regs() 892 bnx2x_read_pages_regs(bp, p, preset); __bnx2x_get_preset_regs() 898 static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) __bnx2x_get_regs() argument 910 __bnx2x_get_preset_regs(bp, p, preset_idx); __bnx2x_get_regs() 911 p += __bnx2x_get_preset_regs_len(bp, preset_idx); __bnx2x_get_regs() 919 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_regs() local 925 if (!netif_running(bp->dev)) bnx2x_get_regs() 933 bnx2x_disable_blocks_parity(bp); bnx2x_get_regs() 940 if (CHIP_IS_E1(bp)) { bnx2x_get_regs() 942 } else if
(CHIP_IS_E1H(bp)) { bnx2x_get_regs() 944 } else if (CHIP_IS_E2(bp)) { bnx2x_get_regs() 946 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs() 947 } else if (CHIP_IS_E3A0(bp)) { bnx2x_get_regs() 949 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs() 950 } else if (CHIP_IS_E3B0(bp)) { bnx2x_get_regs() 952 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs() 959 __bnx2x_get_regs(bp, p); bnx2x_get_regs() 962 bnx2x_clear_blocks_parity(bp); bnx2x_get_regs() 963 bnx2x_enable_blocks_parity(bp); bnx2x_get_regs() 968 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_preset_regs_len() local 971 regdump_len = __bnx2x_get_preset_regs_len(bp, preset); bnx2x_get_preset_regs_len() 980 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_dump() local 986 bp->dump_preset_idx = val->flag; bnx2x_set_dump() 993 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_dump_flag() local 996 dump->flag = bp->dump_preset_idx; bnx2x_get_dump_flag() 998 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); bnx2x_get_dump_flag() 1000 bp->dump_preset_idx, dump->len); bnx2x_get_dump_flag() 1009 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_dump_data() local 1017 bnx2x_disable_blocks_parity(bp); bnx2x_get_dump_data() 1020 dump_hdr.preset = bp->dump_preset_idx; bnx2x_get_dump_data() 1026 if (CHIP_IS_E1(bp)) { bnx2x_get_dump_data() 1028 } else if (CHIP_IS_E1H(bp)) { bnx2x_get_dump_data() 1030 } else if (CHIP_IS_E2(bp)) { bnx2x_get_dump_data() 1032 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data() 1033 } else if (CHIP_IS_E3A0(bp)) { bnx2x_get_dump_data() 1035 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data() 1036 } else if (CHIP_IS_E3B0(bp)) { bnx2x_get_dump_data() 1038 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data() 1045 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); bnx2x_get_dump_data() 1048 bnx2x_clear_blocks_parity(bp); bnx2x_get_dump_data() 1049 bnx2x_enable_blocks_parity(bp); bnx2x_get_dump_data() 1057 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_drvinfo() local 1062 bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); bnx2x_get_drvinfo() 1064 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); bnx2x_get_drvinfo() 1066 info->testinfo_len = BNX2X_NUM_TESTS(bp); bnx2x_get_drvinfo() 1067 info->eedump_len = bp->common.flash_size; bnx2x_get_drvinfo() 1073 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_wol() local 1075 if (bp->flags & NO_WOL_FLAG) { bnx2x_get_wol() 1080 if (bp->wol) bnx2x_get_wol() 1090 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_wol() local 1098 if (bp->flags & NO_WOL_FLAG) { bnx2x_set_wol() 1102 bp->wol = 1; bnx2x_set_wol() 1104 bp->wol = 0; bnx2x_set_wol() 1111 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_msglevel() local 1113 return bp->msg_enable; bnx2x_get_msglevel() 1118 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_msglevel() local 1122 if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) bnx2x_set_msglevel() 1123 bnx2x_fw_dump_lvl(bp, KERN_INFO); bnx2x_set_msglevel() 1124 bp->msg_enable = level; bnx2x_set_msglevel() 1130 struct bnx2x *bp = netdev_priv(dev); bnx2x_nway_reset() local 1132 if (!bp->port.pmf) bnx2x_nway_reset() 1136 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_nway_reset() 1137 bnx2x_force_link_reset(bp); bnx2x_nway_reset() 1138 bnx2x_link_set(bp); bnx2x_nway_reset() 1146 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_link() local 1148 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) bnx2x_get_link() 1151 if (IS_VF(bp)) bnx2x_get_link() 1153 
&bp->vf_link_vars.link_report_flags); bnx2x_get_link() 1155 return bp->link_vars.link_up; bnx2x_get_link() 1160 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eeprom_len() local 1162 return bp->common.flash_size; bnx2x_get_eeprom_len() 1178 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) bnx2x_acquire_nvram_lock() argument 1180 int port = BP_PORT(bp); bnx2x_acquire_nvram_lock() 1185 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); bnx2x_acquire_nvram_lock() 1189 if (CHIP_REV_IS_SLOW(bp)) bnx2x_acquire_nvram_lock() 1193 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_acquire_nvram_lock() 1197 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); bnx2x_acquire_nvram_lock() 1213 static int bnx2x_release_nvram_lock(struct bnx2x *bp) bnx2x_release_nvram_lock() argument 1215 int port = BP_PORT(bp); bnx2x_release_nvram_lock() 1221 if (CHIP_REV_IS_SLOW(bp)) bnx2x_release_nvram_lock() 1225 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_release_nvram_lock() 1229 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); bnx2x_release_nvram_lock() 1243 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); bnx2x_release_nvram_lock() 1247 static void bnx2x_enable_nvram_access(struct bnx2x *bp) bnx2x_enable_nvram_access() argument 1251 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); bnx2x_enable_nvram_access() 1254 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, bnx2x_enable_nvram_access() 1259 static void bnx2x_disable_nvram_access(struct bnx2x *bp) bnx2x_disable_nvram_access() argument 1263 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); bnx2x_disable_nvram_access() 1266 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, bnx2x_disable_nvram_access() 1271 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, bnx2x_nvram_read_dword() argument 1281 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); bnx2x_nvram_read_dword() 1284 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, bnx2x_nvram_read_dword() 1288 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); bnx2x_nvram_read_dword() 1292 if (CHIP_REV_IS_SLOW(bp)) bnx2x_nvram_read_dword() 1300 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); bnx2x_nvram_read_dword() 1303 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); bnx2x_nvram_read_dword() 1319 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, bnx2x_nvram_read() argument 1333 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_read() 1336 offset, buf_size, bp->common.flash_size); bnx2x_nvram_read() 1341 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_read() 1346 bnx2x_enable_nvram_access(bp); bnx2x_nvram_read() 1351 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); bnx2x_nvram_read() 1363 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); bnx2x_nvram_read() 1368 bnx2x_disable_nvram_access(bp); bnx2x_nvram_read() 1369 bnx2x_release_nvram_lock(bp); bnx2x_nvram_read() 1374 static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, bnx2x_nvram_read32() argument 1379 rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size); bnx2x_nvram_read32() 1391 static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) bnx2x_is_nvm_accessible() argument 1395 struct net_device *dev = pci_get_drvdata(bp->pdev); bnx2x_is_nvm_accessible() 1397 if (bp->pdev->pm_cap) bnx2x_is_nvm_accessible() 1398 rc = pci_read_config_word(bp->pdev, bnx2x_is_nvm_accessible() 1399 bp->pdev->pm_cap + PCI_PM_CTRL, &pm); bnx2x_is_nvm_accessible() 1411 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eeprom() local 1413 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_eeprom() 1426 return bnx2x_nvram_read(bp, eeprom->offset, eebuf, 
eeprom->len); bnx2x_get_eeprom() 1433 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_module_eeprom() local 1438 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_module_eeprom() 1444 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_module_eeprom() 1453 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_eeprom() 1454 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_eeprom() 1455 &bp->link_params, bnx2x_get_module_eeprom() 1460 bnx2x_release_phy_lock(bp); bnx2x_get_module_eeprom() 1478 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_eeprom() 1479 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_eeprom() 1480 &bp->link_params, bnx2x_get_module_eeprom() 1485 bnx2x_release_phy_lock(bp); bnx2x_get_module_eeprom() 1497 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_module_info() local 1501 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_module_info() 1506 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_module_info() 1507 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_info() 1508 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_info() 1509 &bp->link_params, bnx2x_get_module_info() 1514 bnx2x_release_phy_lock(bp); bnx2x_get_module_info() 1520 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_info() 1521 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_info() 1522 &bp->link_params, bnx2x_get_module_info() 1527 bnx2x_release_phy_lock(bp); bnx2x_get_module_info() 1544 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, bnx2x_nvram_write_dword() argument 1553 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); bnx2x_nvram_write_dword() 1556 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); bnx2x_nvram_write_dword() 1559 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, bnx2x_nvram_write_dword() 1563 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); bnx2x_nvram_write_dword() 1567 if (CHIP_REV_IS_SLOW(bp)) bnx2x_nvram_write_dword() 1574 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); bnx2x_nvram_write_dword() 1589 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, bnx2x_nvram_write1() argument 1596 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_write1() 1599 offset, buf_size, bp->common.flash_size); bnx2x_nvram_write1() 1604 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_write1() 1609 bnx2x_enable_nvram_access(bp); bnx2x_nvram_write1() 1613 rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags); bnx2x_nvram_write1() 1626 rc = bnx2x_nvram_write_dword(bp, align_offset, val, bnx2x_nvram_write1() 1631 bnx2x_disable_nvram_access(bp); bnx2x_nvram_write1() 1632 bnx2x_release_nvram_lock(bp); bnx2x_nvram_write1() 1637 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, bnx2x_nvram_write() argument 1646 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); bnx2x_nvram_write() 1655 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_write() 1658 offset, buf_size, bp->common.flash_size); bnx2x_nvram_write() 1663 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_write() 1668 bnx2x_enable_nvram_access(bp); bnx2x_nvram_write() 1688 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); bnx2x_nvram_write() 1698 bnx2x_disable_nvram_access(bp); bnx2x_nvram_write() 1699 bnx2x_release_nvram_lock(bp); bnx2x_nvram_write() 1707 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_eeprom() local 1708 int port = BP_PORT(bp); bnx2x_set_eeprom() 1712 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_set_eeprom() 1727 !bp->port.pmf) { bnx2x_set_eeprom() 1734 
SHMEM_RD(bp, bnx2x_set_eeprom() 1739 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_eeprom() 1741 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom() 1742 rc |= bnx2x_link_reset(&bp->link_params, bnx2x_set_eeprom() 1743 &bp->link_vars, 0); bnx2x_set_eeprom() 1746 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_set_eeprom() 1748 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom() 1749 bnx2x_link_report(bp); bnx2x_set_eeprom() 1753 if (bp->state == BNX2X_STATE_OPEN) { bnx2x_set_eeprom() 1754 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom() 1755 rc |= bnx2x_link_reset(&bp->link_params, bnx2x_set_eeprom() 1756 &bp->link_vars, 1); bnx2x_set_eeprom() 1758 rc |= bnx2x_phy_init(&bp->link_params, bnx2x_set_eeprom() 1759 &bp->link_vars); bnx2x_set_eeprom() 1760 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom() 1761 bnx2x_calc_fc_adv(bp); bnx2x_set_eeprom() 1769 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_set_eeprom() 1772 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom() 1774 bnx2x_sfx7101_sp_sw_reset(bp, bnx2x_set_eeprom() 1775 &bp->link_params.phy[EXT_PHY1]); bnx2x_set_eeprom() 1779 bnx2x_ext_phy_hw_reset(bp, port); bnx2x_set_eeprom() 1781 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom() 1784 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); bnx2x_set_eeprom() 1792 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_coalesce() local 1796 coal->rx_coalesce_usecs = bp->rx_ticks; bnx2x_get_coalesce() 1797 coal->tx_coalesce_usecs = bp->tx_ticks; bnx2x_get_coalesce() 1805 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_coalesce() local 1807 bp->rx_ticks = (u16)coal->rx_coalesce_usecs; bnx2x_set_coalesce() 1808 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) bnx2x_set_coalesce() 1809 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; bnx2x_set_coalesce() 1811 bp->tx_ticks = (u16)coal->tx_coalesce_usecs; bnx2x_set_coalesce() 1812 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) bnx2x_set_coalesce() 1813 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; bnx2x_set_coalesce() 1816 bnx2x_update_coalesce(bp); bnx2x_set_coalesce() 1824 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ringparam() local 1828 if (bp->rx_ring_size) bnx2x_get_ringparam() 1829 ering->rx_pending = bp->rx_ring_size; bnx2x_get_ringparam() 1833 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; bnx2x_get_ringparam() 1834 ering->tx_pending = bp->tx_ring_size; bnx2x_get_ringparam() 1840 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_ringparam() local 1846 if (pci_num_vf(bp->pdev)) { bnx2x_set_ringparam() 1852 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_set_ringparam() 1859 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : bnx2x_set_ringparam() 1861 (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 
0 : MAX_TX_AVAIL)) || bnx2x_set_ringparam() 1867 bp->rx_ring_size = ering->rx_pending; bnx2x_set_ringparam() 1868 bp->tx_ring_size = ering->tx_pending; bnx2x_set_ringparam() 1876 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_pauseparam() local 1877 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_get_pauseparam() 1880 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == bnx2x_get_pauseparam() 1884 cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx]; bnx2x_get_pauseparam() 1886 cfg_reg = bp->link_params.req_fc_auto_adv; bnx2x_get_pauseparam() 1901 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_pauseparam() local 1902 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_pauseparam() 1903 if (IS_MF(bp)) bnx2x_set_pauseparam() 1910 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; bnx2x_set_pauseparam() 1913 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX; bnx2x_set_pauseparam() 1916 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX; bnx2x_set_pauseparam() 1918 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO) bnx2x_set_pauseparam() 1919 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE; bnx2x_set_pauseparam() 1922 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { bnx2x_set_pauseparam() 1927 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) { bnx2x_set_pauseparam() 1928 bp->link_params.req_flow_ctrl[cfg_idx] = bnx2x_set_pauseparam() 1931 bp->link_params.req_fc_auto_adv = 0; bnx2x_set_pauseparam() 1933 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; bnx2x_set_pauseparam() 1936 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; bnx2x_set_pauseparam() 1938 if (!bp->link_params.req_fc_auto_adv) bnx2x_set_pauseparam() 1939 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE; bnx2x_set_pauseparam() 1943 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); bnx2x_set_pauseparam() 1946 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_pauseparam() 1947 bnx2x_link_set(bp); bnx2x_set_pauseparam() 2005 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eee() local 2008 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { bnx2x_get_eee() 2013 eee_cfg = bp->link_vars.eee_status; bnx2x_get_eee() 2038 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_eee() local 2042 if (IS_MF(bp)) bnx2x_set_eee() 2045 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { bnx2x_set_eee() 2050 eee_cfg = bp->link_vars.eee_status; bnx2x_set_eee() 2081 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI; bnx2x_set_eee() 2083 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI; bnx2x_set_eee() 2086 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI; bnx2x_set_eee() 2088 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI; bnx2x_set_eee() 2090 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK; bnx2x_set_eee() 2091 bp->link_params.eee_mode |= (edata->tx_lpi_timer & bnx2x_set_eee() 2098 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_eee() 2099 bnx2x_force_link_reset(bp); bnx2x_set_eee() 2100 bnx2x_link_set(bp); bnx2x_set_eee() 2124 static int bnx2x_test_registers(struct bnx2x *bp) bnx2x_test_registers() argument 2128 int port = BP_PORT(bp); bnx2x_test_registers() 2215 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_test_registers() 2221 if (CHIP_IS_E1(bp)) bnx2x_test_registers() 2223 else if (CHIP_IS_E1H(bp)) bnx2x_test_registers() 2225 else if (CHIP_IS_E2(bp)) bnx2x_test_registers() 2227 else if (CHIP_IS_E3B0(bp)) bnx2x_test_registers() 2254 save_val = REG_RD(bp, offset); bnx2x_test_registers() 2256 REG_WR(bp, offset, wr_val & mask); 
bnx2x_test_registers() 2258 val = REG_RD(bp, offset); bnx2x_test_registers() 2261 REG_WR(bp, offset, save_val); bnx2x_test_registers() 2279 static int bnx2x_test_memory(struct bnx2x *bp) bnx2x_test_memory() argument 2319 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_test_memory() 2325 if (CHIP_IS_E1(bp)) bnx2x_test_memory() 2327 else if (CHIP_IS_E1H(bp)) bnx2x_test_memory() 2329 else if (CHIP_IS_E2(bp)) bnx2x_test_memory() 2336 val = REG_RD(bp, prty_tbl[i].offset); bnx2x_test_memory() 2347 REG_RD(bp, mem_tbl[i].offset + j*4); bnx2x_test_memory() 2351 val = REG_RD(bp, prty_tbl[i].offset); bnx2x_test_memory() 2365 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) bnx2x_wait_for_link() argument 2370 while (bnx2x_link_test(bp, is_serdes) && cnt--) bnx2x_wait_for_link() 2373 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) bnx2x_wait_for_link() 2377 while (!bp->link_vars.link_up && cnt--) bnx2x_wait_for_link() 2380 if (cnt <= 0 && !bp->link_vars.link_up) bnx2x_wait_for_link() 2386 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_run_loopback() argument 2391 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; bnx2x_run_loopback() 2392 struct bnx2x_fastpath *fp_tx = &bp->fp[0]; bnx2x_run_loopback() 2406 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, bnx2x_run_loopback() 2412 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { bnx2x_run_loopback() 2418 if (CHIP_IS_E3(bp)) { bnx2x_run_loopback() 2419 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_run_loopback() 2420 if (bp->port.supported[cfg_idx] & bnx2x_run_loopback() 2424 bp->link_params.loopback_mode = LOOPBACK_XMAC; bnx2x_run_loopback() 2426 bp->link_params.loopback_mode = LOOPBACK_UMAC; bnx2x_run_loopback() 2428 bp->link_params.loopback_mode = LOOPBACK_BMAC; bnx2x_run_loopback() 2430 bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_run_loopback() 2433 if (bp->link_params.loopback_mode != LOOPBACK_EXT) { bnx2x_run_loopback() 2445 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 
bnx2x_run_loopback() 2446 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); bnx2x_run_loopback() 2447 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); bnx2x_run_loopback() 2454 memcpy(packet, bp->dev->dev_addr, ETH_ALEN); bnx2x_run_loopback() 2459 mapping = dma_map_single(&bp->pdev->dev, skb->data, bnx2x_run_loopback() 2461 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_run_loopback() 2499 if (CHIP_IS_E1x(bp)) { bnx2x_run_loopback() 2520 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); bnx2x_run_loopback() 2538 if (bp->common.int_block == INT_BLOCK_IGU) { bnx2x_run_loopback() 2544 bnx2x_tx_int(bp, txdata); bnx2x_run_loopback() 2563 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2x_run_loopback() 2581 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, bnx2x_run_loopback() 2585 bp->link_params.loopback_mode = LOOPBACK_NONE; bnx2x_run_loopback() 2590 static int bnx2x_test_loopback(struct bnx2x *bp) bnx2x_test_loopback() argument 2594 if (BP_NOMCP(bp)) bnx2x_test_loopback() 2597 if (!netif_running(bp->dev)) bnx2x_test_loopback() 2600 bnx2x_netif_stop(bp, 1); bnx2x_test_loopback() 2601 bnx2x_acquire_phy_lock(bp); bnx2x_test_loopback() 2603 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); bnx2x_test_loopback() 2609 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); bnx2x_test_loopback() 2615 bnx2x_release_phy_lock(bp); bnx2x_test_loopback() 2616 bnx2x_netif_start(bp); bnx2x_test_loopback() 2621 static int bnx2x_test_ext_loopback(struct bnx2x *bp) bnx2x_test_ext_loopback() argument 2625 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; bnx2x_test_ext_loopback() 2627 if (BP_NOMCP(bp)) bnx2x_test_ext_loopback() 2630 if (!netif_running(bp->dev)) bnx2x_test_ext_loopback() 2633 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_test_ext_loopback() 2634 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); bnx2x_test_ext_loopback() 2640 bnx2x_wait_for_link(bp, 1, is_serdes); bnx2x_test_ext_loopback() 2642 bnx2x_netif_stop(bp, 1); bnx2x_test_ext_loopback() 2644 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); bnx2x_test_ext_loopback() 2648 bnx2x_netif_start(bp); bnx2x_test_ext_loopback() 2675 static int bnx2x_nvram_crc(struct bnx2x *bp, bnx2x_nvram_crc() argument 2689 rc = bnx2x_nvram_read(bp, offset + done, buff, count); bnx2x_nvram_crc() 2704 static int bnx2x_test_nvram_dir(struct bnx2x *bp, bnx2x_test_nvram_dir() argument 2716 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff); bnx2x_test_nvram_dir() 2724 static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff) bnx2x_test_dir_entry() argument 2729 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry)); bnx2x_test_dir_entry() 2733 return bnx2x_test_nvram_dir(bp, &entry, buff); bnx2x_test_dir_entry() 2736 static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff) bnx2x_test_nvram_ext_dirs() argument 2742 rc = bnx2x_nvram_read32(bp, bnx2x_test_nvram_ext_dirs() 2752 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr, bnx2x_test_nvram_ext_dirs() 2760 rc = bnx2x_test_dir_entry(bp, dir_offset + bnx2x_test_nvram_ext_dirs() 2770 static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff) bnx2x_test_nvram_dirs() argument 2778 rc = bnx2x_test_dir_entry(bp, dir_offset + bnx2x_test_nvram_dirs() 2785 return bnx2x_test_nvram_ext_dirs(bp, buff); bnx2x_test_nvram_dirs() 2793 static int bnx2x_test_nvram_tbl(struct bnx2x *bp, bnx2x_test_nvram_tbl() argument 2799 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset, bnx2x_test_nvram_tbl() 2812 static int bnx2x_test_nvram(struct bnx2x *bp) 
bnx2x_test_nvram() argument 2833 if (BP_NOMCP(bp)) bnx2x_test_nvram() 2843 rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic)); bnx2x_test_nvram() 2858 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf); bnx2x_test_nvram() 2862 if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) { bnx2x_test_nvram() 2863 u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & bnx2x_test_nvram() 2869 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf); bnx2x_test_nvram() 2875 rc = bnx2x_test_nvram_dirs(bp, buf); bnx2x_test_nvram() 2883 static int bnx2x_test_intr(struct bnx2x *bp) bnx2x_test_intr() argument 2887 if (!netif_running(bp->dev)) { bnx2x_test_intr() 2893 params.q_obj = &bp->sp_objs->q_obj; bnx2x_test_intr() 2898 return bnx2x_queue_state_change(bp, &params); bnx2x_test_intr() 2904 struct bnx2x *bp = netdev_priv(dev); bnx2x_self_test() local 2908 if (pci_num_vf(bp->pdev)) { bnx2x_self_test() 2914 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_self_test() 2915 netdev_err(bp->dev, bnx2x_self_test() 2926 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); bnx2x_self_test() 2928 if (bnx2x_test_nvram(bp) != 0) { bnx2x_self_test() 2929 if (!IS_MF(bp)) bnx2x_self_test() 2941 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; bnx2x_self_test() 2942 link_up = bp->link_vars.link_up; bnx2x_self_test() 2944 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { bnx2x_self_test() 2945 int port = BP_PORT(bp); bnx2x_self_test() 2949 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); bnx2x_self_test() 2951 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); bnx2x_self_test() 2953 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_self_test() 2954 rc = bnx2x_nic_load(bp, LOAD_DIAG); bnx2x_self_test() 2963 bnx2x_wait_for_link(bp, 1, is_serdes); bnx2x_self_test() 2965 if (bnx2x_test_registers(bp) != 0) { bnx2x_self_test() 2969 if (bnx2x_test_memory(bp) != 0) { bnx2x_self_test() 2974 buf[2] = bnx2x_test_loopback(bp); /* internal LB */ bnx2x_self_test() 2979 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ bnx2x_self_test() 2985 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_self_test() 2988 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); bnx2x_self_test() 2989 rc = bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_self_test() 2997 bnx2x_wait_for_link(bp, link_up, is_serdes); bnx2x_self_test() 3000 if (bnx2x_test_intr(bp) != 0) { bnx2x_self_test() 3001 if (!IS_MF(bp)) bnx2x_self_test() 3010 while (bnx2x_link_test(bp, is_serdes) && --cnt) bnx2x_self_test() 3015 if (!IS_MF(bp)) bnx2x_self_test() 3026 #define HIDE_PORT_STAT(bp) \ 3027 ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ 3028 IS_VF(bp)) 3033 static int bnx2x_num_stat_queues(struct bnx2x *bp) bnx2x_num_stat_queues() argument 3035 return BNX2X_NUM_ETH_QUEUES(bp); bnx2x_num_stat_queues() 3040 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_sset_count() local 3045 if (is_multi(bp)) { bnx2x_get_sset_count() 3046 num_strings = bnx2x_num_stat_queues(bp) * bnx2x_get_sset_count() 3050 if (HIDE_PORT_STAT(bp)) { bnx2x_get_sset_count() 3060 return BNX2X_NUM_TESTS(bp); bnx2x_get_sset_count() 3072 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_private_flags() local 3075 flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; bnx2x_get_private_flags() 3076 flags |= (!(bp->flags & NO_FCOE_FLAG) ?
1 : 0) << BNX2X_PRI_FLAG_FCOE; bnx2x_get_private_flags() 3077 flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; bnx2x_get_private_flags() 3084 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_strings() local 3091 if (is_multi(bp)) { for_each_eth_queue() 3092 for_each_eth_queue(bp, i) { for_each_eth_queue() 3105 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) 3116 if (!IS_MF(bp)) 3121 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); 3134 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ethtool_stats() local 3138 if (is_multi(bp)) { for_each_eth_queue() 3139 for_each_eth_queue(bp, i) { for_each_eth_queue() 3140 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; for_each_eth_queue() 3161 hw_stats = (u32 *)&bp->eth_stats; 3163 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) 3187 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_phys_id() local 3189 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_set_phys_id() 3200 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id() 3201 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id() 3203 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id() 3207 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id() 3208 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id() 3210 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id() 3214 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id() 3215 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id() 3217 bp->link_vars.line_speed); bnx2x_set_phys_id() 3218 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id() 3224 static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) bnx2x_get_rss_flags() argument 3233 if (bp->rss_conf_obj.udp_rss_v4) bnx2x_get_rss_flags() 3240 if (bp->rss_conf_obj.udp_rss_v6) bnx2x_get_rss_flags() 3261 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_rxnfc() local 3265 info->data = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_get_rxnfc() 3268 return bnx2x_get_rss_flags(bp, info); bnx2x_get_rxnfc() 3275 static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) bnx2x_set_rss_flags() argument 3306 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bnx2x_set_rss_flags() 3307 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; bnx2x_set_rss_flags() 3311 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); bnx2x_set_rss_flags() 3313 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bnx2x_set_rss_flags() 3314 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; bnx2x_set_rss_flags() 3318 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); bnx2x_set_rss_flags() 3357 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rxnfc() local 3361 return bnx2x_set_rss_flags(bp, info); bnx2x_set_rxnfc() 3376 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_rxfh() local 3386 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); bnx2x_get_rxfh() 3398 indir[i] = ind_table[i] - bp->fp->cl_id; bnx2x_get_rxfh() 3406 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rxfh() local 3429 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; bnx2x_set_rxfh() 3432 return bnx2x_config_rss_eth(bp, false); bnx2x_set_rxfh() 3444 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_channels() local 3446 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); bnx2x_get_channels() 3447 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_get_channels() 3453 * @bp: bnx2x private structure 3458 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) bnx2x_change_num_queues() argument 3460 bnx2x_disable_msi(bp); bnx2x_change_num_queues() 3461 bp->num_ethernet_queues = num_rss; bnx2x_change_num_queues() 3462 bp->num_queues = 
bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_change_num_queues() 3463 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); bnx2x_change_num_queues() 3464 bnx2x_set_int_mode(bp); bnx2x_change_num_queues() 3476 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_channels() local 3483 if (pci_num_vf(bp->pdev)) { bnx2x_set_channels() 3493 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { bnx2x_set_channels() 3499 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { bnx2x_set_channels() 3504 /* Set the requested number of queues in bp context. bnx2x_set_channels() 3509 bnx2x_change_num_queues(bp, channels->combined_count); bnx2x_set_channels() 3512 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_set_channels() 3513 bnx2x_change_num_queues(bp, channels->combined_count); bnx2x_set_channels() 3514 return bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_set_channels() 3520 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ts_info() local 3522 if (bp->flags & PTP_SUPPORTED) { bnx2x_get_ts_info() 3530 if (bp->ptp_clock) bnx2x_get_ts_info() 3531 info->phc_index = ptp_clock_index(bp->ptp_clock); bnx2x_get_ts_info() 3622 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) bnx2x_set_ethtool_ops() argument 3624 netdev->ethtool_ops = (IS_PF(bp)) ? bnx2x_set_ethtool_ops()
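Note on bnx2x_nvram_write1() above: sub-dword NVRAM writes are done as a read-modify-write, where bnx2x_nvram_read_dword() fetches the dword at the aligned offset, the new byte is spliced in, and bnx2x_nvram_write_dword() stores the whole dword back. The sketch below models that sequence against an in-memory array; the byte-lane math assumes a big-endian lane order (as the __be32 read in the excerpt suggests), and every name here is an illustrative stand-in, not a driver symbol.

/* Read-modify-write of one byte inside an aligned 32-bit flash word. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t flash[16];                       /* stand-in for the NVRAM */

static void nvram_write1(uint32_t offset, uint8_t data)
{
	uint32_t align_offset = offset & ~3u;    /* dword-align the offset */
	uint32_t val = flash[align_offset / 4];  /* "read_dword" step */
	unsigned shift = (3 - (offset & 3)) * 8; /* assumed big-endian lane */

	val &= ~(0xffu << shift);                /* clear the target byte */
	val |= (uint32_t)data << shift;          /* splice in the new byte */
	flash[align_offset / 4] = val;           /* "write_dword" step */
}

int main(void)
{
	memset(flash, 0xff, sizeof(flash));      /* erased-flash state */
	nvram_write1(6, 0xab);                   /* byte 2 of dword 1 */
	printf("dword1 = 0x%08x\n", (unsigned)flash[1]);  /* 0xffffabff */
	return 0;
}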
|
H A D | bnx2x_dcb.c | 33 static void bnx2x_pfc_set_pfc(struct bnx2x *bp); 34 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); 35 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, 38 static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, 41 static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, 45 static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, 49 static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, 53 static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, bnx2x_read_data() argument 58 *buff = REG_RD(bp, addr + i); bnx2x_read_data() 61 static void bnx2x_write_data(struct bnx2x *bp, u32 *buff, bnx2x_write_data() argument 66 REG_WR(bp, addr + i, *buff); bnx2x_write_data() 69 static void bnx2x_pfc_set(struct bnx2x *bp) bnx2x_pfc_set() argument 76 bp->dcbx_port_params.ets.num_of_cos; bnx2x_pfc_set() 79 for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) bnx2x_pfc_set() 86 bp->dcbx_port_params.ets.cos_params[i].pri_bitmask bnx2x_pfc_set() 87 & DCBX_PFC_PRI_PAUSE_MASK(bp); bnx2x_pfc_set() 97 if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))) bnx2x_pfc_set() 104 pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); bnx2x_pfc_set() 108 bnx2x_acquire_phy_lock(bp); bnx2x_pfc_set() 109 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; bnx2x_pfc_set() 110 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); bnx2x_pfc_set() 111 bnx2x_release_phy_lock(bp); bnx2x_pfc_set() 114 static void bnx2x_pfc_clear(struct bnx2x *bp) bnx2x_pfc_clear() argument 118 bnx2x_acquire_phy_lock(bp); bnx2x_pfc_clear() 119 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; bnx2x_pfc_clear() 120 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); bnx2x_pfc_clear() 121 bnx2x_release_phy_lock(bp); bnx2x_pfc_clear() 124 static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, bnx2x_dump_dcbx_drv_param() argument 170 static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, bnx2x_dcbx_get_ap_priority() argument 177 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_ap_priority() 191 static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, bnx2x_dcbx_get_ap_feature() argument 195 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_ap_feature() 209 bp->dcbx_port_params.app.enabled = true; bnx2x_dcbx_get_ap_feature() 224 bnx2x_dcbx_get_ap_priority(bp, bnx2x_dcbx_get_ap_feature() 231 bnx2x_dcbx_get_ap_priority(bp, bnx2x_dcbx_get_ap_feature() 237 bp->dcbx_port_params.app.enabled = false; bnx2x_dcbx_get_ap_feature() 243 static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, bnx2x_dcbx_get_ets_feature() argument 250 bp->dcbx_port_params.ets.cos_params; bnx2x_dcbx_get_ets_feature() 261 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { bnx2x_dcbx_get_ets_feature() 268 if (bp->dcbx_port_params.app.enabled && ets->enabled && bnx2x_dcbx_get_ets_feature() 272 bp->dcbx_port_params.ets.enabled = true; bnx2x_dcbx_get_ets_feature() 274 bnx2x_dcbx_get_ets_pri_pg_tbl(bp, bnx2x_dcbx_get_ets_feature() 278 bnx2x_dcbx_get_num_pg_traf_type(bp, bnx2x_dcbx_get_ets_feature() 282 bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, bnx2x_dcbx_get_ets_feature() 287 bp->dcbx_port_params.ets.enabled = false; bnx2x_dcbx_get_ets_feature() 295 static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, bnx2x_dcbx_get_pfc_feature() argument 303 if (bp->dcbx_port_params.app.enabled && pfc->enabled && bnx2x_dcbx_get_pfc_feature() 306 bp->dcbx_port_params.pfc.enabled = true; 
bnx2x_dcbx_get_pfc_feature() 307 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = bnx2x_dcbx_get_pfc_feature() 311 bp->dcbx_port_params.pfc.enabled = false; bnx2x_dcbx_get_pfc_feature() 312 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; bnx2x_dcbx_get_pfc_feature() 317 static void bnx2x_dcbx_map_nw(struct bnx2x *bp) bnx2x_dcbx_map_nw() argument 321 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_map_nw() 324 bp->dcbx_port_params.ets.cos_params; bnx2x_dcbx_map_nw() 331 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { bnx2x_dcbx_map_nw() 342 static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, bnx2x_get_dcbx_drv_param() argument 346 bnx2x_dcbx_get_ap_feature(bp, &features->app, error); bnx2x_get_dcbx_drv_param() 348 bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); bnx2x_get_dcbx_drv_param() 350 bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); bnx2x_get_dcbx_drv_param() 352 bnx2x_dcbx_map_nw(bp); bnx2x_get_dcbx_drv_param() 356 static int bnx2x_dcbx_read_mib(struct bnx2x *bp, bnx2x_dcbx_read_mib() argument 377 offset += BP_PORT(bp) * mib_size; bnx2x_dcbx_read_mib() 380 bnx2x_read_data(bp, base_mib_addr, offset, mib_size); bnx2x_dcbx_read_mib() 409 static void bnx2x_pfc_set_pfc(struct bnx2x *bp) bnx2x_pfc_set_pfc() argument 411 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_pfc_set_pfc() 412 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_pfc_set_pfc() 415 if (bp->dcbx_port_params.pfc.enabled && bnx2x_pfc_set_pfc() 416 (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured)) bnx2x_pfc_set_pfc() 421 bnx2x_pfc_set(bp); bnx2x_pfc_set_pfc() 423 bnx2x_pfc_clear(bp); bnx2x_pfc_set_pfc() 426 int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) bnx2x_dcbx_stop_hw_tx() argument 431 func_params.f_obj = &bp->func_obj; bnx2x_dcbx_stop_hw_tx() 439 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_dcbx_stop_hw_tx() 448 int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) bnx2x_dcbx_resume_hw_tx() argument 455 func_params.f_obj = &bp->func_obj; bnx2x_dcbx_resume_hw_tx() 461 bnx2x_dcbx_fw_struct(bp, tx_params); bnx2x_dcbx_resume_hw_tx() 465 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_dcbx_resume_hw_tx() 474 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) bnx2x_dcbx_2cos_limit_update_ets_config() argument 476 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); bnx2x_dcbx_2cos_limit_update_ets_config() 521 bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); bnx2x_dcbx_2cos_limit_update_ets_config() 524 rc = bnx2x_ets_strict(&bp->link_params, 0); bnx2x_dcbx_2cos_limit_update_ets_config() 527 rc = bnx2x_ets_strict(&bp->link_params, 1); bnx2x_dcbx_2cos_limit_update_ets_config() 536 static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) bnx2x_dcbx_update_ets_config() argument 538 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); bnx2x_dcbx_update_ets_config() 567 if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars, bnx2x_dcbx_update_ets_config() 570 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); bnx2x_dcbx_update_ets_config() 574 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) bnx2x_dcbx_update_ets_params() argument 576 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_update_ets_params() 577 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_dcbx_update_ets_params() 580 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); bnx2x_dcbx_update_ets_params() 582 if (!bp->dcbx_port_params.ets.enabled || bnx2x_dcbx_update_ets_params() 583 ((bp->dcbx_error & 
DCBX_REMOTE_MIB_ERROR) && !mfw_configured)) bnx2x_dcbx_update_ets_params() 586 if (CHIP_IS_E3B0(bp)) bnx2x_dcbx_update_ets_params() 587 bnx2x_dcbx_update_ets_config(bp); bnx2x_dcbx_update_ets_params() 589 bnx2x_dcbx_2cos_limit_update_ets_config(bp); bnx2x_dcbx_update_ets_params() 593 static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) bnx2x_dcbx_read_shmem_remote_mib() argument 596 u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); bnx2x_dcbx_read_shmem_remote_mib() 607 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset, bnx2x_dcbx_read_shmem_remote_mib() 616 bp->dcbx_remote_feat = remote_mib.features; bnx2x_dcbx_read_shmem_remote_mib() 617 bp->dcbx_remote_flags = remote_mib.flags; bnx2x_dcbx_read_shmem_remote_mib() 622 static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) bnx2x_dcbx_read_shmem_neg_results() argument 625 u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); bnx2x_dcbx_read_shmem_neg_results() 635 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, bnx2x_dcbx_read_shmem_neg_results() 644 bp->dcbx_local_feat = local_mib.features; bnx2x_dcbx_read_shmem_neg_results() 645 bp->dcbx_error = local_mib.error; bnx2x_dcbx_read_shmem_neg_results() 670 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) bnx2x_dcbnl_update_applist() argument 676 &bp->dcbx_local_feat.app.app_pri_tbl[i]; bnx2x_dcbnl_update_applist() 687 err = dcb_setapp(bp->dev, &app); bnx2x_dcbnl_update_applist() 695 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) bnx2x_dcbx_update_tc_mapping() argument 698 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { bnx2x_dcbx_update_tc_mapping() 700 if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask bnx2x_dcbx_update_tc_mapping() 702 bp->prio_to_cos[prio] = cos; bnx2x_dcbx_update_tc_mapping() 713 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); bnx2x_dcbx_update_tc_mapping() 716 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_set_params() argument 727 bnx2x_dcbnl_update_applist(bp, true); bnx2x_dcbx_set_params() 730 if (bnx2x_dcbx_read_shmem_remote_mib(bp)) bnx2x_dcbx_set_params() 734 if (bnx2x_dcbx_read_shmem_neg_results(bp)) bnx2x_dcbx_set_params() 737 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_set_params() 738 bp->dcbx_error); bnx2x_dcbx_set_params() 740 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_set_params() 741 bp->dcbx_error); bnx2x_dcbx_set_params() 744 bnx2x_update_drv_flags(bp, bnx2x_dcbx_set_params() 751 bnx2x_dcbnl_update_applist(bp, false); bnx2x_dcbx_set_params() 757 bnx2x_dcbx_update_tc_mapping(bp); bnx2x_dcbx_set_params() 763 if (IS_MF(bp)) bnx2x_dcbx_set_params() 764 bnx2x_link_sync_notify(bp); bnx2x_dcbx_set_params() 766 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); bnx2x_dcbx_set_params() 771 bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_set_params() 773 bnx2x_dcbx_update_ets_params(bp); bnx2x_dcbx_set_params() 776 bnx2x_set_local_cmng(bp); bnx2x_dcbx_set_params() 780 bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); bnx2x_dcbx_set_params() 785 dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); bnx2x_dcbx_set_params() 793 #define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ 794 BP_PORT(bp)*sizeof(struct lldp_admin_mib)) 796 static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, bnx2x_dcbx_admin_mib_updated_params() argument 801 u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); 
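/* Annotation (not in the kernel source): LLDP_ADMIN_MIB_OFFSET() above
 * encodes the shmem layout -- the region at dcbx_lldp_params_offset holds
 * PORT_MAX struct lldp_params blocks first, then one struct lldp_admin_mib
 * per port, so 'offset' now addresses this port's admin MIB. The function
 * reads it via bnx2x_read_data(), folds in bp->dcbx_config_params, and
 * writes it back with bnx2x_write_data() (line 917 below). */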
bnx2x_dcbx_admin_mib_updated_params() 805 struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; bnx2x_dcbx_admin_mib_updated_params() 810 bnx2x_read_data(bp, (u32 *)&admin_mib, offset, bnx2x_dcbx_admin_mib_updated_params() 813 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) bnx2x_dcbx_admin_mib_updated_params() 917 bnx2x_write_data(bp, (u32 *)&admin_mib, offset, bnx2x_dcbx_admin_mib_updated_params() 921 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) bnx2x_dcbx_set_state() argument 923 if (!CHIP_IS_E1x(bp)) { bnx2x_dcbx_set_state() 924 bp->dcb_state = dcb_on; bnx2x_dcbx_set_state() 925 bp->dcbx_enabled = dcbx_enabled; bnx2x_dcbx_set_state() 927 bp->dcb_state = false; bnx2x_dcbx_set_state() 928 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; bnx2x_dcbx_set_state() 938 void bnx2x_dcbx_init_params(struct bnx2x *bp) bnx2x_dcbx_init_params() argument 940 bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ bnx2x_dcbx_init_params() 941 bp->dcbx_config_params.admin_ets_willing = 1; bnx2x_dcbx_init_params() 942 bp->dcbx_config_params.admin_pfc_willing = 1; bnx2x_dcbx_init_params() 943 bp->dcbx_config_params.overwrite_settings = 1; bnx2x_dcbx_init_params() 944 bp->dcbx_config_params.admin_ets_enable = 1; bnx2x_dcbx_init_params() 945 bp->dcbx_config_params.admin_pfc_enable = 1; bnx2x_dcbx_init_params() 946 bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; bnx2x_dcbx_init_params() 947 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbx_init_params() 948 bp->dcbx_config_params.admin_pfc_tx_enable = 1; bnx2x_dcbx_init_params() 949 bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bnx2x_dcbx_init_params() 950 bp->dcbx_config_params.admin_ets_reco_valid = 1; bnx2x_dcbx_init_params() 951 bp->dcbx_config_params.admin_app_priority_willing = 1; bnx2x_dcbx_init_params() 952 bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100; bnx2x_dcbx_init_params() 953 bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0; bnx2x_dcbx_init_params() 954 bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0; bnx2x_dcbx_init_params() 955 bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; bnx2x_dcbx_init_params() 956 bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; bnx2x_dcbx_init_params() 957 bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; bnx2x_dcbx_init_params() 958 bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; bnx2x_dcbx_init_params() 959 bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; bnx2x_dcbx_init_params() 960 bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0; bnx2x_dcbx_init_params() 961 bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; bnx2x_dcbx_init_params() 962 bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; bnx2x_dcbx_init_params() 963 bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0; bnx2x_dcbx_init_params() 964 bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; bnx2x_dcbx_init_params() 965 bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; bnx2x_dcbx_init_params() 966 bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; bnx2x_dcbx_init_params() 967 bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; bnx2x_dcbx_init_params() 968 bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100; bnx2x_dcbx_init_params() 969 bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0; bnx2x_dcbx_init_params() 970 
bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0; bnx2x_dcbx_init_params() 971 bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; bnx2x_dcbx_init_params() 972 bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0; bnx2x_dcbx_init_params() 973 bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0; bnx2x_dcbx_init_params() 974 bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0; bnx2x_dcbx_init_params() 975 bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0; bnx2x_dcbx_init_params() 976 bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; bnx2x_dcbx_init_params() 977 bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; bnx2x_dcbx_init_params() 978 bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; bnx2x_dcbx_init_params() 979 bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; bnx2x_dcbx_init_params() 980 bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; bnx2x_dcbx_init_params() 981 bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; bnx2x_dcbx_init_params() 982 bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; bnx2x_dcbx_init_params() 983 bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; bnx2x_dcbx_init_params() 984 bp->dcbx_config_params.admin_pfc_bitmap = 0x0; bnx2x_dcbx_init_params() 985 bp->dcbx_config_params.admin_priority_app_table[0].valid = 0; bnx2x_dcbx_init_params() 986 bp->dcbx_config_params.admin_priority_app_table[1].valid = 0; bnx2x_dcbx_init_params() 987 bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; bnx2x_dcbx_init_params() 988 bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; bnx2x_dcbx_init_params() 989 bp->dcbx_config_params.admin_default_priority = 0; bnx2x_dcbx_init_params() 992 void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) bnx2x_dcbx_init() argument 997 if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) bnx2x_dcbx_init() 1000 if (bp->dcbx_enabled <= 0) bnx2x_dcbx_init() 1008 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", bnx2x_dcbx_init() 1009 bp->dcb_state, bp->port.pmf); bnx2x_dcbx_init() 1011 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bnx2x_dcbx_init() 1012 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { bnx2x_dcbx_init() 1014 SHMEM2_RD(bp, dcbx_lldp_params_offset); bnx2x_dcbx_init() 1019 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); bnx2x_dcbx_init() 1025 bnx2x_acquire_hw_lock(bp, bnx2x_dcbx_init() 1028 bnx2x_dcbx_admin_mib_updated_params(bp, bnx2x_dcbx_init() 1032 bnx2x_fw_command(bp, bnx2x_dcbx_init() 1037 bnx2x_release_hw_lock(bp, bnx2x_dcbx_init() 1043 bnx2x_dcbx_print_cos_params(struct bnx2x *bp, bnx2x_dcbx_print_cos_params() argument 1053 bp->dcbx_port_params.pfc.priority_non_pauseable_mask); bnx2x_dcbx_print_cos_params() 1055 for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { bnx2x_dcbx_print_cos_params() 1058 cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); bnx2x_dcbx_print_cos_params() 1062 cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); bnx2x_dcbx_print_cos_params() 1066 cos, bp->dcbx_port_params.ets.cos_params[cos].strict); bnx2x_dcbx_print_cos_params() 1070 cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); bnx2x_dcbx_print_cos_params() 1085 static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bnx2x_dcbx_get_num_pg_traf_type() argument 1091 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_num_pg_traf_type() 1132 static void bnx2x_dcbx_ets_disabled_entry_data(struct 
bnx2x *bp, bnx2x_dcbx_ets_disabled_entry_data() argument 1138 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_ets_disabled_entry_data() 1144 static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, bnx2x_dcbx_add_to_cos_bw() argument 1154 static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, bnx2x_dcbx_separate_pauseable_from_non() argument 1170 pri_tested = 1 << bp->dcbx_port_params. bnx2x_dcbx_separate_pauseable_from_non() 1173 if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { bnx2x_dcbx_separate_pauseable_from_non() 1180 pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. bnx2x_dcbx_separate_pauseable_from_non() 1184 bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], bnx2x_dcbx_separate_pauseable_from_non() 1202 static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() argument 1211 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1214 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1223 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1235 if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1245 } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1269 bnx2x_dcbx_ets_disabled_entry_data(bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1273 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1279 if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1280 DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1305 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1308 pri_tested = 1 << bp->dcbx_port_params. bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() 1336 struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1351 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1352 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1354 IS_DCBX_PFC_PRI_MIX_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1359 bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1361 bp->dcbx_port_params.ets.enabled = false; bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1369 if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1391 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() 1412 struct bnx2x *bp, bnx2x_dcbx_join_pgs() 1463 struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1483 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1484 bnx2x_dcbx_separate_pauseable_from_non(bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1505 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1508 pri_tested = 1 << bp->dcbx_port_params. 
bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1510 pg_entry = (u8)pg_pri_orginal_spread[bp-> bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1525 bnx2x_dcbx_add_to_cos_bw(bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() 1542 static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() argument 1556 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() 1564 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() 1575 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() 1585 bnx2x_dcbx_ets_disabled_entry_data(bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() 1590 static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, bnx2x_dcbx_spread_strict_pri() argument 1611 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_spread_strict_pri() 1619 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_spread_strict_pri() 1639 static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, bnx2x_dcbx_cee_fill_strict_pri() argument 1645 if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, bnx2x_dcbx_cee_fill_strict_pri() 1654 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_cee_fill_strict_pri() 1662 static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_cee_fill_cos_params() argument 1679 if (bnx2x_dcbx_join_pgs(bp, ets, help_data, bnx2x_dcbx_cee_fill_cos_params() 1682 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, bnx2x_dcbx_cee_fill_cos_params() 1697 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_cee_fill_cos_params() 1711 entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, bnx2x_dcbx_cee_fill_cos_params() 1719 static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_fill_cos_params() argument 1750 if (CHIP_IS_E3B0(bp)) bnx2x_dcbx_fill_cos_params() 1751 bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, bnx2x_dcbx_fill_cos_params() 1754 bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, bnx2x_dcbx_fill_cos_params() 1763 &bp->dcbx_port_params.ets.cos_params[i]; bnx2x_dcbx_fill_cos_params() 1776 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { bnx2x_dcbx_fill_cos_params() 1779 DCBX_PFC_PRI_GET_NON_PAUSE(bp, bnx2x_dcbx_fill_cos_params() 1785 DCBX_PFC_PRI_GET_PAUSE(bp, bnx2x_dcbx_fill_cos_params() 1801 bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; bnx2x_dcbx_fill_cos_params() 1804 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, bnx2x_dcbx_get_ets_pri_pg_tbl() argument 1818 static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, bnx2x_dcbx_fw_struct() argument 1824 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_fw_struct() 1825 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_fw_struct() 1826 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_dcbx_fw_struct() 1832 if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured) bnx2x_dcbx_fw_struct() 1839 pfc_fw_cfg->dcb_version = ++bp->dcb_version; bnx2x_dcbx_fw_struct() 1849 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) bnx2x_dcbx_fw_struct() 1850 if (bp->dcbx_port_params.ets.cos_params[cos]. 
bnx2x_dcbx_fw_struct() 1858 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); bnx2x_dcbx_fw_struct() 1861 void bnx2x_dcbx_pmf_update(struct bnx2x *bp) bnx2x_dcbx_pmf_update() argument 1864 * read it from shmem and update bp and netdev accordingly bnx2x_dcbx_pmf_update() 1866 if (SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_pmf_update() 1867 GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) { bnx2x_dcbx_pmf_update() 1869 if (bnx2x_dcbx_read_shmem_neg_results(bp)) bnx2x_dcbx_pmf_update() 1872 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_pmf_update() 1873 bp->dcbx_error); bnx2x_dcbx_pmf_update() 1874 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_pmf_update() 1875 bp->dcbx_error); bnx2x_dcbx_pmf_update() 1880 bnx2x_dcbnl_update_applist(bp, false); bnx2x_dcbx_pmf_update() 1884 dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); bnx2x_dcbx_pmf_update() 1890 bnx2x_dcbx_update_tc_mapping(bp); bnx2x_dcbx_pmf_update() 1900 static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) bnx2x_dcbnl_set_valid() argument 1905 return bp->dcb_state && bp->dcbx_mode_uset; bnx2x_dcbnl_set_valid() 1910 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_state() local 1911 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); bnx2x_dcbnl_get_state() 1912 return bp->dcb_state; bnx2x_dcbnl_get_state() 1917 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_state() local 1921 if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) || bnx2x_dcbnl_set_state() 1922 (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) { bnx2x_dcbnl_set_state() 1927 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); bnx2x_dcbnl_set_state() 1934 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_perm_hw_addr() local 1940 if (CNIC_LOADED(bp)) bnx2x_dcbnl_get_perm_hw_addr() 1942 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, bnx2x_dcbnl_get_perm_hw_addr() 1950 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_tccfg_tx() local 1953 if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) bnx2x_dcbnl_set_pg_tccfg_tx() 1969 bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; bnx2x_dcbnl_set_pg_tccfg_tx() 1970 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbnl_set_pg_tccfg_tx() 1976 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_bwgcfg_tx() local 1979 if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) bnx2x_dcbnl_set_pg_bwgcfg_tx() 1982 bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; bnx2x_dcbnl_set_pg_bwgcfg_tx() 1983 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbnl_set_pg_bwgcfg_tx() 1990 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_tccfg_rx() local 1997 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_bwgcfg_rx() local 2005 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_tccfg_tx() local 2022 if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) bnx2x_dcbnl_get_pg_tccfg_tx() 2025 *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio); bnx2x_dcbnl_get_pg_tccfg_tx() 2031 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_bwgcfg_tx() local 2036 if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) bnx2x_dcbnl_get_pg_bwgcfg_tx() 2039 *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid); bnx2x_dcbnl_get_pg_bwgcfg_tx() 2046 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_tccfg_rx() local 2055 struct bnx2x *bp = netdev_priv(netdev); 
bnx2x_dcbnl_get_pg_bwgcfg_rx() local 2064 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pfc_cfg() local 2067 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) bnx2x_dcbnl_set_pfc_cfg() 2071 bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio); bnx2x_dcbnl_set_pfc_cfg() 2072 bp->dcbx_config_params.admin_pfc_tx_enable = 1; bnx2x_dcbnl_set_pfc_cfg() 2074 bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio); bnx2x_dcbnl_set_pfc_cfg() 2081 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pfc_cfg() local 2086 if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES) bnx2x_dcbnl_get_pfc_cfg() 2089 *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1; bnx2x_dcbnl_get_pfc_cfg() 2094 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_all() local 2098 if (!bnx2x_dcbnl_set_valid(bp)) bnx2x_dcbnl_set_all() 2101 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_dcbnl_set_all() 2102 netdev_err(bp->dev, bnx2x_dcbnl_set_all() 2106 if (netif_running(bp->dev)) { bnx2x_dcbnl_set_all() 2107 bnx2x_update_drv_flags(bp, bnx2x_dcbnl_set_all() 2110 bnx2x_dcbx_init(bp, true); bnx2x_dcbnl_set_all() 2119 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_cap() local 2122 if (bp->dcb_state) { bnx2x_dcbnl_get_cap() 2164 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_numtcs() local 2169 if (bp->dcb_state) { bnx2x_dcbnl_get_numtcs() 2172 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : bnx2x_dcbnl_get_numtcs() 2176 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : bnx2x_dcbnl_get_numtcs() 2194 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_numtcs() local 2201 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pfc_state() local 2202 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); bnx2x_dcbnl_get_pfc_state() 2204 if (!bp->dcb_state) bnx2x_dcbnl_get_pfc_state() 2207 return bp->dcbx_local_feat.pfc.enabled; bnx2x_dcbnl_get_pfc_state() 2212 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pfc_state() local 2215 if (!bnx2x_dcbnl_set_valid(bp)) bnx2x_dcbnl_set_pfc_state() 2218 bp->dcbx_config_params.admin_pfc_tx_enable = bnx2x_dcbnl_set_pfc_state() 2219 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); bnx2x_dcbnl_set_pfc_state() 2267 static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) bnx2x_set_admin_app_up() argument 2274 &bp->dcbx_config_params.admin_priority_app_table[i]; bnx2x_set_admin_app_up() 2283 bp->dcbx_config_params. 
bnx2x_set_admin_app_up() 2288 &bp->dcbx_config_params.admin_priority_app_table[ff], bnx2x_set_admin_app_up() 2298 bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bnx2x_set_admin_app_up() 2306 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_app_up() local 2311 if (!bnx2x_dcbnl_set_valid(bp)) { bnx2x_dcbnl_set_app_up() 2325 return bnx2x_set_admin_app_up(bp, idtype, idval, up); bnx2x_dcbnl_set_app_up() 2330 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_dcbx() local 2335 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) bnx2x_dcbnl_get_dcbx() 2343 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_dcbx() local 2354 if (bp->dcb_state != BNX2X_DCB_STATE_ON) { bnx2x_dcbnl_set_dcbx() 2360 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; bnx2x_dcbnl_set_dcbx() 2362 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; bnx2x_dcbnl_set_dcbx() 2364 bp->dcbx_mode_uset = true; bnx2x_dcbnl_set_dcbx() 2371 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_featcfg() local 2376 if (bp->dcb_state) { bnx2x_dcbnl_get_featcfg() 2380 if (bp->dcbx_local_feat.ets.enabled) bnx2x_dcbnl_get_featcfg() 2382 if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | bnx2x_dcbnl_get_featcfg() 2387 if (bp->dcbx_local_feat.pfc.enabled) bnx2x_dcbnl_get_featcfg() 2389 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | bnx2x_dcbnl_get_featcfg() 2395 if (bp->dcbx_local_feat.app.enabled) bnx2x_dcbnl_get_featcfg() 2397 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | bnx2x_dcbnl_get_featcfg() 2418 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_featcfg() local 2424 if (bnx2x_dcbnl_set_valid(bp)) { bnx2x_dcbnl_set_featcfg() 2427 bp->dcbx_config_params.admin_ets_enable = bnx2x_dcbnl_set_featcfg() 2429 bp->dcbx_config_params.admin_ets_willing = bnx2x_dcbnl_set_featcfg() 2433 bp->dcbx_config_params.admin_pfc_enable = bnx2x_dcbnl_set_featcfg() 2435 bp->dcbx_config_params.admin_pfc_willing = bnx2x_dcbnl_set_featcfg() 2440 bp->dcbx_config_params.admin_app_priority_willing = bnx2x_dcbnl_set_featcfg() 2460 struct bnx2x *bp = netdev_priv(netdev); bnx2x_peer_appinfo() local 2464 info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; bnx2x_peer_appinfo() 2465 info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; bnx2x_peer_appinfo() 2469 if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & bnx2x_peer_appinfo() 2479 struct bnx2x *bp = netdev_priv(netdev); bnx2x_peer_apptable() local 2485 &bp->dcbx_remote_feat.app.app_pri_tbl[i]; bnx2x_peer_apptable() 2499 struct bnx2x *bp = netdev_priv(netdev); bnx2x_cee_peer_getpg() local 2501 pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; bnx2x_cee_peer_getpg() 2505 DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); bnx2x_cee_peer_getpg() 2507 DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); bnx2x_cee_peer_getpg() 2515 struct bnx2x *bp = netdev_priv(netdev); bnx2x_cee_peer_getpfc() local 2516 pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; bnx2x_cee_peer_getpfc() 2517 pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; bnx2x_cee_peer_getpfc() 1335 bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() argument 1411 bnx2x_dcbx_join_pgs( struct bnx2x *bp, struct dcbx_ets_feature *ets, struct pg_help_data *pg_help_data, u8 required_num_of_pg) bnx2x_dcbx_join_pgs() argument 1462 
bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() argument
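The bnx2x_dcbx_read_mib() fragment above derives a per-port offset into shared memory ("offset += BP_PORT(bp) * mib_size") before copying the MIB out with bnx2x_read_data(); in the driver that copy runs in a retry loop which only accepts the snapshot once the MIB's prefix and suffix sequence numbers agree, because management firmware may rewrite the block mid-copy. Below is a minimal standalone sketch of that snapshot-consistency pattern; the names (struct mib_snapshot, shmem_read(), MAX_TRY_READ) are illustrative stand-ins, not the driver's own.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Illustrative stand-in for the shmem-resident MIB: firmware bumps
 * prefix_seq_num before rewriting the payload and suffix_seq_num after. */
struct mib_snapshot {
	uint32_t prefix_seq_num;
	uint32_t features[8];
	uint32_t suffix_seq_num;
};

static struct mib_snapshot fake_shmem[2];	/* one slot per port */

/* Stand-in for bnx2x_read_data(): plain copy out of "shared memory". */
static void shmem_read(void *dst, uint32_t offset, size_t len)
{
	memcpy(dst, (uint8_t *)fake_shmem + offset, len);
}

#define MAX_TRY_READ 100

static bool read_port_mib(int port, struct mib_snapshot *out)
{
	/* per-port slot, as in "offset += BP_PORT(bp) * mib_size" */
	uint32_t offset = port * sizeof(struct mib_snapshot);
	int attempt;

	for (attempt = 0; attempt < MAX_TRY_READ; attempt++) {
		shmem_read(out, offset, sizeof(*out));
		/* equal sequence numbers => no firmware update raced the copy */
		if (out->prefix_seq_num == out->suffix_seq_num)
			return true;
	}
	return false;	/* caller treats this like the driver's -EINVAL */
}

int main(void)
{
	struct mib_snapshot snap;

	fake_shmem[1].prefix_seq_num = fake_shmem[1].suffix_seq_num = 7;
	fake_shmem[1].features[0] = 0xabcd;

	printf("port 1 stable: %d\n", read_port_mib(1, &snap));
	return 0;
}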
/linux-4.1.27/drivers/net/ethernet/broadcom/ |
H A D | bnx2x_sriov.c | 27 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, 33 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, storm_memset_vf_to_pf() argument 36 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf() 38 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf() 40 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf() 42 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf() 46 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, storm_memset_func_en() argument 49 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en() 51 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en() 53 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en() 55 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en() 59 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) bnx2x_vf_idx_by_abs_fid() argument 63 for_each_vf(bp, idx) bnx2x_vf_idx_by_abs_fid() 64 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) bnx2x_vf_idx_by_abs_fid() 70 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) bnx2x_vf_by_abs_fid() argument 72 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); bnx2x_vf_by_abs_fid() 73 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; bnx2x_vf_by_abs_fid() 76 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_igu_ack_sb() argument 100 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); bnx2x_vf_igu_ack_sb() 106 REG_WR(bp, igu_addr_ctl, ctl); bnx2x_vf_igu_ack_sb() 111 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, bnx2x_validate_vf_sp_objs() argument 126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfop_qctor_dump_tx() argument 142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfop_qctor_dump_rx() argument 166 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, bnx2x_vfop_qctor_prep() argument 239 static int bnx2x_vf_queue_create(struct bnx2x *bp, bnx2x_vf_queue_create() argument 253 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == bnx2x_vf_queue_create() 261 rc = bnx2x_queue_state_change(bp, q_params); bnx2x_vf_queue_create() 268 rc = bnx2x_queue_state_change(bp, q_params); bnx2x_vf_queue_create() 273 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), bnx2x_vf_queue_create() 279 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_queue_destroy() argument 295 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == bnx2x_vf_queue_destroy() 304 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_vf_queue_destroy() 321 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) bnx2x_vf_set_igu_info() argument 323 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_set_igu_info() 326 if (!BP_VFDB(bp)->first_vf_igu_entry) bnx2x_vf_set_igu_info() 327 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; bnx2x_vf_set_igu_info() 336 BP_VFDB(bp)->vf_sbs_pool++; bnx2x_vf_set_igu_info() 339 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, bnx2x_vf_vlan_credit() argument 347 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); bnx2x_vf_vlan_credit() 355 bnx2x_vlan_mac_h_read_unlock(bp, obj); bnx2x_vf_vlan_credit() 360 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, 
bnx2x_vf_vlan_mac_clear() argument 388 rc = ramrod.vlan_mac_obj->delete_all(bp, bnx2x_vf_vlan_mac_clear() 405 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, bnx2x_vf_mac_vlan_config() argument 449 rc = bnx2x_config_vlan_mac(bp, &ramrod); bnx2x_vf_mac_vlan_config() 460 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, bnx2x_vf_mac_vlan_config() 466 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mac_vlan_config_list() argument 474 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_vf_mac_vlan_config_list() 479 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, bnx2x_vf_mac_vlan_config_list() 491 bnx2x_vf_mac_vlan_config(bp, vf, qid, bnx2x_vf_mac_vlan_config_list() 503 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, bnx2x_vf_queue_setup() argument 510 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); bnx2x_vf_queue_setup() 522 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); bnx2x_vf_queue_setup() 529 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, bnx2x_vf_queue_setup() 537 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_queue_flr() argument 546 bnx2x_validate_vf_sp_objs(bp, vf, false)) { bnx2x_vf_queue_flr() 547 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); bnx2x_vf_queue_flr() 550 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); bnx2x_vf_queue_flr() 564 rc = bnx2x_queue_state_change(bp, &qstate); bnx2x_vf_queue_flr() 575 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mcast() argument 603 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); bnx2x_vf_mcast() 621 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); bnx2x_vf_mcast() 630 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, bnx2x_vf_prep_rx_mode() argument 640 ramrod->rx_mode_obj = &bp->rx_mode_obj; bnx2x_vf_prep_rx_mode() 651 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); bnx2x_vf_prep_rx_mode() 652 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); bnx2x_vf_prep_rx_mode() 655 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_rxmode() argument 662 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); bnx2x_vf_rxmode() 665 return bnx2x_config_rx_mode(bp, &ramrod); bnx2x_vf_rxmode() 668 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) bnx2x_vf_queue_teardown() argument 676 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); bnx2x_vf_queue_teardown() 681 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { bnx2x_vf_queue_teardown() 682 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, bnx2x_vf_queue_teardown() 686 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, bnx2x_vf_queue_teardown() 690 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); bnx2x_vf_queue_teardown() 697 rc = bnx2x_vf_queue_destroy(bp, vf, qid); bnx2x_vf_queue_teardown() 715 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) bnx2x_vf_enable_internal() argument 717 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 
1 : 0); bnx2x_vf_enable_internal() 721 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_semi_clear_err() argument 723 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err() 724 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err() 725 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err() 726 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err() 729 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_pglue_clear_err() argument 731 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; bnx2x_vf_pglue_clear_err() 748 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); bnx2x_vf_pglue_clear_err() 751 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_igu_reset() argument 757 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_igu_reset() 759 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); bnx2x_vf_igu_reset() 760 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); bnx2x_vf_igu_reset() 761 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); bnx2x_vf_igu_reset() 762 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); bnx2x_vf_igu_reset() 763 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); bnx2x_vf_igu_reset() 764 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); bnx2x_vf_igu_reset() 766 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); bnx2x_vf_igu_reset() 771 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; bnx2x_vf_igu_reset() 772 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); bnx2x_vf_igu_reset() 778 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_igu_reset() 785 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); bnx2x_vf_igu_reset() 788 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, bnx2x_vf_igu_reset() 792 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, bnx2x_vf_igu_reset() 797 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_enable_access() argument 800 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); bnx2x_vf_enable_access() 801 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); bnx2x_vf_enable_access() 804 bnx2x_vf_semi_clear_err(bp, abs_vfid); bnx2x_vf_enable_access() 805 bnx2x_vf_pglue_clear_err(bp, abs_vfid); bnx2x_vf_enable_access() 808 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); bnx2x_vf_enable_access() 810 bnx2x_vf_enable_internal(bp, true); bnx2x_vf_enable_access() 811 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_enable_access() 814 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_enable_traffic() argument 817 bnx2x_vf_igu_reset(bp, vf); bnx2x_vf_enable_traffic() 820 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_enable_traffic() 821 REG_WR(bp, PBF_REG_DISABLE_VF, 0); bnx2x_vf_enable_traffic() 822 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_enable_traffic() 825 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_is_pcie_pending() argument 828 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_is_pcie_pending() 839 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_flr_clnup_epilog() argument 842 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) bnx2x_vf_flr_clnup_epilog() 848 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, bnx2x_iov_re_set_vlan_filters() argument 860 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); bnx2x_iov_re_set_vlan_filters() 862 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); bnx2x_iov_re_set_vlan_filters() 875 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf 
*vf) bnx2x_iov_static_resc() argument 888 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); bnx2x_iov_static_resc() 889 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); bnx2x_iov_static_resc() 891 bnx2x_iov_re_set_vlan_filters(bp, vf, bnx2x_iov_static_resc() 892 vlan_count / BNX2X_NR_VIRTFN(bp)); bnx2x_iov_static_resc() 902 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_free_resc() argument 905 bnx2x_iov_static_resc(bp, vf); bnx2x_vf_free_resc() 909 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_flr_clnup_hw() argument 911 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); bnx2x_vf_flr_clnup_hw() 914 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_flr_clnup_hw() 915 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, bnx2x_vf_flr_clnup_hw() 918 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_flr_clnup_hw() 921 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), bnx2x_vf_flr_clnup_hw() 926 bnx2x_tx_hw_flushed(bp, poll_cnt); bnx2x_vf_flr_clnup_hw() 929 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_flr() argument 939 rc = bnx2x_vf_queue_flr(bp, vf, i); bnx2x_vf_flr() 945 bnx2x_vf_mcast(bp, vf, NULL, 0, true); bnx2x_vf_flr() 948 bnx2x_vf_flr_clnup_hw(bp, vf); bnx2x_vf_flr() 951 bnx2x_vf_free_resc(bp, vf); bnx2x_vf_flr() 954 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); bnx2x_vf_flr() 961 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) bnx2x_vf_flr_clnup() argument 966 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { bnx2x_vf_flr_clnup() 968 if (bnx2x_vf(bp, i, state) != VF_RESET || bnx2x_vf_flr_clnup() 969 !bnx2x_vf(bp, i, flr_clnup_stage)) bnx2x_vf_flr_clnup() 973 i, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_flr_clnup() 975 vf = BP_VF(bp, i); bnx2x_vf_flr_clnup() 978 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); bnx2x_vf_flr_clnup() 981 bnx2x_vf_flr(bp, vf); bnx2x_vf_flr_clnup() 985 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); bnx2x_vf_flr_clnup() 996 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); bnx2x_vf_flr_clnup() 998 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], bnx2x_vf_flr_clnup() 999 bp->vfdb->flrd_vfs[i]); bnx2x_vf_flr_clnup() 1001 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); bnx2x_vf_flr_clnup() 1007 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); bnx2x_vf_flr_clnup() 1010 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) bnx2x_vf_handle_flr_event() argument 1016 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); bnx2x_vf_handle_flr_event() 1020 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); bnx2x_vf_handle_flr_event() 1022 for_each_vf(bp, i) { for_each_vf() 1023 struct bnx2x_virtf *vf = BP_VF(bp, i); for_each_vf() 1027 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); for_each_vf() 1029 reset = bp->vfdb->flrd_vfs[1] & for_each_vf() 1044 bnx2x_vf_flr_clnup(bp); 1048 void bnx2x_iov_init_dq(struct bnx2x *bp) bnx2x_iov_init_dq() argument 1050 if (!IS_SRIOV(bp)) bnx2x_iov_init_dq() 1054 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); bnx2x_iov_init_dq() 1055 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); bnx2x_iov_init_dq() 1060 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); bnx2x_iov_init_dq() 1063 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); bnx2x_iov_init_dq() 1068 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); bnx2x_iov_init_dq() 1074 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); bnx2x_iov_init_dq() 1075 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); bnx2x_iov_init_dq() 1076 REG_WR(bp, 
DORQ_REG_VF_TYPE_MIN_MCID_0, 0); bnx2x_iov_init_dq() 1077 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); bnx2x_iov_init_dq() 1082 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); bnx2x_iov_init_dq() 1085 void bnx2x_iov_init_dmae(struct bnx2x *bp) bnx2x_iov_init_dmae() argument 1087 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) bnx2x_iov_init_dmae() 1088 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); bnx2x_iov_init_dmae() 1091 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) bnx2x_vf_bus() argument 1093 struct pci_dev *dev = bp->pdev; bnx2x_vf_bus() 1094 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_bus() 1100 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) bnx2x_vf_devfn() argument 1102 struct pci_dev *dev = bp->pdev; bnx2x_vf_devfn() 1103 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_devfn() 1108 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_set_bars() argument 1111 struct pci_dev *dev = bp->pdev; bnx2x_vf_set_bars() 1112 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_set_bars() 1130 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) bnx2x_get_vf_igu_cam_info() argument 1138 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); bnx2x_get_vf_igu_cam_info() 1144 else if (current_pf == BP_FUNC(bp)) bnx2x_get_vf_igu_cam_info() 1145 bnx2x_vf_set_igu_info(bp, sb_id, bnx2x_get_vf_igu_cam_info() 1153 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); bnx2x_get_vf_igu_cam_info() 1154 return BP_VFDB(bp)->vf_sbs_pool; bnx2x_get_vf_igu_cam_info() 1157 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) __bnx2x_iov_free_vfdb() argument 1159 if (bp->vfdb) { __bnx2x_iov_free_vfdb() 1160 kfree(bp->vfdb->vfqs); __bnx2x_iov_free_vfdb() 1161 kfree(bp->vfdb->vfs); __bnx2x_iov_free_vfdb() 1162 kfree(bp->vfdb); __bnx2x_iov_free_vfdb() 1164 bp->vfdb = NULL; __bnx2x_iov_free_vfdb() 1167 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) bnx2x_sriov_pci_cfg_info() argument 1170 struct pci_dev *dev = bp->pdev; bnx2x_sriov_pci_cfg_info() 1192 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) bnx2x_sriov_info() argument 1200 if (bnx2x_sriov_pci_cfg_info(bp, iov)) bnx2x_sriov_info() 1207 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); bnx2x_sriov_info() 1209 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); bnx2x_sriov_info() 1213 BP_FUNC(bp), bnx2x_sriov_info() 1221 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_iov_init_one() argument 1226 struct pci_dev *dev = bp->pdev; bnx2x_iov_init_one() 1228 bp->vfdb = NULL; bnx2x_iov_init_one() 1231 if (IS_VF(bp)) bnx2x_iov_init_one() 1239 if (CHIP_IS_E1x(bp)) bnx2x_iov_init_one() 1247 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { bnx2x_iov_init_one() 1249 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); bnx2x_iov_init_one() 1262 if (!bnx2x_ari_enabled(bp->pdev)) { bnx2x_iov_init_one() 1268 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_iov_init_one() 1274 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); bnx2x_iov_init_one() 1275 if (!bp->vfdb) { bnx2x_iov_init_one() 1286 iov = &(bp->vfdb->sriov); bnx2x_iov_init_one() 1287 err = bnx2x_sriov_info(bp, iov); bnx2x_iov_init_one() 1301 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * bnx2x_iov_init_one() 1302 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); bnx2x_iov_init_one() 1303 if (!bp->vfdb->vfs) { bnx2x_iov_init_one() 1310 for_each_vf(bp, i) { for_each_vf() 1311 bnx2x_vf(bp, i, index) = i; for_each_vf() 1312 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; for_each_vf() 1313 
bnx2x_vf(bp, i, state) = VF_FREE; for_each_vf() 1314 mutex_init(&bnx2x_vf(bp, i, op_mutex)); for_each_vf() 1315 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; for_each_vf() 1319 if (!bnx2x_get_vf_igu_cam_info(bp)) { 1326 bp->vfdb->vfqs = kzalloc( 1330 if (!bp->vfdb->vfqs) { 1337 mutex_init(&bp->vfdb->event_mutex); 1339 mutex_init(&bp->vfdb->bulletin_mutex); 1344 __bnx2x_iov_free_vfdb(bp); 1348 void bnx2x_iov_remove_one(struct bnx2x *bp) bnx2x_iov_remove_one() argument 1353 if (!IS_SRIOV(bp)) bnx2x_iov_remove_one() 1356 bnx2x_disable_sriov(bp); bnx2x_iov_remove_one() 1359 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { bnx2x_iov_remove_one() 1360 bnx2x_pretend_func(bp, bnx2x_iov_remove_one() 1361 HW_VF_HANDLE(bp, bnx2x_iov_remove_one() 1362 bp->vfdb->sriov.first_vf_in_pf + bnx2x_iov_remove_one() 1365 bp->vfdb->sriov.first_vf_in_pf + vf_idx); bnx2x_iov_remove_one() 1366 bnx2x_vf_enable_internal(bp, 0); bnx2x_iov_remove_one() 1367 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_iov_remove_one() 1371 __bnx2x_iov_free_vfdb(bp); bnx2x_iov_remove_one() 1374 void bnx2x_iov_free_mem(struct bnx2x *bp) bnx2x_iov_free_mem() argument 1378 if (!IS_SRIOV(bp)) bnx2x_iov_free_mem() 1383 struct hw_dma *cxt = &bp->vfdb->context[i]; bnx2x_iov_free_mem() 1387 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, bnx2x_iov_free_mem() 1388 BP_VFDB(bp)->sp_dma.mapping, bnx2x_iov_free_mem() 1389 BP_VFDB(bp)->sp_dma.size); bnx2x_iov_free_mem() 1391 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, bnx2x_iov_free_mem() 1392 BP_VF_MBX_DMA(bp)->mapping, bnx2x_iov_free_mem() 1393 BP_VF_MBX_DMA(bp)->size); bnx2x_iov_free_mem() 1395 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, bnx2x_iov_free_mem() 1396 BP_VF_BULLETIN_DMA(bp)->mapping, bnx2x_iov_free_mem() 1397 BP_VF_BULLETIN_DMA(bp)->size); bnx2x_iov_free_mem() 1400 int bnx2x_iov_alloc_mem(struct bnx2x *bp) bnx2x_iov_alloc_mem() argument 1405 if (!IS_SRIOV(bp)) bnx2x_iov_alloc_mem() 1409 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * bnx2x_iov_alloc_mem() 1413 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); bnx2x_iov_alloc_mem() 1428 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); bnx2x_iov_alloc_mem() 1429 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, bnx2x_iov_alloc_mem() 1431 if (!BP_VFDB(bp)->sp_dma.addr) bnx2x_iov_alloc_mem() 1433 BP_VFDB(bp)->sp_dma.size = tot_size; bnx2x_iov_alloc_mem() 1436 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; bnx2x_iov_alloc_mem() 1437 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, bnx2x_iov_alloc_mem() 1439 if (!BP_VF_MBX_DMA(bp)->addr) bnx2x_iov_alloc_mem() 1442 BP_VF_MBX_DMA(bp)->size = tot_size; bnx2x_iov_alloc_mem() 1445 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; bnx2x_iov_alloc_mem() 1446 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, bnx2x_iov_alloc_mem() 1448 if (!BP_VF_BULLETIN_DMA(bp)->addr) bnx2x_iov_alloc_mem() 1451 BP_VF_BULLETIN_DMA(bp)->size = tot_size; bnx2x_iov_alloc_mem() 1459 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfq_init() argument 1470 bnx2x_init_queue_obj(bp, &q->sp_obj, bnx2x_vfq_init() 1472 bnx2x_vf_sp(bp, vf, q_data), bnx2x_vfq_init() 1473 bnx2x_vf_sp_map(bp, vf, q_data), bnx2x_vfq_init() 1484 static int bnx2x_max_speed_cap(struct bnx2x *bp) bnx2x_max_speed_cap() argument 1486 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; bnx2x_max_speed_cap() 1495 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) bnx2x_iov_link_update_vf() 
argument 1497 struct bnx2x_link_report_data *state = &bp->last_reported_link; bnx2x_iov_link_update_vf() 1504 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); bnx2x_iov_link_update_vf() 1508 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_iov_link_update_vf() 1534 bulletin->link_speed = bnx2x_max_speed_cap(bp); bnx2x_iov_link_update_vf() 1546 rc = bnx2x_post_vf_bulletin(bp, idx); bnx2x_iov_link_update_vf() 1554 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_iov_link_update_vf() 1560 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_link_state() local 1561 struct bnx2x_virtf *vf = BP_VF(bp, idx); bnx2x_set_vf_link_state() 1571 return bnx2x_iov_link_update_vf(bp, idx); bnx2x_set_vf_link_state() 1574 void bnx2x_iov_link_update(struct bnx2x *bp) bnx2x_iov_link_update() argument 1578 if (!IS_SRIOV(bp)) bnx2x_iov_link_update() 1581 for_each_vf(bp, vfid) bnx2x_iov_link_update() 1582 bnx2x_iov_link_update_vf(bp, vfid); bnx2x_iov_link_update() 1586 int bnx2x_iov_nic_init(struct bnx2x *bp) bnx2x_iov_nic_init() argument 1590 if (!IS_SRIOV(bp)) { bnx2x_iov_nic_init() 1595 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); bnx2x_iov_nic_init() 1601 for_each_vf(bp, vfid) { for_each_vf() 1602 struct bnx2x_virtf *vf = BP_VF(bp, vfid); for_each_vf() 1604 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * for_each_vf() 1608 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + for_each_vf() 1617 bnx2x_iov_static_resc(bp, vf); for_each_vf() 1621 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); for_each_vf() 1629 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, for_each_vf() 1631 bnx2x_vf_sp(bp, vf, mcast_rdata), for_each_vf() 1632 bnx2x_vf_sp_map(bp, vf, mcast_rdata), for_each_vf() 1638 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) for_each_vf() 1639 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * for_each_vf() 1642 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + for_each_vf() 1646 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); for_each_vf() 1650 for_each_vf(bp, vfid) { for_each_vf() 1651 struct bnx2x_virtf *vf = BP_VF(bp, vfid); for_each_vf() 1654 vf->bus = bnx2x_vf_bus(bp, vfid); for_each_vf() 1655 vf->devfn = bnx2x_vf_devfn(bp, vfid); for_each_vf() 1656 bnx2x_vf_set_bars(bp, vf); for_each_vf() 1670 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) bnx2x_iov_chip_cleanup() argument 1674 if (!IS_SRIOV(bp)) bnx2x_iov_chip_cleanup() 1678 for_each_vf(bp, i) bnx2x_iov_chip_cleanup() 1679 bnx2x_vf_release(bp, BP_VF(bp, i)); bnx2x_iov_chip_cleanup() 1685 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) bnx2x_iov_init_ilt() argument 1688 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_iov_init_ilt() 1690 if (!IS_SRIOV(bp)) bnx2x_iov_init_ilt() 1695 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); bnx2x_iov_init_ilt() 1704 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) bnx2x_iov_is_vf_cid() argument 1711 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, bnx2x_vf_handle_classification_eqe() argument 1723 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, bnx2x_vf_handle_classification_eqe() 1727 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, bnx2x_vf_handle_classification_eqe() 1742 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, bnx2x_vf_handle_mcast_eqe() argument 1753 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_vf_handle_mcast_eqe() 1761 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, bnx2x_vf_handle_filters_eqe() argument 1769 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, bnx2x_vf_handle_rss_update_eqe() argument 1775 int 
bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) bnx2x_iov_eq_sp_event() argument 1782 if (!IS_SRIOV(bp)) bnx2x_iov_eq_sp_event() 1820 if (!bnx2x_iov_is_vf_cid(bp, cid)) { bnx2x_iov_eq_sp_event() 1832 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_iov_eq_sp_event() 1844 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, bnx2x_iov_eq_sp_event() 1852 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); bnx2x_iov_eq_sp_event() 1857 bnx2x_vf_handle_mcast_eqe(bp, vf); bnx2x_iov_eq_sp_event() 1862 bnx2x_vf_handle_filters_eqe(bp, vf); bnx2x_iov_eq_sp_event() 1867 bnx2x_vf_handle_rss_update_eqe(bp, vf); bnx2x_iov_eq_sp_event() 1877 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) bnx2x_vf_by_cid() argument 1884 return bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_by_cid() 1887 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, bnx2x_iov_set_queue_sp_obj() argument 1892 if (!IS_SRIOV(bp)) bnx2x_iov_set_queue_sp_obj() 1895 vf = bnx2x_vf_by_cid(bp, vf_cid); bnx2x_iov_set_queue_sp_obj() 1909 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bnx2x_iov_adjust_stats_req() argument 1918 if (!IS_SRIOV(bp)) bnx2x_iov_adjust_stats_req() 1921 if (!NO_FCOE(bp)) bnx2x_iov_adjust_stats_req() 1925 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; bnx2x_iov_adjust_stats_req() 1931 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, bnx2x_iov_adjust_stats_req() 1934 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_iov_adjust_stats_req() 1938 cur_query_entry = &bp->fw_stats_req-> bnx2x_iov_adjust_stats_req() 1941 for_each_vf(bp, i) { for_each_vf() 1943 struct bnx2x_virtf *vf = BP_VF(bp, i); for_each_vf() 1960 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == for_each_vfq() 1987 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1991 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, bnx2x_vf_qtbl_set_q() argument 1997 REG_WR(bp, reg, val); bnx2x_vf_qtbl_set_q() 2000 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_clr_qtbl() argument 2005 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, bnx2x_vf_clr_qtbl() 2009 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_igu_disable() argument 2014 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_igu_disable() 2015 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); bnx2x_vf_igu_disable() 2018 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); bnx2x_vf_igu_disable() 2019 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_igu_disable() 2022 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_max_queue_cnt() argument 2029 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_chk_avail_resc() argument 2032 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_chk_avail_resc() 2033 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_chk_avail_resc() 2044 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_acquire() argument 2047 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * bnx2x_vf_acquire() 2051 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + bnx2x_vf_acquire() 2064 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { bnx2x_vf_acquire() 2083 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { bnx2x_vf_acquire() 2092 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_acquire() 2093 vf_txq_count(vf) = resc->num_txqs ? 
: bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_acquire() 2097 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); bnx2x_vf_acquire() 2127 bnx2x_vfq_init(bp, vf, q); for_each_vfq() 2133 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) bnx2x_vf_init() argument 2143 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, for_each_vf_sb() 2157 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2161 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2175 func_init.pf_id = BP_FUNC(bp); 2180 bnx2x_func_init(bp, &func_init); 2183 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2184 bnx2x_vf_enable_traffic(bp, vf); 2188 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2194 bnx2x_post_vf_bulletin(bp, vf->index); 2211 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_close() argument 2219 rc = bnx2x_vf_queue_teardown(bp, vf, i); bnx2x_vf_close() 2226 bnx2x_vf_igu_disable(bp, vf); bnx2x_vf_close() 2230 bnx2x_vf_clr_qtbl(bp, vf); bnx2x_vf_close() 2241 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); bnx2x_vf_close() 2258 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_free() argument 2271 rc = bnx2x_vf_close(bp, vf); bnx2x_vf_free() 2277 bnx2x_vf_free_resc(bp, vf); bnx2x_vf_free() 2291 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_rss_update() argument 2296 return bnx2x_config_rss(bp, rss); bnx2x_vf_rss_update() 2299 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_tpa_update() argument 2322 rc = bnx2x_queue_state_change(bp, &qstate); bnx2x_vf_tpa_update() 2338 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_release() argument 2343 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); bnx2x_vf_release() 2345 rc = bnx2x_vf_free(bp, vf); bnx2x_vf_release() 2350 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); bnx2x_vf_release() 2354 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_lock_vf_pf_channel() argument 2374 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_unlock_vf_pf_channel() argument 2405 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) bnx2x_set_pf_tx_switching() argument 2412 prev_flags = bp->flags; bnx2x_set_pf_tx_switching() 2414 bp->flags |= TX_SWITCHING; bnx2x_set_pf_tx_switching() 2416 bp->flags &= ~TX_SWITCHING; bnx2x_set_pf_tx_switching() 2417 if (prev_flags == bp->flags) bnx2x_set_pf_tx_switching() 2421 if ((bp->state != BNX2X_STATE_OPEN) || bnx2x_set_pf_tx_switching() 2422 (bnx2x_get_q_logical_state(bp, bnx2x_set_pf_tx_switching() 2423 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != bnx2x_set_pf_tx_switching() 2441 for_each_eth_queue(bp, i) { for_each_eth_queue() 2442 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue() 2445 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; for_each_eth_queue() 2448 rc = bnx2x_queue_state_change(bp, &q_params); for_each_eth_queue() 2461 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); bnx2x_sriov_configure() local 2463 if (!IS_SRIOV(bp)) { bnx2x_sriov_configure() 2468 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", bnx2x_sriov_configure() 2469 num_vfs_param, BNX2X_NR_VIRTFN(bp)); bnx2x_sriov_configure() 2472 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_sriov_configure() 2478 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { bnx2x_sriov_configure() 2480 num_vfs_param, BNX2X_NR_VIRTFN(bp)); bnx2x_sriov_configure() 2481 num_vfs_param = 
BNX2X_NR_VIRTFN(bp); bnx2x_sriov_configure() 2484 bp->requested_nr_virtfn = num_vfs_param; bnx2x_sriov_configure() 2486 bnx2x_set_pf_tx_switching(bp, false); bnx2x_sriov_configure() 2487 bnx2x_disable_sriov(bp); bnx2x_sriov_configure() 2490 return bnx2x_enable_sriov(bp); bnx2x_sriov_configure() 2496 int bnx2x_enable_sriov(struct bnx2x *bp) bnx2x_enable_sriov() argument 2498 int rc = 0, req_vfs = bp->requested_nr_virtfn; bnx2x_enable_sriov() 2506 first_vf = bp->vfdb->sriov.first_vf_in_pf; bnx2x_enable_sriov() 2510 BP_VFDB(bp)->vf_sbs_pool / req_vfs); bnx2x_enable_sriov() 2514 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); bnx2x_enable_sriov() 2517 vf_sb_count(BP_VF(bp, vf_idx)) = 0; bnx2x_enable_sriov() 2519 bp->vfdb->vf_sbs_pool = 0; bnx2x_enable_sriov() 2522 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; bnx2x_enable_sriov() 2531 REG_WR(bp, address, igu_entry); bnx2x_enable_sriov() 2538 bnx2x_get_vf_igu_cam_info(bp); bnx2x_enable_sriov() 2541 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); bnx2x_enable_sriov() 2544 for_each_vf(bp, vf_idx) { for_each_vf() 2545 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); for_each_vf() 2548 vf->vfqs = &bp->vfdb->vfqs[qcount]; for_each_vf() 2550 bnx2x_iov_static_resc(bp, vf); for_each_vf() 2558 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2559 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2564 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2570 bnx2x_disable_sriov(bp); 2572 rc = bnx2x_set_pf_tx_switching(bp, true); 2576 rc = pci_enable_sriov(bp->pdev, req_vfs); 2585 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) bnx2x_pf_set_vfs_vlan() argument 2591 for_each_vf(bp, vfidx) { for_each_vf() 2592 bulletin = BP_VF_BULLETIN(bp, vfidx); for_each_vf() 2593 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) for_each_vf() 2594 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); for_each_vf() 2598 void bnx2x_disable_sriov(struct bnx2x *bp) bnx2x_disable_sriov() argument 2600 if (pci_vfs_assigned(bp->pdev)) { bnx2x_disable_sriov() 2606 pci_disable_sriov(bp->pdev); bnx2x_disable_sriov() 2609 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, bnx2x_vf_op_prep() argument 2614 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_vf_op_prep() 2619 if (!IS_SRIOV(bp)) { bnx2x_vf_op_prep() 2624 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { bnx2x_vf_op_prep() 2626 vfidx, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_op_prep() 2631 *vf = BP_VF(bp, vfidx); bnx2x_vf_op_prep() 2632 *bulletin = BP_VF_BULLETIN(bp, vfidx); bnx2x_vf_op_prep() 2657 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_vf_config() local 2665 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_get_vf_config() 2683 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { bnx2x_get_vf_config() 2684 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, bnx2x_get_vf_config() 2686 vlan_obj->get_n_elements(bp, vlan_obj, 1, bnx2x_get_vf_config() 2691 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_get_vf_config() 2708 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_get_vf_config() 2733 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_mac() local 2744 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_set_vf_mac() 2748 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_mac() 2757 rc = bnx2x_post_vf_bulletin(bp, vfidx); bnx2x_set_vf_mac() 2760 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_mac() 2768 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); bnx2x_set_vf_mac() 2776 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_set_vf_mac() 2780 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); bnx2x_set_vf_mac() 2784 rc 
= bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); bnx2x_set_vf_mac() 2792 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); bnx2x_set_vf_mac() 2801 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, bnx2x_set_vf_mac() 2805 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); bnx2x_set_vf_mac() 2818 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_vlan() local 2835 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_set_vf_vlan() 2845 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_vlan() 2853 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_vlan() 2857 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != bnx2x_set_vf_vlan() 2862 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_set_vf_vlan() 2866 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); bnx2x_set_vf_vlan() 2871 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, bnx2x_set_vf_vlan() 2886 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, bnx2x_set_vf_vlan() 2889 bnx2x_config_rx_mode(bp, &rx_ramrod); bnx2x_set_vf_vlan() 2900 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); bnx2x_set_vf_vlan() 2942 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_set_vf_vlan() 2955 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); bnx2x_set_vf_vlan() 2974 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) bnx2x_sample_bulletin() argument 2986 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, bnx2x_sample_bulletin() 2989 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); bnx2x_sample_bulletin() 2991 if (bp->shadow_bulletin.content.crc == crc) bnx2x_sample_bulletin() 2995 bp->shadow_bulletin.content.crc, crc); bnx2x_sample_bulletin() 3003 bulletin = &bp->shadow_bulletin.content; bnx2x_sample_bulletin() 3006 if (bp->old_bulletin.version == bulletin->version) bnx2x_sample_bulletin() 3011 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { bnx2x_sample_bulletin() 3013 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); bnx2x_sample_bulletin() 3020 bp->vf_link_vars.line_speed = bulletin->link_speed; bnx2x_sample_bulletin() 3021 bp->vf_link_vars.link_report_flags = 0; bnx2x_sample_bulletin() 3025 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin() 3029 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin() 3033 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin() 3037 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin() 3038 __bnx2x_link_report(bp); bnx2x_sample_bulletin() 3041 /* copy new bulletin board to bp */ bnx2x_sample_bulletin() 3042 memcpy(&bp->old_bulletin, bulletin, bnx2x_sample_bulletin() 3048 void bnx2x_timer_sriov(struct bnx2x *bp) bnx2x_timer_sriov() argument 3050 bnx2x_sample_bulletin(bp); bnx2x_timer_sriov() 3053 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) bnx2x_timer_sriov() 3054 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, bnx2x_timer_sriov() 3058 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) bnx2x_vf_doorbells() argument 3061 return bp->regview + PXP_VF_ADDR_DB_START; bnx2x_vf_doorbells() 3064 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) bnx2x_vf_pci_dealloc() argument 3066 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, bnx2x_vf_pci_dealloc() 3068 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, bnx2x_vf_pci_dealloc() 3072 int bnx2x_vf_pci_alloc(struct bnx2x *bp) bnx2x_vf_pci_alloc() argument 3074 mutex_init(&bp->vf2pf_mutex); bnx2x_vf_pci_alloc() 3077 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, bnx2x_vf_pci_alloc() 
3079 if (!bp->vf2pf_mbox) bnx2x_vf_pci_alloc() 3083 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, bnx2x_vf_pci_alloc() 3085 if (!bp->pf2vf_bulletin) bnx2x_vf_pci_alloc() 3088 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); bnx2x_vf_pci_alloc() 3093 bnx2x_vf_pci_dealloc(bp); bnx2x_vf_pci_alloc() 3097 void bnx2x_iov_channel_down(struct bnx2x *bp) bnx2x_iov_channel_down() argument 3102 if (!IS_SRIOV(bp)) bnx2x_iov_channel_down() 3105 for_each_vf(bp, vf_idx) { for_each_vf() 3109 bulletin = BP_VF_BULLETIN(bp, vf_idx); for_each_vf() 3113 bnx2x_post_vf_bulletin(bp, vf_idx); for_each_vf() 3119 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); bnx2x_iov_task() local 3121 if (!netif_running(bp->dev)) bnx2x_iov_task() 3125 &bp->iov_task_state)) bnx2x_iov_task() 3126 bnx2x_vf_handle_flr_event(bp); bnx2x_iov_task() 3129 &bp->iov_task_state)) bnx2x_iov_task() 3130 bnx2x_vf_mbx(bp); bnx2x_iov_task() 3133 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) bnx2x_schedule_iov_task() argument 3136 set_bit(flag, &bp->iov_task_state); bnx2x_schedule_iov_task() 3139 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); bnx2x_schedule_iov_task()
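The bnx2x_sriov.c excerpt above ends with the driver's deferred-IOV plumbing: producers (often in interrupt context) set a bit in iov_task_state and queue a delayed work item with zero delay, and the worker consumes events with test-and-clear so each one fires exactly once. A minimal sketch of that pattern, assuming illustrative demo_* names rather than the driver's:

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

enum demo_iov_flag { DEMO_IOV_HANDLE_FLR, DEMO_IOV_HANDLE_VF_MSG };

struct demo_adapter {
	unsigned long iov_task_state;	/* one bit per pending event */
	struct delayed_work iov_task;
};

static void demo_iov_task(struct work_struct *work)
{
	struct demo_adapter *ad =
		container_of(work, struct demo_adapter, iov_task.work);

	/* test_and_clear_bit() consumes each event exactly once, even if
	 * several producers set bits before the worker got to run */
	if (test_and_clear_bit(DEMO_IOV_HANDLE_FLR, &ad->iov_task_state))
		pr_info("demo: handling VF FLR\n");
	if (test_and_clear_bit(DEMO_IOV_HANDLE_VF_MSG, &ad->iov_task_state))
		pr_info("demo: servicing VF-PF mailbox\n");
}

static void demo_schedule_iov_task(struct demo_adapter *ad,
				   enum demo_iov_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &ad->iov_task_state);
	smp_mb__after_atomic();
	schedule_delayed_work(&ad->iov_task, 0);
}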
|
H A D | bnx2x.h | 45 #define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) 86 bp->dev ? (bp->dev->name) : "?", \ 91 if (unlikely(bp->msg_enable & (__mask))) \ 97 if (unlikely((bp->msg_enable & (__mask)) == __mask)) \ 103 if (unlikely(bp->msg_enable & (__mask))) \ 110 if (unlikely(netif_msg_probe(bp))) \ 113 bp->dev ? (bp->dev->name) : "?", \ 122 bp->dev ? (bp->dev->name) : "?", \ 132 if (unlikely(netif_msg_probe(bp))) \ 133 dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ 137 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int); 141 bp->panic = 1; \ 143 bnx2x_panic_dump(bp, true); \ 148 bp->panic = 1; \ 150 bnx2x_panic_dump(bp, false); \ 161 #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) 163 #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) 164 #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) 165 #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) 167 #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) 168 #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) 169 #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) 171 #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) 172 #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) 174 #define REG_RD_DMAE(bp, offset, valp, len32) \ 176 bnx2x_read_dmae(bp, offset, len32);\ 177 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ 180 #define REG_WR_DMAE(bp, offset, valp, len32) \ 182 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ 183 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ 187 #define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ 188 REG_WR_DMAE(bp, offset, valp, len32) 190 #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ 192 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ 193 bnx2x_write_big_buf_wb(bp, addr, len32); \ 196 #define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ 198 #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) 199 #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) 201 #define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ 203 #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) 204 #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 205 #define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ 207 #define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ 210 #define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) 211 #define MF_CFG_WR(bp, field, val) REG_WR(bp,\ 212 MF_CFG_ADDR(bp, field), (val)) 213 #define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) 215 #define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ 216 (SHMEM2_RD((bp), size) > \ 219 #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 220 #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 242 (&bp->def_status_blk->sp_sb.\ 246 (&bp->def_status_blk->sp_sb.\ 273 #define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \ 274 (bp)->max_cos) 278 #define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \ 281 #define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \ 284 #define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp)) 286 #define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \ 287 (UIO_DPM_ALIGN(bp) == UIO_DPM)) 289 #define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \ 290 (UIO_DPM_CID0_OFFSET(bp))) 292 #define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \ 293 BNX2X_1st_NON_L2_ETH_CID(bp)) 295 #define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) 297 #define 
BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) 299 #define CNIC_SUPPORT(bp) ((bp)->cnic_support) 300 #define CNIC_ENABLED(bp) ((bp)->cnic_enabled) 301 #define CNIC_LOADED(bp) ((bp)->cnic_loaded) 302 #define FCOE_INIT(bp) ((bp)->fcoe_init) 315 #define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp)) 316 #define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \ 317 (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) 320 #define FP_COS_TO_TXQ(fp, cos, bp) \ 321 ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) 334 #define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) 335 #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) 368 #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) 369 #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ 372 #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) 415 #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ 416 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) 419 #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ 421 #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) 530 struct bnx2x *bp; /* parent */ member in struct:bnx2x_fastpath 604 #define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) 605 #define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index]) 606 #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) 607 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) 723 #define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \ 725 #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)]) 726 #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) 727 #define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)]) 728 #define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var) 729 #define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ 733 #define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) 734 #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) 735 #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) 788 #define NUM_BD_REQ BRB_SIZE(bp) 791 #define BD_TH_LO(bp) (NUM_BD_REQ + \ 793 FW_DROP_LEVEL(bp)) 794 #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) 796 #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) 798 #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? 
\ 836 #define NUM_RCQ_REQ BRB_SIZE(bp) 839 #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ 841 FW_DROP_LEVEL(bp)) 842 #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) 865 #define DOORBELL(bp, cid, val) \ 867 writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ 945 #define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) 947 #define CHIP_NUM(bp) (bp->common.chip_id >> 16) 971 #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 972 #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) 973 #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) 974 #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) 975 #define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF) 976 #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) 977 #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) 978 #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) 979 #define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF) 980 #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) 981 #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) 982 #define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF) 983 #define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811) 984 #define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF) 985 #define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF) 986 #define CHIP_IS_57840(bp) \ 987 ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \ 988 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \ 989 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) 990 #define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \ 991 (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE)) 992 #define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF) 993 #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 994 CHIP_IS_57711E(bp)) 995 #define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \ 996 CHIP_IS_57811_MF(bp) || \ 997 CHIP_IS_57811_VF(bp)) 998 #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ 999 CHIP_IS_57712_MF(bp) || \ 1000 CHIP_IS_57712_VF(bp)) 1001 #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ 1002 CHIP_IS_57800_MF(bp) || \ 1003 CHIP_IS_57800_VF(bp) || \ 1004 CHIP_IS_57810(bp) || \ 1005 CHIP_IS_57810_MF(bp) || \ 1006 CHIP_IS_57810_VF(bp) || \ 1007 CHIP_IS_57811xx(bp) || \ 1008 CHIP_IS_57840(bp) || \ 1009 CHIP_IS_57840_MF(bp) || \ 1010 CHIP_IS_57840_VF(bp)) 1011 #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) 1012 #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) 1013 #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) 1017 #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) 1021 #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) 1023 #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ 1024 !(CHIP_REV_VAL(bp) & 0x00001000)) 1026 #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ 1027 (CHIP_REV_VAL(bp) & 0x00001000)) 1029 #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ 1030 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) 1032 #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 1033 #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 1034 #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ 1037 #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? 
\ 1038 CHIP_REV_SIM(bp) :\ 1039 CHIP_REV_VAL(bp)) 1040 #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ 1041 (CHIP_REV(bp) == CHIP_REV_Bx)) 1042 #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ 1043 (CHIP_REV(bp) == CHIP_REV_Ax)) 1055 #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) 1076 #define CHIP_INT_MODE_IS_NBC(bp) \ 1077 (!CHIP_IS_E1x(bp) && \ 1078 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) 1079 #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) 1085 #define CHIP_MODE(bp) (bp->common.chip_port_mode) 1086 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) 1270 #define bnx2x_sp(bp, var) (&bp->slowpath->var) 1271 #define bnx2x_sp_mapping(bp, var) \ 1272 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) 1327 (&bp->def_status_blk->sp_sb.\ 1433 #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) 1434 #define BP_PORT(bp) (bp->pfid & 1) 1435 #define BP_FUNC(bp) (bp->pfid) 1436 #define BP_ABS_FUNC(bp) (bp->pf_num) 1437 #define BP_VN(bp) ((bp)->pfid >> 1) 1438 #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) 1439 #define BP_L_ID(bp) (BP_VN(bp) << 2) 1440 #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ 1441 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1442 #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) 1468 #define IRO (bp->iro_arr) 1566 #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) 1569 #define IS_VF(bp) ((bp)->flags & IS_VF_FLAG) 1570 #define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG)) 1572 #define IS_VF(bp) false 1573 #define IS_PF(bp) true 1576 #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1577 #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1578 #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) 1623 #define IS_MF(bp) (bp->mf_mode != 0) 1624 #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) 1625 #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) 1626 #define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) 1628 #define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ 1629 bp->mf_sub_mode == SUB_MF_MODE_UFP) 1721 #define BP_ILT(bp) ((bp)->ilt) 1727 #define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp)) 1734 #define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1735 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) 1736 #define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1737 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) 1738 #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1796 #define GUNZIP_BUF(bp) (bp->gunzip_buf) 1797 #define GUNZIP_PHYS(bp) (bp->gunzip_mapping) 1798 #define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) 1806 #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) 1816 #define INIT_OPS(bp) (bp->init_ops) 1817 #define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) 1818 #define INIT_DATA(bp) (bp->init_data) 1819 #define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) 1820 #define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) 1821 #define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) 1822 #define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) 1823 #define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) 1824 #define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) 1825 #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) 1826 #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) 1833 #define IS_SRIOV(bp) ((bp)->vfdb) 1922 #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1923 #define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues) 1924 #define BNX2X_NUM_NON_CNIC_QUEUES(bp) 
(BNX2X_NUM_QUEUES(bp) - \ 1925 (bp)->num_cnic_queues) 1926 #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) 1928 #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1930 #define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) 1931 /* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ 1964 #define for_each_cnic_queue(bp, var) \ 1965 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ 1967 if (skip_queue(bp, var)) \ 1971 #define for_each_eth_queue(bp, var) \ 1972 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) 1974 #define for_each_nondefault_eth_queue(bp, var) \ 1975 for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) 1977 #define for_each_queue(bp, var) \ 1978 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1979 if (skip_queue(bp, var)) \ 1984 #define for_each_valid_rx_queue(bp, var) \ 1986 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ 1987 BNX2X_NUM_ETH_QUEUES(bp)); \ 1989 if (skip_rx_queue(bp, var)) \ 1993 #define for_each_rx_queue_cnic(bp, var) \ 1994 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ 1996 if (skip_rx_queue(bp, var)) \ 2000 #define for_each_rx_queue(bp, var) \ 2001 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 2002 if (skip_rx_queue(bp, var)) \ 2007 #define for_each_valid_tx_queue(bp, var) \ 2009 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ 2010 BNX2X_NUM_ETH_QUEUES(bp)); \ 2012 if (skip_tx_queue(bp, var)) \ 2016 #define for_each_tx_queue_cnic(bp, var) \ 2017 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ 2019 if (skip_tx_queue(bp, var)) \ 2023 #define for_each_tx_queue(bp, var) \ 2024 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 2025 if (skip_tx_queue(bp, var)) \ 2029 #define for_each_nondefault_queue(bp, var) \ 2030 for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 2031 if (skip_queue(bp, var)) \ 2041 #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) 2046 #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) 2048 #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) 2053 * @bp: driver handle 2068 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 2074 * @bp: driver handle 2085 int bnx2x_del_all_macs(struct bnx2x *bp, 2090 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); 2091 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 2093 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); 2094 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 2095 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); 2096 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 2097 void bnx2x_read_mf_cfg(struct bnx2x *bp); 2099 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); 2102 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 2103 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 2105 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); 2108 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, 2111 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2113 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2117 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); 2118 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count); 2119 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt); 2121 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, 2124 void 
bnx2x_calc_fc_adv(struct bnx2x *bp); 2125 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2127 void bnx2x_update_coalesce(struct bnx2x *bp); 2128 int bnx2x_get_cur_phy_idx(struct bnx2x *bp); 2130 bool bnx2x_port_after_undi(struct bnx2x *bp); 2132 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, reg_poll() argument 2138 val = REG_RD(bp, reg); reg_poll() 2149 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, 2153 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2158 dma_free_coherent(&bp->pdev->dev, size, x, y); \ 2252 #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) 2259 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 2260 BP_VN(bp)) 2261 #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 2272 #define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ 2273 IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF) 2287 #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 2288 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ 2330 #define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \ 2331 IS_MF_FCOE_AFEX(bp)) 2336 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) 2445 (&bp->def_status_blk->sp_sb.\ 2455 #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ 2456 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) 2475 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err); 2492 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev); 2493 void bnx2x_notify_link_changed(struct bnx2x *bp); 2495 #define BNX2X_MF_SD_PROTOCOL(bp) \ 2496 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) 2498 #define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ 2499 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) 2501 #define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \ 2502 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE) 2504 #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) 2505 #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) 2506 #define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp)) 2508 #define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp)) 2515 #define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \ 2518 #define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \ 2519 (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) 2521 #define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \ 2522 (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) 2524 #define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \ 2525 (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) 2527 #define IS_MF_FCOE_AFEX(bp) \ 2528 (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)) 2530 #define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \ 2531 (IS_MF_SD(bp) && \ 2532 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2533 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2535 #define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \ 2536 (IS_MF_SI(bp) && \ 2537 (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \ 2538 BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))) 2540 #define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \ 2541 (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ 2542 IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) 2564 void bnx2x_set_local_cmng(struct bnx2x *bp); 2566 void bnx2x_update_mng_version(struct bnx2x *bp); 2568 #define MCPR_SCRATCH_BASE(bp) \ 2569 (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2573 void bnx2x_init_ptp(struct bnx2x *bp); 2574 int bnx2x_configure_ptp_filters(struct bnx2x *bp); 2575 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
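Nearly everything in this header funnels register access through the REG_RD()/REG_WR() family, thin readl()/writel() wrappers over the ioremapped BAR at bp->regview, and reg_poll() above layers a bounded retry loop on top. A reduced sketch of both, with demo_* names standing in for the real structures:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

struct demo_bp { void __iomem *regview; };	/* ioremapped BAR0 */

static inline u32 demo_reg_rd(struct demo_bp *bp, u32 offset)
{
	return readl(bp->regview + offset);
}

static inline void demo_reg_wr(struct demo_bp *bp, u32 offset, u32 val)
{
	writel(val, bp->regview + offset);
}

/* Poll a register until it reads back 'expected' or 'ms' milliseconds
 * have elapsed, sleeping 'wait' ms between reads (mirrors reg_poll()). */
static u32 demo_reg_poll(struct demo_bp *bp, u32 reg, u32 expected,
			 int ms, int wait)
{
	u32 val;

	do {
		val = demo_reg_rd(bp, reg);
		if (val == expected)
			break;
		ms -= wait;
		msleep(wait);
	} while (ms > 0);

	return val;
}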
|
H A D | bnx2x_sriov.h | 212 #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) 214 #define for_each_vf(bp, var) \ 215 for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) 225 #define HW_VF_HANDLE(bp, abs_vfid) \ 226 (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) 234 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 237 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 301 #define BP_VFDB(bp) ((bp)->vfdb) 304 #define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \ 305 &((bp)->vfdb->vfs[idx]) : NULL) 306 #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) 313 #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i]) 318 #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) 320 #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid])) 323 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) 324 #define BP_VF_BULLETIN(bp, vf) \ 325 (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ 329 #define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ 332 #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ 387 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); 388 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); 389 void bnx2x_iov_remove_one(struct bnx2x *bp); 390 void bnx2x_iov_free_mem(struct bnx2x *bp); 391 int bnx2x_iov_alloc_mem(struct bnx2x *bp); 392 int bnx2x_iov_nic_init(struct bnx2x *bp); 393 int bnx2x_iov_chip_cleanup(struct bnx2x *bp); 394 void bnx2x_iov_init_dq(struct bnx2x *bp); 395 void bnx2x_iov_init_dmae(struct bnx2x *bp); 396 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 398 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); 399 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); 400 void bnx2x_iov_storm_stats_update(struct bnx2x *bp); 402 void bnx2x_vf_mbx(struct bnx2x *bp); 403 void bnx2x_vf_mbx_schedule(struct bnx2x *bp, 405 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); 411 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 414 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 418 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 423 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 428 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 434 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 438 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 441 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); 443 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 446 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, 449 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); 451 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); 453 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 456 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 465 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); 466 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); 467 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); 472 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); 473 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); 476 void bnx2x_vf_handle_flr_event(struct bnx2x *bp); 481 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); 485 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 488 int bnx2x_vfpf_acquire(struct bnx2x *bp, 
u8 tx_count, u8 rx_count); 489 int bnx2x_vfpf_release(struct bnx2x *bp); 491 int bnx2x_vfpf_init(struct bnx2x *bp); 492 void bnx2x_vfpf_close_vf(struct bnx2x *bp); 493 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, 495 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); 496 int bnx2x_vfpf_config_rss(struct bnx2x *bp, 499 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 501 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, bnx2x_vf_fill_fw_str() argument 504 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); bnx2x_vf_fill_fw_str() 507 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, bnx2x_vf_ustorm_prods_offset() argument 511 bp->acquire_resp.resc.hw_qid[fp->index] * bnx2x_vf_ustorm_prods_offset() 515 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 516 void bnx2x_timer_sriov(struct bnx2x *bp); 517 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 518 void bnx2x_vf_pci_dealloc(struct bnx2x *bp); 519 int bnx2x_vf_pci_alloc(struct bnx2x *bp); 520 int bnx2x_enable_sriov(struct bnx2x *bp); 521 void bnx2x_disable_sriov(struct bnx2x *bp); bnx2x_vf_headroom() 522 static inline int bnx2x_vf_headroom(struct bnx2x *bp) bnx2x_vf_headroom() argument 524 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; bnx2x_vf_headroom() 526 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 528 void bnx2x_iov_channel_down(struct bnx2x *bp); 532 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); 534 void bnx2x_iov_link_update(struct bnx2x *bp); 535 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); 541 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, bnx2x_iov_set_queue_sp_obj() argument 543 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} bnx2x_iov_eq_sp_event() argument 544 static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, bnx2x_iov_eq_sp_event() argument 546 static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} bnx2x_vf_mbx_schedule() argument 547 static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, bnx2x_vf_mbx_schedule() argument 549 static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } bnx2x_iov_init_dq() argument 550 static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} bnx2x_iov_alloc_mem() argument 551 static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } bnx2x_iov_free_mem() argument 552 static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} bnx2x_iov_chip_cleanup() argument 553 static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } bnx2x_iov_init_dmae() argument 554 static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} bnx2x_iov_init_one() argument 555 static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_iov_init_one() argument 557 static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} bnx2x_enable_sriov() argument 558 static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } bnx2x_disable_sriov() argument 559 static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} bnx2x_vfpf_acquire() argument 560 static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, bnx2x_vfpf_acquire() argument 562 static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } bnx2x_vfpf_init() argument 563 static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } bnx2x_vfpf_close_vf() argument 564 static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
bnx2x_vfpf_setup_q() argument 565 static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } bnx2x_vfpf_config_mac() argument 566 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, bnx2x_vfpf_config_mac() argument 568 static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, bnx2x_vfpf_config_rss() argument 571 static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } bnx2x_iov_nic_init() argument 572 static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } bnx2x_vf_headroom() argument 573 static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } bnx2x_iov_adjust_stats_req() argument 574 static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} bnx2x_vf_fill_fw_str() argument 575 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, bnx2x_vf_fill_fw_str() argument 577 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, bnx2x_vf_ustorm_prods_offset() argument 579 static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) bnx2x_sample_bulletin() argument 583 static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} bnx2x_timer_sriov() argument 585 static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) bnx2x_vf_doorbells() argument 590 static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} bnx2x_vf_pci_alloc() argument 591 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } bnx2x_pf_set_vfs_vlan() argument 592 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} bnx2x_sriov_configure() argument 594 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} bnx2x_iov_channel_down() argument 597 static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} bnx2x_iov_link_update() argument 598 static inline void bnx2x_iov_link_update(struct bnx2x *bp) {} bnx2x_iov_link_update_vf() argument 599 static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; } bnx2x_iov_link_update_vf() argument
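The tail of bnx2x_sriov.h shows a standard kernel idiom: when SR-IOV support is compiled out, every exported entry point collapses to a static inline stub, so the rest of the driver calls the API unconditionally and the compiler discards the dead paths. A reduced illustration, reusing struct demo_bp from the sketch above (CONFIG_DEMO_SRIOV stands in for the real config symbol):

#ifdef CONFIG_DEMO_SRIOV
int demo_enable_sriov(struct demo_bp *bp);
void demo_disable_sriov(struct demo_bp *bp);
int demo_vfpf_release(struct demo_bp *bp);
#else
/* No-op stubs: call sites compile unchanged with the feature off. */
static inline int demo_enable_sriov(struct demo_bp *bp) { return 0; }
static inline void demo_disable_sriov(struct demo_bp *bp) { }
static inline int demo_vfpf_release(struct demo_bp *bp) { return 0; }
#endif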
|
H A D | bnx2x_vfpf.c | 24 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); 27 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, bnx2x_add_tlv() argument 38 static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, bnx2x_vfpf_prep() argument 41 mutex_lock(&bp->vf2pf_mutex); bnx2x_vfpf_prep() 47 memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); bnx2x_vfpf_prep() 50 bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); bnx2x_vfpf_prep() 53 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); bnx2x_vfpf_prep() 57 static void bnx2x_vfpf_finalize(struct bnx2x *bp, bnx2x_vfpf_finalize() argument 63 mutex_unlock(&bp->vf2pf_mutex); bnx2x_vfpf_finalize() 67 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, bnx2x_search_tlv_list() argument 91 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) bnx2x_dp_tlv_list() argument 139 static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) bnx2x_send_msg2pf() argument 142 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); bnx2x_send_msg2pf() 154 bnx2x_sample_bulletin(bp); bnx2x_send_msg2pf() 155 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { bnx2x_send_msg2pf() 192 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) bnx2x_get_vf_id() argument 199 me_reg = readl(bp->doorbells); bnx2x_get_vf_id() 221 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bnx2x_vfpf_acquire() argument 224 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; bnx2x_vfpf_acquire() 225 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; bnx2x_vfpf_acquire() 232 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); bnx2x_vfpf_acquire() 234 if (bnx2x_get_vf_id(bp, &vf_id)) { bnx2x_vfpf_acquire() 245 req->resc_request.num_sbs = bp->igu_sb_cnt; bnx2x_vfpf_acquire() 250 req->bulletin_addr = bp->pf2vf_bulletin_mapping; bnx2x_vfpf_acquire() 253 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, bnx2x_vfpf_acquire() 260 bnx2x_add_tlv(bp, req, bnx2x_vfpf_acquire() 266 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_acquire() 272 rc = bnx2x_send_msg2pf(bp, bnx2x_vfpf_acquire() 274 bp->vf2pf_mbox_mapping); bnx2x_vfpf_acquire() 280 /* copy acquire response from buffer to bp */ bnx2x_vfpf_acquire() 281 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); bnx2x_vfpf_acquire() 288 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { bnx2x_vfpf_acquire() 291 } else if (bp->acquire_resp.hdr.status == bnx2x_vfpf_acquire() 300 bp->acquire_resp.resc.num_txqs); bnx2x_vfpf_acquire() 303 bp->acquire_resp.resc.num_rxqs); bnx2x_vfpf_acquire() 306 bp->acquire_resp.resc.num_sbs); bnx2x_vfpf_acquire() 309 bp->acquire_resp.resc.num_mac_filters); bnx2x_vfpf_acquire() 312 bp->acquire_resp.resc.num_vlan_filters); bnx2x_vfpf_acquire() 315 bp->acquire_resp.resc.num_mc_filters); bnx2x_vfpf_acquire() 318 memset(&bp->vf2pf_mbox->resp, 0, bnx2x_vfpf_acquire() 322 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire() 328 bp->acquire_resp.hdr.status); bnx2x_vfpf_acquire() 336 bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire() 339 memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); bnx2x_vfpf_acquire() 340 bp->flags |= HAS_PHYS_PORT_ID; bnx2x_vfpf_acquire() 347 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire() 355 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire() 356 bnx2x_vfpf_release(bp); bnx2x_vfpf_acquire() 363 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); 
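/* Editor's note (illustrative comment, not driver source): at this point
 * in bnx2x_vfpf_acquire() the PF's response has been copied into
 * bp->acquire_resp, and the assignments that follow derive the VF's whole
 * view of the hardware -- chip id, doorbell size, IGU status-block range,
 * firmware version, MAC address -- from that response, since a VF cannot
 * probe the PF-only configuration space directly. */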
bnx2x_vfpf_acquire() 364 bp->link_params.chip_id = bp->common.chip_id; bnx2x_vfpf_acquire() 365 bp->db_size = bp->acquire_resp.pfdev_info.db_size; bnx2x_vfpf_acquire() 366 bp->common.int_block = INT_BLOCK_IGU; bnx2x_vfpf_acquire() 367 bp->common.chip_port_mode = CHIP_2_PORT_MODE; bnx2x_vfpf_acquire() 368 bp->igu_dsb_id = -1; bnx2x_vfpf_acquire() 369 bp->mf_ov = 0; bnx2x_vfpf_acquire() 370 bp->mf_mode = 0; bnx2x_vfpf_acquire() 371 bp->common.flash_size = 0; bnx2x_vfpf_acquire() 372 bp->flags |= bnx2x_vfpf_acquire() 374 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bnx2x_vfpf_acquire() 375 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; bnx2x_vfpf_acquire() 376 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, bnx2x_vfpf_acquire() 377 sizeof(bp->fw_ver)); bnx2x_vfpf_acquire() 379 if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) bnx2x_vfpf_acquire() 380 memcpy(bp->dev->dev_addr, bnx2x_vfpf_acquire() 381 bp->acquire_resp.resc.current_mac_addr, bnx2x_vfpf_acquire() 385 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire() 389 int bnx2x_vfpf_release(struct bnx2x *bp) bnx2x_vfpf_release() argument 391 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; bnx2x_vfpf_release() 392 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_release() 396 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); bnx2x_vfpf_release() 398 if (bnx2x_get_vf_id(bp, &vf_id)) { bnx2x_vfpf_release() 406 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_release() 410 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_release() 413 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_release() 430 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_release() 436 int bnx2x_vfpf_init(struct bnx2x *bp) bnx2x_vfpf_init() argument 438 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; bnx2x_vfpf_init() 439 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_init() 443 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); bnx2x_vfpf_init() 446 for_each_eth_queue(bp, i) bnx2x_vfpf_init() 447 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, bnx2x_vfpf_init() 451 req->stats_addr = bp->fw_stats_data_mapping + bnx2x_vfpf_init() 457 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_init() 461 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_init() 463 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_init() 476 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_init() 482 void bnx2x_vfpf_close_vf(struct bnx2x *bp) bnx2x_vfpf_close_vf() argument 484 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; bnx2x_vfpf_close_vf() 485 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_close_vf() 492 if (bnx2x_get_vf_id(bp, &vf_id)) bnx2x_vfpf_close_vf() 496 for_each_queue(bp, i) bnx2x_vfpf_close_vf() 497 bnx2x_vfpf_teardown_queue(bp, i); bnx2x_vfpf_close_vf() 500 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false); bnx2x_vfpf_close_vf() 503 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); bnx2x_vfpf_close_vf() 508 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_close_vf() 512 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_close_vf() 514 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_close_vf() 523 bnx2x_vfpf_finalize(bp, &req->first_tlv); 
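/* Editor's note (illustrative comment, not driver source): the close path
 * visible here runs the PF-side teardown first -- per-queue TEARDOWN_Q
 * requests, deleting the MAC, then the CLOSE TLV over the channel -- and
 * only afterwards releases the host-side contexts (netif/NAPI stop,
 * freeing the IRQ). */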
bnx2x_vfpf_close_vf() 527 bnx2x_netif_stop(bp, 0); bnx2x_vfpf_close_vf() 529 bnx2x_del_all_napi(bp); bnx2x_vfpf_close_vf() 532 bnx2x_free_irq(bp); bnx2x_vfpf_close_vf() 535 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_leading_vfq_init() argument 542 bnx2x_init_mac_obj(bp, &q->mac_obj, bnx2x_leading_vfq_init() 544 bnx2x_vf_sp(bp, vf, mac_rdata), bnx2x_leading_vfq_init() 545 bnx2x_vf_sp_map(bp, vf, mac_rdata), bnx2x_leading_vfq_init() 549 &bp->macs_pool); bnx2x_leading_vfq_init() 551 bnx2x_init_vlan_obj(bp, &q->vlan_obj, bnx2x_leading_vfq_init() 553 bnx2x_vf_sp(bp, vf, vlan_rdata), bnx2x_leading_vfq_init() 554 bnx2x_vf_sp_map(bp, vf, vlan_rdata), bnx2x_leading_vfq_init() 558 &bp->vlans_pool); bnx2x_leading_vfq_init() 561 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, bnx2x_leading_vfq_init() 563 bnx2x_vf_sp(bp, vf, mcast_rdata), bnx2x_leading_vfq_init() 564 bnx2x_vf_sp_map(bp, vf, mcast_rdata), bnx2x_leading_vfq_init() 570 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, bnx2x_leading_vfq_init() 572 bnx2x_vf_sp(bp, vf, rss_rdata), bnx2x_leading_vfq_init() 573 bnx2x_vf_sp_map(bp, vf, rss_rdata), bnx2x_leading_vfq_init() 584 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_vfpf_setup_q() argument 587 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; bnx2x_vfpf_setup_q() 588 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_setup_q() 594 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); bnx2x_vfpf_setup_q() 624 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; bnx2x_vfpf_setup_q() 625 req->rxq.mtu = bp->dev->mtu; bnx2x_vfpf_setup_q() 629 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; bnx2x_vfpf_setup_q() 641 req->txq.hc_rate = bp->tx_ticks ? 
1000000/bp->tx_ticks : 0; bnx2x_vfpf_setup_q() 646 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_setup_q() 650 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_setup_q() 652 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_setup_q() 663 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_setup_q() 668 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) bnx2x_vfpf_teardown_queue() argument 670 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; bnx2x_vfpf_teardown_queue() 671 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_teardown_queue() 675 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, bnx2x_vfpf_teardown_queue() 681 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_teardown_queue() 685 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_teardown_queue() 687 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_teardown_queue() 703 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_teardown_queue() 709 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) bnx2x_vfpf_config_mac() argument 711 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_config_mac() 712 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_config_mac() 713 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; bnx2x_vfpf_config_mac() 717 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_config_mac() 729 bnx2x_sample_bulletin(bp); bnx2x_vfpf_config_mac() 735 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_mac() 739 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_mac() 742 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_mac() 754 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); bnx2x_vfpf_config_mac() 757 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { bnx2x_vfpf_config_mac() 759 memcpy(req->filters[0].mac, bp->dev->dev_addr, bnx2x_vfpf_config_mac() 763 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bnx2x_vfpf_config_mac() 764 bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_mac() 776 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_mac() 782 int bnx2x_vfpf_config_rss(struct bnx2x *bp, bnx2x_vfpf_config_rss() argument 785 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_config_rss() 786 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; bnx2x_vfpf_config_rss() 790 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, bnx2x_vfpf_config_rss() 794 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_rss() 826 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_rss() 829 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_rss() 845 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_rss() 852 struct bnx2x *bp = netdev_priv(dev); bnx2x_vfpf_set_mcast() local 853 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_set_mcast() 854 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_set_mcast() 858 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_vfpf_set_mcast() 859 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); bnx2x_vfpf_set_mcast() 864 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_set_mcast() 892 
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, 896 bnx2x_dp_tlv_list(bp, req); 897 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 909 bnx2x_vfpf_finalize(bp, &req->first_tlv); 914 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) bnx2x_vfpf_storm_rx_mode() argument 916 int mode = bp->rx_mode; bnx2x_vfpf_storm_rx_mode() 917 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_storm_rx_mode() 918 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_storm_rx_mode() 922 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_storm_rx_mode() 943 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_storm_rx_mode() 947 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_storm_rx_mode() 949 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_storm_rx_mode() 958 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_storm_rx_mode() 964 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) storm_memset_vf_mbx_ack() argument 969 REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); storm_memset_vf_mbx_ack() 972 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) storm_memset_vf_mbx_valid() argument 977 REG_WR8(bp, addr, 1); storm_memset_vf_mbx_valid() 981 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_enable_mbx() argument 983 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); bnx2x_vf_enable_mbx() 986 storm_memset_vf_mbx_ack(bp, abs_vfid); bnx2x_vf_enable_mbx() 987 storm_memset_vf_mbx_valid(bp, abs_vfid); bnx2x_vf_enable_mbx() 990 bnx2x_vf_enable_access(bp, abs_vfid); bnx2x_vf_enable_mbx() 994 static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, bnx2x_copy32_vf_dmae() argument 1000 if (CHIP_IS_E1x(bp)) { bnx2x_copy32_vf_dmae() 1005 if (!bp->dmae_ready) { bnx2x_copy32_vf_dmae() 1011 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); bnx2x_copy32_vf_dmae() 1039 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_copy32_vf_dmae() 1042 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, bnx2x_vf_mbx_resp_single_tlv() argument 1045 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); bnx2x_vf_mbx_resp_single_tlv() 1053 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); bnx2x_vf_mbx_resp_single_tlv() 1054 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, bnx2x_vf_mbx_resp_single_tlv() 1058 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, bnx2x_vf_mbx_resp_send_msg() argument 1062 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); bnx2x_vf_mbx_resp_send_msg() 1068 bnx2x_dp_tlv_list(bp, resp); bnx2x_vf_mbx_resp_send_msg() 1085 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, bnx2x_vf_mbx_resp_send_msg() 1098 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); bnx2x_vf_mbx_resp_send_msg() 1104 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, bnx2x_vf_mbx_resp_send_msg() 1110 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_resp_send_msg() 1120 bnx2x_vf_release(bp, vf); bnx2x_vf_mbx_resp_send_msg() 1123 static void bnx2x_vf_mbx_resp(struct bnx2x *bp, bnx2x_vf_mbx_resp() argument 1127 bnx2x_vf_mbx_resp_single_tlv(bp, vf); bnx2x_vf_mbx_resp() 1128 bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); bnx2x_vf_mbx_resp() 1131 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, bnx2x_vf_mbx_resp_phys_port() argument 1138 if (!(bp->flags & HAS_PHYS_PORT_ID)) bnx2x_vf_mbx_resp_phys_port() 1141 bnx2x_add_tlv(bp, 
buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, bnx2x_vf_mbx_resp_phys_port() 1146 memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); bnx2x_vf_mbx_resp_phys_port() 1154 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, bnx2x_vf_mbx_resp_fp_hsi_ver() argument 1161 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, bnx2x_vf_mbx_resp_fp_hsi_ver() 1174 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_acquire_resp() argument 1186 resp->pfdev_info.chip_num = bp->common.chip_id; bnx2x_vf_mbx_acquire_resp() 1187 resp->pfdev_info.db_size = bp->db_size; bnx2x_vf_mbx_acquire_resp() 1192 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, bnx2x_vf_mbx_acquire_resp() 1201 bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_mbx_acquire_resp() 1203 bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_mbx_acquire_resp() 1212 BP_VF_BULLETIN(bp, vf->index); bnx2x_vf_mbx_acquire_resp() 1258 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); 1264 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, 1266 bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); 1272 bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length); 1274 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, 1278 bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); 1281 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp, bnx2x_vf_mbx_is_windows_vm() argument 1298 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp, bnx2x_vf_mbx_acquire_chk_dorq() argument 1305 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, bnx2x_vf_mbx_acquire_chk_dorq() 1310 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire_chk_dorq() 1316 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_acquire() argument 1335 rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx); bnx2x_vf_mbx_acquire() 1346 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire() 1361 rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); bnx2x_vf_mbx_acquire() 1375 bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); bnx2x_vf_mbx_acquire() 1378 static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_init_vf() argument 1388 rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); bnx2x_vf_mbx_init_vf() 1396 bnx2x_iov_link_update_vf(bp, vf->index); bnx2x_vf_mbx_init_vf() 1399 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_init_vf() 1403 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, bnx2x_vf_mbx_set_q_flags() argument 1426 if (IS_MF_SD(bp)) bnx2x_vf_mbx_set_q_flags() 1430 static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_setup_q() argument 1456 bnx2x_leading_vfq_init(bp, vf, q); bnx2x_vf_mbx_setup_q() 1480 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, bnx2x_vf_mbx_setup_q() 1484 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, bnx2x_vf_mbx_setup_q() 1494 bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, bnx2x_vf_mbx_setup_q() 1512 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q() 1516 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q() 1544 bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, bnx2x_vf_mbx_setup_q() 1548 bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); bnx2x_vf_mbx_setup_q() 1550 rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); bnx2x_vf_mbx_setup_q() 1555 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_setup_q() 1558 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, bnx2x_vf_mbx_macvlan_list() argument 
1601 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, bnx2x_vf_mbx_dp_q_filter() argument 1612 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, bnx2x_vf_mbx_dp_q_filters() argument 1619 bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, bnx2x_vf_mbx_dp_q_filters() 1633 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_mbx_qfilters() argument 1638 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; bnx2x_vf_mbx_qfilters() 1645 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, bnx2x_vf_mbx_qfilters() 1653 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, bnx2x_vf_mbx_qfilters() 1663 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, bnx2x_vf_mbx_qfilters() 1670 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, bnx2x_vf_mbx_qfilters() 1681 BP_VF_BULLETIN(bp, vf->index); bnx2x_vf_mbx_qfilters() 1698 rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); bnx2x_vf_mbx_qfilters() 1705 rc = bnx2x_vf_mcast(bp, vf, msg->multicast, bnx2x_vf_mbx_qfilters() 1717 static int bnx2x_filters_validate_mac(struct bnx2x *bp, bnx2x_filters_validate_mac() argument 1721 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); bnx2x_filters_validate_mac() 1753 static int bnx2x_filters_validate_vlan(struct bnx2x *bp, bnx2x_filters_validate_vlan() argument 1757 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); bnx2x_filters_validate_vlan() 1786 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, bnx2x_vf_mbx_set_q_filters() argument 1793 rc = bnx2x_filters_validate_mac(bp, vf, filters); bnx2x_vf_mbx_set_q_filters() 1797 rc = bnx2x_filters_validate_vlan(bp, vf, filters); bnx2x_vf_mbx_set_q_filters() 1806 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); bnx2x_vf_mbx_set_q_filters() 1808 rc = bnx2x_vf_mbx_qfilters(bp, vf); bnx2x_vf_mbx_set_q_filters() 1810 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_set_q_filters() 1813 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_teardown_q() argument 1822 rc = bnx2x_vf_queue_teardown(bp, vf, qid); bnx2x_vf_mbx_teardown_q() 1823 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_teardown_q() 1826 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_close_vf() argument 1833 rc = bnx2x_vf_close(bp, vf); bnx2x_vf_mbx_close_vf() 1834 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_close_vf() 1837 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_release_vf() argument 1844 rc = bnx2x_vf_free(bp, vf); bnx2x_vf_mbx_release_vf() 1845 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_release_vf() 1848 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_update_rss() argument 1904 rc = bnx2x_vf_rss_update(bp, vf, &rss); bnx2x_vf_mbx_update_rss() 1906 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_update_rss() 1909 static int bnx2x_validate_tpa_params(struct bnx2x *bp, bnx2x_validate_tpa_params() argument 1922 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { bnx2x_validate_tpa_params() 1926 MAX_AGG_QS(bp)); bnx2x_validate_tpa_params() 1932 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_update_tpa() argument 1941 if (bnx2x_validate_tpa_params(bp, tpa_tlv)) bnx2x_vf_mbx_update_tpa() 1967 rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); bnx2x_vf_mbx_update_tpa() 1970 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_update_tpa() 1974 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct 
bnx2x_virtf *vf, bnx2x_vf_mbx_request() argument 1984 bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_request() 1989 bnx2x_vf_mbx_acquire(bp, vf, mbx); bnx2x_vf_mbx_request() 1992 bnx2x_vf_mbx_init_vf(bp, vf, mbx); bnx2x_vf_mbx_request() 1995 bnx2x_vf_mbx_setup_q(bp, vf, mbx); bnx2x_vf_mbx_request() 1998 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); bnx2x_vf_mbx_request() 2001 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); bnx2x_vf_mbx_request() 2004 bnx2x_vf_mbx_close_vf(bp, vf, mbx); bnx2x_vf_mbx_request() 2007 bnx2x_vf_mbx_release_vf(bp, vf, mbx); bnx2x_vf_mbx_request() 2010 bnx2x_vf_mbx_update_rss(bp, vf, mbx); bnx2x_vf_mbx_request() 2013 bnx2x_vf_mbx_update_tpa(bp, vf, mbx); bnx2x_vf_mbx_request() 2035 bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); bnx2x_vf_mbx_request() 2041 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); bnx2x_vf_mbx_request() 2044 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_request() 2048 void bnx2x_vf_mbx_schedule(struct bnx2x *bp, bnx2x_vf_mbx_schedule() argument 2059 if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > bnx2x_vf_mbx_schedule() 2060 BNX2X_NR_VIRTFN(bp)) { bnx2x_vf_mbx_schedule() 2062 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_mbx_schedule() 2066 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); bnx2x_vf_mbx_schedule() 2069 mutex_lock(&BP_VFDB(bp)->event_mutex); bnx2x_vf_mbx_schedule() 2070 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; bnx2x_vf_mbx_schedule() 2071 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; bnx2x_vf_mbx_schedule() 2072 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); bnx2x_vf_mbx_schedule() 2073 mutex_unlock(&BP_VFDB(bp)->event_mutex); bnx2x_vf_mbx_schedule() 2075 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); bnx2x_vf_mbx_schedule() 2079 void bnx2x_vf_mbx(struct bnx2x *bp) bnx2x_vf_mbx() argument 2081 struct bnx2x_vfdb *vfdb = BP_VFDB(bp); bnx2x_vf_mbx() 2094 for_each_vf(bp, vf_idx) { for_each_vf() 2095 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); for_each_vf() 2096 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); for_each_vf() 2108 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, for_each_vf() 2115 bnx2x_vf_release(bp, vf); for_each_vf() 2128 bnx2x_vf_mbx_request(bp, vf, mbx); for_each_vf() 2144 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) bnx2x_post_vf_bulletin() argument 2146 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); bnx2x_post_vf_bulletin() 2147 dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + bnx2x_post_vf_bulletin() 2149 dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); bnx2x_post_vf_bulletin() 2153 if (bnx2x_vf(bp, vf, state) != VF_ENABLED && bnx2x_post_vf_bulletin() 2154 bnx2x_vf(bp, vf, state) != VF_ACQUIRED) bnx2x_post_vf_bulletin() 2160 (bnx2x_vf(bp, vf, cfg_flags) & bnx2x_post_vf_bulletin() 2164 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, bnx2x_post_vf_bulletin() 2165 bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), bnx2x_post_vf_bulletin()
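bnx2x_vf_mbx_request() above is a plain dispatcher: it locks the VF-PF channel, switches on the type of the request's first TLV to pick one handler per CHANNEL_TLV_* value, answers unknown types with PFVF_STATUS_NOT_SUPPORTED, acks the mailbox via storm_memset_vf_mbx_ack(), and unlocks the channel. A compact sketch of that lock/switch/ack/unlock shape; the handler names and type codes are stand-ins:

/* mbx_dispatch.c - sketch of the first-TLV dispatch in the PF mailbox */
#include <stdio.h>

enum tlv_type { TLV_ACQUIRE, TLV_INIT, TLV_SETUP_Q, TLV_UNKNOWN };

static void handle_acquire(void)        { puts("acquire"); }
static void handle_init(void)           { puts("init"); }
static void handle_setup_q(void)        { puts("setup_q"); }
static void resp_not_supported(void)    { puts("resp: NOT_SUPPORTED"); }

static void mbx_request(enum tlv_type first_tlv)
{
	/* bnx2x_lock_vf_pf_channel() would be taken here */
	switch (first_tlv) {
	case TLV_ACQUIRE: handle_acquire(); break;
	case TLV_INIT:    handle_init();    break;
	case TLV_SETUP_Q: handle_setup_q(); break;
	default:          resp_not_supported(); break;
	}
	/* storm_memset_vf_mbx_ack() + bnx2x_unlock_vf_pf_channel() here */
}

int main(void)
{
	mbx_request(TLV_SETUP_Q);
	mbx_request(TLV_UNKNOWN);
	return 0;
}

Any TLV type the PF does not recognize falls through to the default arm, which is how unsupported VF requests still get a well-formed response.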
|
H A D | bnx2x_link.c | 224 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) bnx2x_bits_en() argument 226 u32 val = REG_RD(bp, reg); bnx2x_bits_en() 229 REG_WR(bp, reg, val); bnx2x_bits_en() 233 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) bnx2x_bits_dis() argument 235 u32 val = REG_RD(bp, reg); bnx2x_bits_dis() 238 REG_WR(bp, reg, val); bnx2x_bits_dis() 255 struct bnx2x *bp = params->bp; bnx2x_check_lfa() local 258 REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 266 REG_WR(bp, params->lfa_base + bnx2x_check_lfa() 273 link_status = REG_RD(bp, params->shmem_base + bnx2x_check_lfa() 302 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 311 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 320 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 330 cur_speed_cap_mask = REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 343 REG_RD(bp, params->lfa_base + bnx2x_check_lfa() 353 eee_status = REG_RD(bp, params->shmem2_base + bnx2x_check_lfa() 372 static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) bnx2x_get_epio() argument 384 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); bnx2x_get_epio() 385 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); bnx2x_get_epio() 387 *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; bnx2x_get_epio() 389 static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) bnx2x_set_epio() argument 401 gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); bnx2x_set_epio() 407 REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); bnx2x_set_epio() 410 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); bnx2x_set_epio() 411 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); bnx2x_set_epio() 414 static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) bnx2x_set_cfg_pin() argument 419 bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); bnx2x_set_cfg_pin() 423 bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); bnx2x_set_cfg_pin() 427 static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) bnx2x_get_cfg_pin() argument 432 bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); bnx2x_get_cfg_pin() 436 *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_get_cfg_pin() 447 struct bnx2x *bp = params->bp; bnx2x_ets_e2e3a0_disabled() local 458 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); bnx2x_ets_e2e3a0_disabled() 467 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); bnx2x_ets_e2e3a0_disabled() 469 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); bnx2x_ets_e2e3a0_disabled() 473 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_e2e3a0_disabled() 477 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); bnx2x_ets_e2e3a0_disabled() 478 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); bnx2x_ets_e2e3a0_disabled() 479 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); bnx2x_ets_e2e3a0_disabled() 481 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); bnx2x_ets_e2e3a0_disabled() 482 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); bnx2x_ets_e2e3a0_disabled() 483 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); bnx2x_ets_e2e3a0_disabled() 485 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); bnx2x_ets_e2e3a0_disabled() 489 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); bnx2x_ets_e2e3a0_disabled() 490 REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710); bnx2x_ets_e2e3a0_disabled() 492 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680); bnx2x_ets_e2e3a0_disabled() 493 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680); bnx2x_ets_e2e3a0_disabled() 495 REG_WR(bp, 
PBF_REG_NUM_STRICT_ARB_SLOTS, 0); bnx2x_ets_e2e3a0_disabled() 538 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_set_credit_upper_bound_nig() local 543 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 545 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 547 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 549 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 551 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 553 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : bnx2x_ets_e3b0_set_credit_upper_bound_nig() 557 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, bnx2x_ets_e3b0_set_credit_upper_bound_nig() 559 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, bnx2x_ets_e3b0_set_credit_upper_bound_nig() 561 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8, bnx2x_ets_e3b0_set_credit_upper_bound_nig() 576 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_nig_disabled() local 585 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); bnx2x_ets_e3b0_nig_disabled() 586 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); bnx2x_ets_e3b0_nig_disabled() 588 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); bnx2x_ets_e3b0_nig_disabled() 589 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); bnx2x_ets_e3b0_nig_disabled() 594 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : bnx2x_ets_e3b0_nig_disabled() 601 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); bnx2x_ets_e3b0_nig_disabled() 602 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); bnx2x_ets_e3b0_nig_disabled() 605 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, bnx2x_ets_e3b0_nig_disabled() 607 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); bnx2x_ets_e3b0_nig_disabled() 618 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); bnx2x_ets_e3b0_nig_disabled() 620 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); bnx2x_ets_e3b0_nig_disabled() 622 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : bnx2x_ets_e3b0_nig_disabled() 631 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : bnx2x_ets_e3b0_nig_disabled() 633 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : bnx2x_ets_e3b0_nig_disabled() 635 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : bnx2x_ets_e3b0_nig_disabled() 637 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : bnx2x_ets_e3b0_nig_disabled() 639 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : bnx2x_ets_e3b0_nig_disabled() 641 REG_WR(bp, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : bnx2x_ets_e3b0_nig_disabled() 644 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); bnx2x_ets_e3b0_nig_disabled() 645 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); bnx2x_ets_e3b0_nig_disabled() 646 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); bnx2x_ets_e3b0_nig_disabled() 660 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_set_credit_upper_bound_pbf() local 679 REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound); bnx2x_ets_e3b0_set_credit_upper_bound_pbf() 692 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_pbf_disabled() local 705 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688); bnx2x_ets_e3b0_pbf_disabled() 708 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688); bnx2x_ets_e3b0_pbf_disabled() 713 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); bnx2x_ets_e3b0_pbf_disabled() 716 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); bnx2x_ets_e3b0_pbf_disabled() 718 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : bnx2x_ets_e3b0_pbf_disabled() 722 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : bnx2x_ets_e3b0_pbf_disabled() 725 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : bnx2x_ets_e3b0_pbf_disabled() 739 REG_WR(bp, base_weight + (0x4 * i), 0); bnx2x_ets_e3b0_pbf_disabled() 751 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_disabled() local 753 if (!CHIP_IS_E3B0(bp)) { bnx2x_ets_e3b0_disabled() 774 struct bnx2x *bp = params->bp; bnx2x_ets_disabled() local 777 if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp))) bnx2x_ets_disabled() 779 else if (CHIP_IS_E3B0(bp)) bnx2x_ets_disabled() 799 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_cli_map() local 806 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT : bnx2x_ets_e3b0_cli_map() 809 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : bnx2x_ets_e3b0_cli_map() 812 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : bnx2x_ets_e3b0_cli_map() 816 REG_WR(bp, (port) ? 
PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : bnx2x_ets_e3b0_cli_map() 828 static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, bnx2x_ets_e3b0_set_cos_bw() argument 889 REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig); bnx2x_ets_e3b0_set_cos_bw() 891 REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf); bnx2x_ets_e3b0_set_cos_bw() 905 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_get_total_bw() local 965 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_sp_pri_to_cos_set() local 1046 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_sp_set_pri_cli_reg() local 1113 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg() 1116 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf); bnx2x_ets_e3b0_sp_set_pri_cli_reg() 1122 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg() 1124 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg() 1127 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf); bnx2x_ets_e3b0_sp_set_pri_cli_reg() 1140 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_config() local 1153 if (!CHIP_IS_E3B0(bp)) { bnx2x_ets_e3b0_config() 1191 bp, cos_entry, min_w_val_nig, min_w_val_pbf, bnx2x_ets_e3b0_config() 1241 struct bnx2x *bp = params->bp; bnx2x_ets_bw_limit_common() local 1247 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); bnx2x_ets_bw_limit_common() 1254 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); bnx2x_ets_bw_limit_common() 1256 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, bnx2x_ets_bw_limit_common() 1258 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, bnx2x_ets_bw_limit_common() 1262 REG_WR(bp, PBF_REG_ETS_ENABLED, 1); bnx2x_ets_bw_limit_common() 1265 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); bnx2x_ets_bw_limit_common() 1273 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); bnx2x_ets_bw_limit_common() 1276 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, bnx2x_ets_bw_limit_common() 1278 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, bnx2x_ets_bw_limit_common() 1286 struct bnx2x *bp = params->bp; bnx2x_ets_bw_limit() local 1307 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight); bnx2x_ets_bw_limit() 1308 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight); bnx2x_ets_bw_limit() 1310 REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight); bnx2x_ets_bw_limit() 1311 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); bnx2x_ets_bw_limit() 1317 struct bnx2x *bp = params->bp; bnx2x_ets_strict() local 1328 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); bnx2x_ets_strict() 1332 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_strict() 1334 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); bnx2x_ets_strict() 1336 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_strict() 1339 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); bnx2x_ets_strict() 1349 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); bnx2x_ets_strict() 1361 struct bnx2x *bp = params->bp; bnx2x_update_pfc_xmac() local 1392 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); bnx2x_update_pfc_xmac() 1393 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); bnx2x_update_pfc_xmac() 1394 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); bnx2x_update_pfc_xmac() 1400 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); bnx2x_update_pfc_xmac() 1401 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); bnx2x_update_pfc_xmac() 1402 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); bnx2x_update_pfc_xmac() 1406 REG_WR(bp, xmac_base + 
XMAC_REG_CTRL_SA_LO, bnx2x_update_pfc_xmac() 1411 REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, bnx2x_update_pfc_xmac() 1421 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, bnx2x_set_mdio_clk() argument 1429 cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); bnx2x_set_mdio_clk() 1431 if (USES_WARPCORE(bp)) bnx2x_set_mdio_clk() 1447 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); bnx2x_set_mdio_clk() 1451 static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp, bnx2x_set_mdio_emac_per_phy() argument 1458 bnx2x_set_mdio_clk(bp, params->chip_id, bnx2x_set_mdio_emac_per_phy() 1462 static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) bnx2x_is_4_port_mode() argument 1466 port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); bnx2x_is_4_port_mode() 1472 return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); bnx2x_is_4_port_mode() 1479 struct bnx2x *bp = params->bp; bnx2x_emac_init() local 1485 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_emac_init() 1488 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_emac_init() 1493 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_init() 1494 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); bnx2x_emac_init() 1498 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_init() 1507 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_emac_init() 1511 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); bnx2x_emac_init() 1517 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); bnx2x_emac_init() 1524 struct bnx2x *bp = params->bp; bnx2x_set_xumac_nig() local 1526 REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, bnx2x_set_xumac_nig() 1528 REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, bnx2x_set_xumac_nig() 1530 REG_WR(bp, params->port ? 
NIG_REG_P1_MAC_PAUSE_OUT_EN : bnx2x_set_xumac_nig() 1538 struct bnx2x *bp = params->bp; bnx2x_set_umac_rxtx() local 1539 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_umac_rxtx() 1542 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); bnx2x_set_umac_rxtx() 1550 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_set_umac_rxtx() 1558 struct bnx2x *bp = params->bp; bnx2x_umac_enable() local 1560 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_umac_enable() 1564 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_umac_enable() 1570 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); bnx2x_umac_enable() 1603 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable() 1609 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, bnx2x_umac_enable() 1611 REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); bnx2x_umac_enable() 1613 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); bnx2x_umac_enable() 1617 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, bnx2x_umac_enable() 1622 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, bnx2x_umac_enable() 1630 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable() 1639 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable() 1644 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); bnx2x_umac_enable() 1654 struct bnx2x *bp = params->bp; bnx2x_xmac_init() local 1655 u32 is_port4mode = bnx2x_is_4_port_mode(bp); bnx2x_xmac_init() 1663 if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || bnx2x_xmac_init() 1664 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || bnx2x_xmac_init() 1665 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) && bnx2x_xmac_init() 1667 (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_xmac_init() 1675 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_xmac_init() 1679 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_xmac_init() 1685 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); bnx2x_xmac_init() 1688 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); bnx2x_xmac_init() 1691 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); bnx2x_xmac_init() 1696 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); bnx2x_xmac_init() 1701 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); bnx2x_xmac_init() 1705 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_xmac_init() 1709 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_xmac_init() 1717 struct bnx2x *bp = params->bp; bnx2x_set_xmac_rxtx() local 1721 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_xmac_rxtx() 1727 pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); bnx2x_set_xmac_rxtx() 1728 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, bnx2x_set_xmac_rxtx() 1730 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, bnx2x_set_xmac_rxtx() 1733 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); bnx2x_set_xmac_rxtx() 1738 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); bnx2x_set_xmac_rxtx() 1746 struct bnx2x *bp = params->bp; bnx2x_xmac_enable() local 1760 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); bnx2x_xmac_enable() 1766 REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL, bnx2x_xmac_enable() 1769 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); bnx2x_xmac_enable() 1770 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, bnx2x_xmac_enable() 1775 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); bnx2x_xmac_enable() 1778 REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); bnx2x_xmac_enable() 1785 REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); bnx2x_xmac_enable() 1786 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 
0x1); bnx2x_xmac_enable() 1788 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); bnx2x_xmac_enable() 1803 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); bnx2x_xmac_enable() 1815 struct bnx2x *bp = params->bp; bnx2x_emac_enable() local 1823 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_emac_enable() 1827 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); bnx2x_emac_enable() 1837 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); bnx2x_emac_enable() 1839 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); bnx2x_emac_enable() 1844 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); bnx2x_emac_enable() 1847 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, bnx2x_emac_enable() 1849 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable() 1853 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, bnx2x_emac_enable() 1856 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable() 1862 bnx2x_bits_en(bp, emac_base + bnx2x_emac_enable() 1867 bnx2x_bits_en(bp, emac_base + bnx2x_emac_enable() 1872 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable() 1876 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); bnx2x_emac_enable() 1886 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); bnx2x_emac_enable() 1890 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, bnx2x_emac_enable() 1895 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, bnx2x_emac_enable() 1902 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); bnx2x_emac_enable() 1905 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_enable() 1910 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); bnx2x_emac_enable() 1913 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); bnx2x_emac_enable() 1916 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, bnx2x_emac_enable() 1921 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); bnx2x_emac_enable() 1924 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); bnx2x_emac_enable() 1925 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); bnx2x_emac_enable() 1926 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); bnx2x_emac_enable() 1929 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); bnx2x_emac_enable() 1936 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); bnx2x_emac_enable() 1937 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); bnx2x_emac_enable() 1939 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); bnx2x_emac_enable() 1949 struct bnx2x *bp = params->bp; bnx2x_update_pfc_bmac1() local 1961 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac1() 1971 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac1() 1982 struct bnx2x *bp = params->bp; bnx2x_update_pfc_bmac2() local 1994 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2() 2005 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2() 2017 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, bnx2x_update_pfc_bmac2() 2028 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2() 2041 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, bnx2x_update_pfc_bmac2() 2056 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2() 2064 static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, bnx2x_pfc_nig_rx_priority_mask() argument 2103 REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); bnx2x_pfc_nig_rx_priority_mask() 2109 struct bnx2x *bp = params->bp; bnx2x_update_mng() local 2111 REG_WR(bp, 
params->shmem_base + bnx2x_update_mng() 2118 struct bnx2x *bp = params->bp; bnx2x_update_link_attr() local 2120 if (SHMEM2_HAS(bp, link_attr_sync)) bnx2x_update_link_attr() 2121 REG_WR(bp, params->shmem2_base + bnx2x_update_link_attr() 2133 struct bnx2x *bp = params->bp; bnx2x_update_pfc_nig() local 2144 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : bnx2x_update_pfc_nig() 2153 if (CHIP_IS_E3(bp)) bnx2x_update_pfc_nig() 2174 if (CHIP_IS_E3(bp)) bnx2x_update_pfc_nig() 2175 REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : bnx2x_update_pfc_nig() 2177 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : bnx2x_update_pfc_nig() 2179 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : bnx2x_update_pfc_nig() 2181 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 : bnx2x_update_pfc_nig() 2184 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 : bnx2x_update_pfc_nig() 2187 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK : bnx2x_update_pfc_nig() 2190 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : bnx2x_update_pfc_nig() 2194 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : bnx2x_update_pfc_nig() 2198 REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE : bnx2x_update_pfc_nig() 2206 bnx2x_pfc_nig_rx_priority_mask(bp, i, bnx2x_update_pfc_nig() 2209 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : bnx2x_update_pfc_nig() 2213 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : bnx2x_update_pfc_nig() 2217 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : bnx2x_update_pfc_nig() 2231 struct bnx2x *bp = params->bp; bnx2x_update_pfc() local 2249 if (CHIP_IS_E3(bp)) { bnx2x_update_pfc() 2253 val = REG_RD(bp, MISC_REG_RESET_REG_2); bnx2x_update_pfc() 2261 if (CHIP_IS_E2(bp)) bnx2x_update_pfc() 2271 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); bnx2x_update_pfc() 2280 struct bnx2x *bp = params->bp; bnx2x_bmac1_enable() local 2292 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, bnx2x_bmac1_enable() 2302 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); bnx2x_bmac1_enable() 2312 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_bmac1_enable() 2317 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable() 2324 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable() 2329 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable() 2334 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, bnx2x_bmac1_enable() 2344 struct bnx2x *bp = params->bp; bnx2x_bmac2_enable() local 2354 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_bmac2_enable() 2360 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, bnx2x_bmac2_enable() 2372 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, bnx2x_bmac2_enable() 2380 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, bnx2x_bmac2_enable() 2387 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable() 2393 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable() 2398 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable() 2411 struct bnx2x *bp = params->bp; bnx2x_bmac_enable() local 2415 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_bmac_enable() 2420 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_bmac_enable() 2424 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); bnx2x_bmac_enable() 2427 if (CHIP_IS_E2(bp)) bnx2x_bmac_enable() 2431 REG_WR(bp, 
NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); bnx2x_bmac_enable() 2432 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); bnx2x_bmac_enable() 2433 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); bnx2x_bmac_enable() 2439 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); bnx2x_bmac_enable() 2440 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); bnx2x_bmac_enable() 2441 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); bnx2x_bmac_enable() 2442 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); bnx2x_bmac_enable() 2443 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); bnx2x_bmac_enable() 2444 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); bnx2x_bmac_enable() 2450 static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) bnx2x_set_bmac_rx() argument 2455 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); bnx2x_set_bmac_rx() 2457 if (CHIP_IS_E2(bp)) bnx2x_set_bmac_rx() 2462 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_bmac_rx() 2466 REG_RD_DMAE(bp, bmac_addr, wb_data, 2); bnx2x_set_bmac_rx() 2471 REG_WR_DMAE(bp, bmac_addr, wb_data, 2); bnx2x_set_bmac_rx() 2479 struct bnx2x *bp = params->bp; bnx2x_pbf_update() local 2485 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); bnx2x_pbf_update() 2488 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); bnx2x_pbf_update() 2489 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update() 2494 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update() 2497 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update() 2509 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); bnx2x_pbf_update() 2511 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); bnx2x_pbf_update() 2518 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); bnx2x_pbf_update() 2520 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); bnx2x_pbf_update() 2532 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd); bnx2x_pbf_update() 2537 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); bnx2x_pbf_update() 2539 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); bnx2x_pbf_update() 2542 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); bnx2x_pbf_update() 2549 * @bp: driver handle 2561 static u32 bnx2x_get_emac_base(struct bnx2x *bp, bnx2x_get_emac_base() argument 2569 if (REG_RD(bp, NIG_REG_PORT_SWAP)) bnx2x_get_emac_base() 2575 if (REG_RD(bp, NIG_REG_PORT_SWAP)) bnx2x_get_emac_base() 2596 static int bnx2x_cl22_write(struct bnx2x *bp, bnx2x_cl22_write() argument 2604 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); bnx2x_cl22_write() 2605 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, bnx2x_cl22_write() 2612 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl22_write() 2617 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl22_write() 2627 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); bnx2x_cl22_write() 2631 static int bnx2x_cl22_read(struct bnx2x *bp, bnx2x_cl22_read() argument 2640 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); bnx2x_cl22_read() 2641 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, bnx2x_cl22_read() 2648 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl22_read() 2653 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl22_read() 2666 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); bnx2x_cl22_read() 2673 static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_read() argument 2681 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_cl45_read() 2682 ((REG_RD(bp, 
MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_cl45_read() 2683 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); bnx2x_cl45_read() 2687 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_read() 2693 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl45_read() 2698 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl45_read() 2706 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_read() 2714 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl45_read() 2719 val = REG_RD(bp, phy->mdio_ctrl + bnx2x_cl45_read() 2728 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_read() 2738 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); bnx2x_cl45_read() 2743 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_read() 2748 static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_write() argument 2756 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_cl45_write() 2757 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_cl45_write() 2758 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); bnx2x_cl45_write() 2762 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_write() 2769 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl45_write() 2774 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl45_write() 2782 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_write() 2789 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl45_write() 2794 tmp = REG_RD(bp, phy->mdio_ctrl + bnx2x_cl45_write() 2803 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_write() 2812 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); bnx2x_cl45_write() 2816 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_write() 2826 struct bnx2x *bp = params->bp; bnx2x_eee_has_cap() local 2828 if (REG_RD(bp, params->shmem2_base) <= bnx2x_eee_has_cap() 2878 struct bnx2x *bp = params->bp; bnx2x_eee_calc_timer() local 2893 eee_mode = ((REG_RD(bp, params->shmem_base + bnx2x_eee_calc_timer() 2911 struct bnx2x *bp = params->bp; bnx2x_eee_set_timers() local 2916 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), bnx2x_eee_set_timers() 2963 struct bnx2x *bp = params->bp; bnx2x_eee_disable() local 2966 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); bnx2x_eee_disable() 2968 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); bnx2x_eee_disable() 2979 struct bnx2x *bp = params->bp; bnx2x_eee_advertise() local 2983 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); bnx2x_eee_advertise() 2994 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); bnx2x_eee_advertise() 3004 struct bnx2x *bp = params->bp; bnx2x_update_mng_eee() local 3007 REG_WR(bp, params->shmem2_base + bnx2x_update_mng_eee() 3016 struct bnx2x *bp = params->bp; bnx2x_eee_an_resolve() local 3021 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); bnx2x_eee_an_resolve() 3022 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); bnx2x_eee_an_resolve() 3067 struct bnx2x *bp = params->bp; bnx2x_bsc_module_sel() local 3070 board_cfg = REG_RD(bp, params->shmem_base + bnx2x_bsc_module_sel() 3078 sfp_ctrl = REG_RD(bp, params->shmem_base + bnx2x_bsc_module_sel() 3085 bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); bnx2x_bsc_module_sel() 3089 struct bnx2x *bp, bnx2x_bsc_read() 3109 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read() 3111 
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read() 3115 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); bnx2x_bsc_read() 3122 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read() 3126 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read() 3129 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read() 3146 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read() 3150 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read() 3153 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read() 3164 data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); bnx2x_bsc_read() 3175 static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_read_or_write() argument 3179 bnx2x_cl45_read(bp, phy, devad, reg, &val); bnx2x_cl45_read_or_write() 3180 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); bnx2x_cl45_read_or_write() 3183 static void bnx2x_cl45_read_and_write(struct bnx2x *bp, bnx2x_cl45_read_and_write() argument 3188 bnx2x_cl45_read(bp, phy, devad, reg, &val); bnx2x_cl45_read_and_write() 3189 bnx2x_cl45_write(bp, phy, devad, reg, val & and_val); bnx2x_cl45_read_and_write() 3201 return bnx2x_cl45_read(params->bp, bnx2x_phy_read() 3218 return bnx2x_cl45_write(params->bp, bnx2x_phy_write() 3229 struct bnx2x *bp = params->bp; bnx2x_get_warpcore_lane() local 3233 path = BP_PATH(bp); bnx2x_get_warpcore_lane() 3236 if (bnx2x_is_4_port_mode(bp)) { bnx2x_get_warpcore_lane() 3240 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); bnx2x_get_warpcore_lane() 3244 path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); bnx2x_get_warpcore_lane() 3250 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); bnx2x_get_warpcore_lane() 3254 port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); bnx2x_get_warpcore_lane() 3264 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); bnx2x_get_warpcore_lane() 3269 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); bnx2x_get_warpcore_lane() 3284 struct bnx2x *bp = params->bp; bnx2x_set_aer_mmd() local 3292 if (USES_WARPCORE(bp)) { bnx2x_set_aer_mmd() 3302 } else if (CHIP_IS_E2(bp)) bnx2x_set_aer_mmd() 3307 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_set_aer_mmd() 3316 static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port) bnx2x_set_serdes_access() argument 3321 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); bnx2x_set_serdes_access() 3322 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); bnx2x_set_serdes_access() 3324 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); bnx2x_set_serdes_access() 3327 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); bnx2x_set_serdes_access() 3330 static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) bnx2x_serdes_deassert() argument 3339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); bnx2x_serdes_deassert() 3341 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); bnx2x_serdes_deassert() 3343 bnx2x_set_serdes_access(bp, port); bnx2x_serdes_deassert() 3345 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, bnx2x_serdes_deassert() 3353 struct bnx2x *bp = params->bp; bnx2x_xgxs_specific_func() local 3357 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); bnx2x_xgxs_specific_func() 3358 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, bnx2x_xgxs_specific_func() 3366 struct bnx2x *bp = params->bp; bnx2x_xgxs_deassert() local 3375 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); bnx2x_xgxs_deassert() 3377 REG_WR(bp, GRCBASE_MISC + 
MISC_REGISTERS_RESET_REG_3_SET, val); bnx2x_xgxs_deassert() 3385 struct bnx2x *bp = params->bp; bnx2x_calc_ieee_aneg_adv() local 3426 struct bnx2x *bp = params->bp; set_phy_vars() local 3469 struct bnx2x *bp = params->bp; bnx2x_ext_phy_set_pause() local 3471 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); bnx2x_ext_phy_set_pause() 3488 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); bnx2x_ext_phy_set_pause() 3526 struct bnx2x *bp = params->bp; bnx2x_ext_phy_update_adv_fc() local 3528 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause); bnx2x_ext_phy_update_adv_fc() 3529 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); bnx2x_ext_phy_update_adv_fc() 3530 } else if (CHIP_IS_E3(bp) && bnx2x_ext_phy_update_adv_fc() 3534 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc() 3541 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc() 3543 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc() 3546 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc() 3548 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc() 3558 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc() 3561 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc() 3617 struct bnx2x *bp = params->bp; bnx2x_warpcore_enable_AN_KR2() local 3640 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR2() 3644 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_enable_AN_KR2() 3656 struct bnx2x *bp = params->bp; bnx2x_disable_kr2() local 3679 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_disable_kr2() 3690 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_lpi_passthrough() local 3693 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_lpi_passthrough() 3695 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_lpi_passthrough() 3703 struct bnx2x *bp = params->bp; bnx2x_warpcore_restart_AN_KR() local 3705 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_restart_AN_KR() 3707 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_restart_AN_KR() 3719 struct bnx2x *bp = params->bp; bnx2x_warpcore_enable_AN_KR() local 3733 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_enable_AN_KR() 3736 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3740 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3751 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); bnx2x_warpcore_enable_AN_KR() 3760 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_enable_AN_KR() 3763 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR() 3771 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3776 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3779 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3782 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3787 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR() 3791 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR() 3797 if (REG_RD(bp, params->shmem_base + bnx2x_warpcore_enable_AN_KR() 3801 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3810 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3814 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3821 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_enable_AN_KR() 
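bnx2x_warpcore_enable_AN_KR() above opens with a table-driven init, applying an array of {devad, reg, val} triples in a loop through bnx2x_cl45_write(), and the surrounding Warpcore code leans on bnx2x_cl45_read_or_write()/bnx2x_cl45_read_and_write() for scattered read-modify-write touch-ups. A sketch of both idioms against a mock MDIO layer; the table entries are hypothetical, not real MDIO_WC_* registers:

/* wc_reg_set.c - sketch of table-driven init plus MDIO RMW helpers */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_set_entry { uint8_t devad; uint16_t reg; uint16_t val; };

static uint16_t mock_phy[8][32];   /* tiny mock PHY: devad x register */

static uint16_t cl45_read(uint8_t devad, uint16_t reg)
{
	return mock_phy[devad & 7][reg & 31];
}

static void cl45_write(uint8_t devad, uint16_t reg, uint16_t val)
{
	mock_phy[devad & 7][reg & 31] = val;
}

/* RMW convenience in the spirit of bnx2x_cl45_read_or_write(). */
static void cl45_read_or_write(uint8_t devad, uint16_t reg, uint16_t or_val)
{
	cl45_write(devad, reg, cl45_read(devad, reg) | or_val);
}

/* Hypothetical bring-up table applied in one loop. */
static const struct reg_set_entry reg_set[] = {
	{ 3, 0x10, 0x1000 },
	{ 3, 0x12, 0x0400 },
	{ 7, 0x00, 0x1200 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(reg_set) / sizeof(reg_set[0]); i++)
		cl45_write(reg_set[i].devad, reg_set[i].reg, reg_set[i].val);

	cl45_read_or_write(3, 0x10, 0x0001);  /* set one extra control bit */
	printf("devad 3, reg 0x10 = 0x%04x\n", (unsigned)cl45_read(3, 0x10));
	return 0;
}

Keeping the writes in a const table keeps the bring-up sequence auditable and lets several link modes share one apply loop.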
3824 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3828 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3835 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3837 wc_lane_config = REG_RD(bp, params->shmem_base + bnx2x_warpcore_enable_AN_KR() 3840 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3855 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR() 3870 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_10G_KR() local 3886 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_set_10G_KR() 3891 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_10G_KR() 3894 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3897 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3900 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3903 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3908 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_10G_KR() 3911 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_10G_KR() 3915 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3919 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3923 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3927 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3929 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR() 3938 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_10G_XFI() local 3944 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3948 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3952 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); bnx2x_warpcore_set_10G_XFI() 3955 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3959 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3963 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3967 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3972 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3974 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3979 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 3989 cfg_tap_val = REG_RD(bp, params->shmem_base + bnx2x_warpcore_set_10G_XFI() 4035 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4040 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4043 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4048 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4052 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4058 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4062 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4066 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI() 4074 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_20G_force_KR2() local 4076 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_20G_force_KR2() 4080 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4085 bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4087 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 
bnx2x_warpcore_set_20G_force_KR2() 4090 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4094 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4098 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4101 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4104 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4108 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4110 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4114 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_20G_force_KR2() 4117 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2() 4123 static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, bnx2x_warpcore_set_20G_DXGXS() argument 4128 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4132 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4135 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4138 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4141 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4144 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4147 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4150 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4153 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4156 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4160 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4164 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4168 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4172 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS() 4182 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_sgmii_speed() local 4186 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4193 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4198 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4219 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4224 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4230 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4237 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4242 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4244 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4249 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4254 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed() 4259 static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, bnx2x_warpcore_reset_lane() argument 4265 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane() 4271 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane() 4273 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane() 4281 struct bnx2x *bp = params->bp; bnx2x_warpcore_clear_regs() local 4300 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_clear_regs() 4304 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, bnx2x_warpcore_clear_regs() 4308 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 
bnx2x_warpcore_clear_regs() 4313 static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, bnx2x_get_mod_abs_int_cfg() argument 4321 if (CHIP_IS_E3(bp)) { bnx2x_get_mod_abs_int_cfg() 4322 cfg_pin = (REG_RD(bp, shmem_base + bnx2x_get_mod_abs_int_cfg() 4355 struct bnx2x *bp = params->bp; bnx2x_is_sfp_module_plugged() local 4358 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, bnx2x_is_sfp_module_plugged() 4362 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_is_sfp_module_plugged() 4374 struct bnx2x *bp = params->bp; bnx2x_warpcore_get_sigdet() local 4378 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, bnx2x_warpcore_get_sigdet() 4388 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_runtime() local 4399 serdes_net_if = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_config_runtime() 4407 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, bnx2x_warpcore_config_runtime() 4417 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_runtime() 4418 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_config_runtime() 4421 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_config_runtime() 4441 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_sfi() local 4458 struct bnx2x *bp = params->bp; bnx2x_sfp_e3_set_transmitter() local 4462 cfg_pin = REG_RD(bp, params->shmem_base + bnx2x_sfp_e3_set_transmitter() 4470 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); bnx2x_sfp_e3_set_transmitter() 4472 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); bnx2x_sfp_e3_set_transmitter() 4479 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_init() local 4483 serdes_net_if = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_config_init() 4491 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_init() 4557 bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); bnx2x_warpcore_config_init() 4579 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_config_init() 4586 struct bnx2x *bp = params->bp; bnx2x_warpcore_link_reset() local 4589 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_warpcore_link_reset() 4592 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_link_reset() 4596 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4599 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4603 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_link_reset() 4606 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4610 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4614 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4619 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4622 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4631 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset() 4641 struct bnx2x *bp = params->bp; bnx2x_set_warpcore_loopback() local 4652 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_set_warpcore_loopback() 4655 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback() 4660 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback() 4665 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback() 4673 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback() 4676 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback() 4686 struct bnx2x *bp = params->bp; bnx2x_sync_link() local 4754 USES_WARPCORE(bp) && bnx2x_sync_link() 4761 if (USES_WARPCORE(bp)) bnx2x_sync_link() 4766 if 
(USES_WARPCORE(bp)) bnx2x_sync_link() 4792 struct bnx2x *bp = params->bp; bnx2x_link_status_update() local 4798 vars->link_status = REG_RD(bp, params->shmem_base + bnx2x_link_status_update() 4808 vars->eee_status = REG_RD(bp, params->shmem2_base + bnx2x_link_status_update() 4818 media_types = REG_RD(bp, sync_offset); bnx2x_link_status_update() 4836 vars->aeu_int_mask = REG_RD(bp, sync_offset); bnx2x_link_status_update() 4846 if (SHMEM2_HAS(bp, link_attr_sync)) bnx2x_link_status_update() 4847 params->link_attr_sync = SHMEM2_RD(bp, bnx2x_link_status_update() 4859 struct bnx2x *bp = params->bp; bnx2x_set_master_ln() local 4866 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_master_ln() 4871 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_master_ln() 4881 struct bnx2x *bp = params->bp; bnx2x_reset_unicore() local 4884 CL22_RD_OVER_CL45(bp, phy, bnx2x_reset_unicore() 4889 CL22_WR_OVER_CL45(bp, phy, bnx2x_reset_unicore() 4895 bnx2x_set_serdes_access(bp, params->port); bnx2x_reset_unicore() 4902 CL22_RD_OVER_CL45(bp, phy, bnx2x_reset_unicore() 4913 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_reset_unicore() 4924 struct bnx2x *bp = params->bp; bnx2x_set_swap_lanes() local 4938 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes() 4945 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes() 4951 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes() 4957 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes() 4966 struct bnx2x *bp = params->bp; bnx2x_set_parallel_detection() local 4968 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 4978 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 4988 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 4993 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 5002 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 5008 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection() 5021 struct bnx2x *bp = params->bp; bnx2x_set_autoneg() local 5025 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5036 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5042 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5053 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5058 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5071 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5078 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5084 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5092 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5103 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5114 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg() 5124 struct bnx2x *bp = params->bp; bnx2x_program_serdes() local 5128 CL22_RD_OVER_CL45(bp, phy, bnx2x_program_serdes() 5136 CL22_WR_OVER_CL45(bp, phy, bnx2x_program_serdes() 5143 CL22_RD_OVER_CL45(bp, phy, bnx2x_program_serdes() 5163 CL22_WR_OVER_CL45(bp, phy, bnx2x_program_serdes() 5172 struct bnx2x *bp = params->bp; bnx2x_set_brcm_cl37_advertisement() local 5180 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_brcm_cl37_advertisement() 5184 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_brcm_cl37_advertisement() 5193 struct bnx2x *bp = params->bp; bnx2x_set_ieee_aneg_advertisement() local 5197 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement() 5200 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement() 5205 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement() 5214 struct bnx2x *bp = params->bp; bnx2x_restart_autoneg() local 5221 CL22_RD_OVER_CL45(bp, phy, bnx2x_restart_autoneg() 5226 CL22_WR_OVER_CL45(bp, phy, bnx2x_restart_autoneg() 5234 CL22_RD_OVER_CL45(bp, phy, bnx2x_restart_autoneg() 5241 CL22_WR_OVER_CL45(bp, phy, bnx2x_restart_autoneg() 
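bnx2x_restart_autoneg() above is the classic Clause 22 kick: read the MII control register through the CL22-over-CL45 window, set the autoneg-enable and restart-autoneg bits, and write it back; the PHY clears the restart bit on its own. A self-contained sketch using the standard BMCR bit positions, with mock read/write helpers standing in for the CL22_RD_OVER_CL45/CL22_WR_OVER_CL45 pair:

/* an_restart.c - sketch of the autoneg enable + restart pattern */
#include <stdint.h>
#include <stdio.h>

#define MII_CTRL_AN_ENABLE  (1u << 12)  /* standard Clause 22 BMCR bits */
#define MII_CTRL_AN_RESTART (1u << 9)

static uint16_t mock_bmcr = 0x0140;     /* mock MII control register */

static uint16_t phy_read(void)          { return mock_bmcr; }
static void     phy_write(uint16_t v)   { mock_bmcr = v; }

static void restart_autoneg(void)
{
	uint16_t ctrl = phy_read();
	ctrl |= MII_CTRL_AN_ENABLE | MII_CTRL_AN_RESTART;
	phy_write(ctrl);                /* PHY self-clears the restart bit */
}

int main(void)
{
	restart_autoneg();
	printf("BMCR = 0x%04x\n", (unsigned)mock_bmcr);  /* 0x1340 */
	return 0;
}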
5254 struct bnx2x *bp = params->bp; bnx2x_initialize_sgmii_process() local 5259 CL22_RD_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process() 5268 CL22_WR_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process() 5278 CL22_RD_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process() 5309 CL22_WR_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process() 5325 struct bnx2x *bp = params->bp; bnx2x_direct_parallel_detect_used() local 5329 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used() 5333 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used() 5343 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used() 5364 struct bnx2x *bp = params->bp; bnx2x_update_adv_fc() local 5371 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc() 5375 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc() 5385 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc() 5389 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc() 5408 struct bnx2x *bp = params->bp; bnx2x_flow_ctrl_resolve() local 5434 struct bnx2x *bp = params->bp; bnx2x_check_fallback_to_cl37() local 5438 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37() 5446 CL22_WR_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37() 5453 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37() 5469 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37() 5490 CL22_WR_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37() 5519 struct bnx2x *bp = params->bp; bnx2x_get_link_speed_duplex() local 5607 struct bnx2x *bp = params->bp; bnx2x_link_settings_status() local 5613 CL22_RD_OVER_CL45(bp, phy, bnx2x_link_settings_status() 5650 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1, bnx2x_link_settings_status() 5661 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, bnx2x_link_settings_status() 5681 struct bnx2x *bp = params->bp; bnx2x_warpcore_read_status() local 5689 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5691 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5697 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5699 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5707 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5718 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status() 5720 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status() 5728 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5736 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5752 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status() 5763 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5777 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5780 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status() 5804 struct bnx2x *bp = params->bp; bnx2x_set_gmii_tx_driver() local 5811 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver() 5825 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver() 5834 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver() 5844 struct bnx2x *bp = params->bp; bnx2x_emac_program() local 5849 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + bnx2x_emac_program() 5880 bnx2x_bits_en(bp, bnx2x_emac_program() 5893 struct bnx2x *bp = params->bp; bnx2x_set_preemphasis() local 5897 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_preemphasis() 5905 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_preemphasis() 5916 struct bnx2x *bp = params->bp; bnx2x_xgxs_config_init() local 6000 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, bnx2x_wait_reset_complete() argument 6008 
6011 bnx2x_cl45_read(bp, phy, bnx2x_wait_reset_complete()
6020 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_wait_reset_complete()
6031 struct bnx2x *bp = params->bp; bnx2x_link_int_enable() local
6034 if (CHIP_IS_E3(bp)) { bnx2x_link_int_enable()
6059 bnx2x_bits_en(bp, bnx2x_link_int_enable()
6065 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); bnx2x_link_int_enable()
6067 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bnx2x_link_int_enable()
6068 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), bnx2x_link_int_enable()
6069 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); bnx2x_link_int_enable()
6071 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), bnx2x_link_int_enable()
6072 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); bnx2x_link_int_enable()
6075 static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, bnx2x_rearm_latch_signal() argument
6085 latch_status = REG_RD(bp, bnx2x_rearm_latch_signal()
6090 bnx2x_bits_en(bp, bnx2x_rearm_latch_signal()
6095 bnx2x_bits_dis(bp, bnx2x_rearm_latch_signal()
6103 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, bnx2x_rearm_latch_signal()
6112 struct bnx2x *bp = params->bp; bnx2x_link_int_ack() local
6118 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, bnx2x_link_int_ack()
6123 if (USES_WARPCORE(bp)) bnx2x_link_int_ack()
6143 bnx2x_bits_en(bp, bnx2x_link_int_ack()
6198 struct bnx2x *bp; bnx2x_get_ext_phy_fw_version() local
6205 bp = params->bp; bnx2x_get_ext_phy_fw_version()
6209 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); bnx2x_get_ext_phy_fw_version()
6219 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); bnx2x_get_ext_phy_fw_version()
6239 struct bnx2x *bp = params->bp; bnx2x_set_xgxs_loopback() local
6246 if (!CHIP_IS_E3(bp)) { bnx2x_set_xgxs_loopback()
6248 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + bnx2x_set_xgxs_loopback()
6251 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, bnx2x_set_xgxs_loopback()
6255 bnx2x_cl45_write(bp, phy, bnx2x_set_xgxs_loopback()
6261 bnx2x_cl45_write(bp, phy, bnx2x_set_xgxs_loopback()
6270 if (!CHIP_IS_E3(bp)) { bnx2x_set_xgxs_loopback()
6272 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, bnx2x_set_xgxs_loopback()
6278 bnx2x_cl45_read(bp, phy, 5, bnx2x_set_xgxs_loopback()
6282 bnx2x_cl45_write(bp, phy, 5, bnx2x_set_xgxs_loopback()
6299 struct bnx2x *bp = params->bp; bnx2x_set_led() local
6314 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); bnx2x_set_led()
6315 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6318 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6327 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp); bnx2x_set_led()
6341 CHIP_IS_E2(bp) && params->num_phys == 2) { bnx2x_set_led()
6345 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6346 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); bnx2x_set_led()
6348 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6349 EMAC_WR(bp, EMAC_REG_EMAC_LED, bnx2x_set_led()
6363 if ((!CHIP_IS_E3(bp)) || bnx2x_set_led()
6364 (CHIP_IS_E3(bp) && bnx2x_set_led()
6366 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); bnx2x_set_led()
6368 if (CHIP_IS_E1x(bp) || bnx2x_set_led()
6369 CHIP_IS_E2(bp) || bnx2x_set_led()
6371 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6373 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6378 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6379 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6380 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | bnx2x_set_led()
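bnx2x_wait_reset_complete() (lines 6000-6020) polls the PHY control register until the self-clearing reset bit drops, and warns via netdev_err() if it never does. A minimal sketch of that timeout loop, with a hypothetical phy_read_ctrl() accessor; the loop bound and delay are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    uint16_t phy_read_ctrl(void);     /* hypothetical: returns MII/PMA control reg */
    void udelay(unsigned int usecs);  /* busy-wait, as in the kernel */

    #define CTRL_RESET (1 << 15)      /* IEEE reset bit, self-clearing */

    static uint16_t wait_reset_complete(void)
    {
        uint16_t cnt, ctrl = 0;

        for (cnt = 0; cnt < 1000; cnt++) {
            udelay(10);
            ctrl = phy_read_ctrl();
            if (!(ctrl & CTRL_RESET))   /* reset bit cleared: PHY is up */
                break;
        }
        if (cnt == 1000)
            fprintf(stderr, "Warning: PHY was not initialized\n");
        return cnt;
    }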
6392 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6396 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); bnx2x_set_led()
6398 if (CHIP_IS_E3(bp)) bnx2x_set_led()
6399 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, bnx2x_set_led()
6402 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, bnx2x_set_led()
6404 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + bnx2x_set_led()
6406 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6407 EMAC_WR(bp, EMAC_REG_EMAC_LED, bnx2x_set_led()
6410 if (CHIP_IS_E1(bp) && bnx2x_set_led()
6416 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 bnx2x_set_led()
6418 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + bnx2x_set_led()
6420 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + bnx2x_set_led()
6441 struct bnx2x *bp = params->bp; bnx2x_test_link() local
6447 if (CHIP_IS_E3(bp)) { bnx2x_test_link()
6452 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6454 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6460 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6470 CL22_RD_OVER_CL45(bp, int_phy, bnx2x_test_link()
6523 struct bnx2x *bp = params->bp; bnx2x_link_initialize() local
6535 if (!USES_WARPCORE(bp)) bnx2x_link_initialize()
6546 (CHIP_IS_E1x(bp) || bnx2x_link_initialize()
6547 CHIP_IS_E2(bp))) bnx2x_link_initialize()
6588 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + bnx2x_link_initialize()
6601 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, bnx2x_int_link_reset()
6608 struct bnx2x *bp = params->bp; bnx2x_common_ext_link_reset() local
6611 if (CHIP_IS_E2(bp)) bnx2x_common_ext_link_reset()
6612 gpio_port = BP_PATH(bp); bnx2x_common_ext_link_reset()
6615 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_common_ext_link_reset()
6618 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_common_ext_link_reset()
6627 struct bnx2x *bp = params->bp; bnx2x_update_link_down() local
6642 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); bnx2x_update_link_down()
6645 if (!CHIP_IS_E3(bp)) bnx2x_update_link_down()
6646 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_update_link_down()
6650 if (CHIP_IS_E1x(bp) || bnx2x_update_link_down()
6651 CHIP_IS_E2(bp)) bnx2x_update_link_down()
6652 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); bnx2x_update_link_down()
6654 if (CHIP_IS_E3(bp)) { bnx2x_update_link_down()
6656 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), bnx2x_update_link_down()
6658 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), bnx2x_update_link_down()
6675 struct bnx2x *bp = params->bp; bnx2x_update_link_up() local
6690 if (USES_WARPCORE(bp)) { bnx2x_update_link_up()
6707 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + bnx2x_update_link_up()
6709 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); bnx2x_update_link_up()
6710 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + bnx2x_update_link_up()
6714 if ((CHIP_IS_E1x(bp) || bnx2x_update_link_up()
6715 CHIP_IS_E2(bp))) { bnx2x_update_link_up()
6741 if (CHIP_IS_E1x(bp)) bnx2x_update_link_up()
6746 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); bnx2x_update_link_up()
6764 struct bnx2x *bp = params->bp; bnx2x_chng_link_count() local
6768 if (!(SHMEM2_HAS(bp, link_change_count))) bnx2x_chng_link_count()
6776 val = REG_RD(bp, addr) + 1; bnx2x_chng_link_count()
6777 REG_WR(bp, addr, val); bnx2x_chng_link_count()
6794 struct bnx2x *bp = params->bp; bnx2x_link_update() local
6819 if (USES_WARPCORE(bp)) bnx2x_link_update()
6824 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); bnx2x_link_update()
6826 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + bnx2x_link_update()
6829 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bnx2x_link_update()
6831 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); bnx2x_link_update()
6834 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), bnx2x_link_update()
6835 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); bnx2x_link_update()
6838 if (!CHIP_IS_E3(bp)) bnx2x_link_update()
6839 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_link_update()
6957 bnx2x_rearm_latch_signal(bp, port, bnx2x_link_update()
6980 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, bnx2x_link_update()
7043 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); bnx2x_link_update()
7051 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) bnx2x_ext_phy_hw_reset() argument
7053 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_ext_phy_hw_reset()
7056 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_ext_phy_hw_reset()
7060 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, bnx2x_save_spirom_version() argument
7067 REG_WR(bp, ver_addr, spirom_ver); bnx2x_save_spirom_version()
7070 static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, bnx2x_save_bcm_spirom_ver() argument
7076 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_save_bcm_spirom_ver()
7078 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_save_bcm_spirom_ver()
7080 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), bnx2x_save_bcm_spirom_ver()
7084 static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, bnx2x_ext_phy_10G_an_resolve() argument
7089 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_10G_an_resolve()
7092 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_10G_an_resolve()
7108 struct bnx2x *bp = params->bp; bnx2x_8073_resolve_fc() local
7120 bnx2x_cl45_read(bp, phy, bnx2x_8073_resolve_fc()
7124 bnx2x_cl45_read(bp, phy, bnx2x_8073_resolve_fc()
7137 static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, bnx2x_8073_8727_external_rom_boot() argument
7147 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7153 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7158 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7163 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7169 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7189 bnx2x_cl45_read(bp, phy, bnx2x_8073_8727_external_rom_boot()
7192 bnx2x_cl45_read(bp, phy, bnx2x_8073_8727_external_rom_boot()
7202 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7205 bnx2x_save_bcm_spirom_ver(bp, phy, port); bnx2x_8073_8727_external_rom_boot()
7218 static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_8073_is_snr_needed() argument
7224 bnx2x_cl45_read(bp, phy, bnx2x_8073_is_snr_needed()
7233 bnx2x_cl45_read(bp, phy, bnx2x_8073_is_snr_needed()
7244 static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_8073_xaui_wa() argument
7248 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7263 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7282 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7300 static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_807x_force_10G() argument
7303 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7305 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7307 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7309 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7318 struct bnx2x *bp = params->bp; bnx2x_8073_set_pause_cl37() local
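bnx2x_save_bcm_spirom_ver() (lines 7070-7080) reads two 16-bit firmware words over clause 45 and packs them into the single 32-bit SPIROM version that bnx2x_save_spirom_version() stores at the PHY's ver_addr. The packing is the shift-or visible at line 7080:

    #include <stdint.h>

    /* Pack two 16-bit firmware version words as line 7080 does:
     * (u32)(fw_ver1 << 16 | fw_ver2). */
    static uint32_t pack_spirom_ver(uint16_t fw_ver1, uint16_t fw_ver2)
    {
        return ((uint32_t)fw_ver1 << 16) | fw_ver2;
    }
    /* e.g. fw_ver1 = 0x0102, fw_ver2 = 0x0304 -> 0x01020304 */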
7319 bnx2x_cl45_read(bp, phy, bnx2x_8073_set_pause_cl37()
7343 bnx2x_cl45_write(bp, phy, bnx2x_8073_set_pause_cl37()
7352 struct bnx2x *bp = params->bp; bnx2x_8073_specific_func() local
7356 bnx2x_cl45_write(bp, phy, bnx2x_8073_specific_func()
7358 bnx2x_cl45_write(bp, phy, bnx2x_8073_specific_func()
7368 struct bnx2x *bp = params->bp; bnx2x_8073_config_init() local
7373 if (CHIP_IS_E2(bp)) bnx2x_8073_config_init()
7374 gpio_port = BP_PATH(bp); bnx2x_8073_config_init()
7378 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_config_init()
7381 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_8073_config_init()
7387 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7390 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7400 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7403 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7411 if (REG_RD(bp, params->shmem_base + bnx2x_8073_config_init()
7416 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7419 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7425 bnx2x_807x_force_10G(bp, phy); bnx2x_8073_config_init()
7429 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7456 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); bnx2x_8073_config_init()
7457 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); bnx2x_8073_config_init()
7464 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7477 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); bnx2x_8073_config_init()
7480 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); bnx2x_8073_config_init()
7481 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, bnx2x_8073_config_init()
7486 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_8073_config_init()
7492 if (bnx2x_8073_is_snr_needed(bp, phy)) bnx2x_8073_config_init()
7493 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7498 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); bnx2x_8073_config_init()
7500 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); bnx2x_8073_config_init()
7506 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); bnx2x_8073_config_init()
7516 struct bnx2x *bp = params->bp; bnx2x_8073_read_status() local
7522 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7528 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7530 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7534 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7538 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7544 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7548 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7550 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7557 if (bnx2x_8073_xaui_wa(bp, phy) != 0) bnx2x_8073_read_status()
7560 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7562 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7566 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7568 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7574 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { bnx2x_8073_read_status()
7579 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7584 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7588 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7619 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7632 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7637 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_8073_read_status()
7643 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_8073_read_status()
7660 struct bnx2x *bp = params->bp; bnx2x_8073_link_reset() local
7662 if (CHIP_IS_E2(bp)) bnx2x_8073_link_reset()
7663 gpio_port = BP_PATH(bp); bnx2x_8073_link_reset()
7668 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_link_reset()
7680 struct bnx2x *bp = params->bp; bnx2x_8705_config_init() local
7683 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8705_config_init()
7686 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8705_config_init()
7687 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); bnx2x_8705_config_init()
7688 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8705_config_init()
7690 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7692 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7694 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7696 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7699 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); bnx2x_8705_config_init()
7709 struct bnx2x *bp = params->bp; bnx2x_8705_read_status() local
7711 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7715 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7719 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7722 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7724 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7743 struct bnx2x *bp = params->bp; bnx2x_set_disable_pmd_transmit() local
7757 bnx2x_cl45_write(bp, phy, bnx2x_set_disable_pmd_transmit()
7766 struct bnx2x *bp = params->bp; bnx2x_get_gpio_port() local
7767 if (CHIP_IS_E2(bp)) bnx2x_get_gpio_port()
7768 gpio_port = BP_PATH(bp); bnx2x_get_gpio_port()
7771 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_get_gpio_port()
7772 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_get_gpio_port()
7782 struct bnx2x *bp = params->bp; bnx2x_sfp_e1e2_set_transmitter() local
7786 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_sfp_e1e2_set_transmitter()
7795 bnx2x_cl45_read(bp, phy, bnx2x_sfp_e1e2_set_transmitter()
7805 bnx2x_cl45_write(bp, phy, bnx2x_sfp_e1e2_set_transmitter()
7824 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); bnx2x_sfp_e1e2_set_transmitter()
7837 struct bnx2x *bp = params->bp; bnx2x_sfp_set_transmitter() local
7839 if (CHIP_IS_E3(bp)) bnx2x_sfp_set_transmitter()
7850 struct bnx2x *bp = params->bp; bnx2x_8726_read_sfp_module_eeprom() local
7859 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7864 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7869 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7875 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7894 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7901 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7916 struct bnx2x *bp = params->bp; bnx2x_warpcore_power_module() local
7918 pin_cfg = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_power_module()
7931 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); bnx2x_warpcore_power_module()
7943 struct bnx2x *bp = params->bp; bnx2x_warpcore_read_sfp_module_eeprom() local
7960 rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, bnx2x_warpcore_read_sfp_module_eeprom()
7979 struct bnx2x *bp = params->bp; bnx2x_8727_read_sfp_module_eeprom() local
7992 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
7998 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8004 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8010 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8015 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8021 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
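The three *_read_sfp_module_eeprom() variants above (8726, Warpcore/BSC, 8727) implement the same idea over different transports: issue a two-wire (I2C) read of the SFP module's EEPROM at device address 0xA0, wait for the controller to report completion, then copy out the bytes. A condensed sketch under assumed helper names; the real 8726/8727 paths drive this through bnx2x_cl45_write()/bnx2x_cl45_read() register sequences:

    #include <stdint.h>

    #define SFP_EEPROM_DEV_ADDR 0xA0   /* standard SFP EEPROM i2c address */

    /* Hypothetical two-wire controller accessors. */
    int twowire_start_read(uint8_t dev, uint16_t offset, uint8_t count);
    int twowire_status(void);            /* <0 while busy, 0 when done */
    uint8_t twowire_fetch_byte(int i);

    static int read_sfp_eeprom(uint16_t offset, uint8_t *buf, uint8_t count)
    {
        int i, rc;

        rc = twowire_start_read(SFP_EEPROM_DEV_ADDR, offset, count);
        if (rc)
            return rc;
        for (i = 0; i < 100 && twowire_status() < 0; i++)
            ;                            /* poll for completion, bounded */
        if (twowire_status() < 0)
            return -1;                   /* module absent or bus stuck */
        for (i = 0; i < count; i++)
            buf[i] = twowire_fetch_byte(i);
        return 0;
    }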
8032 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8051 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8058 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8074 struct bnx2x *bp = params->bp; bnx2x_read_sfp_module_eeprom() local
8115 struct bnx2x *bp = params->bp; bnx2x_get_edc_mode() local
8182 if (!CHIP_IS_E1x(bp)) { bnx2x_get_edc_mode()
8183 gport = BP_PATH(bp) + bnx2x_get_edc_mode()
8186 netdev_err(bp->dev, bnx2x_get_edc_mode()
8217 media_types = REG_RD(bp, sync_offset); bnx2x_get_edc_mode()
8229 REG_WR(bp, sync_offset, media_types); bnx2x_get_edc_mode()
8256 struct bnx2x *bp = params->bp; bnx2x_verify_sfp_module() local
8262 val = REG_RD(bp, params->shmem_base + bnx2x_verify_sfp_module()
8292 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); bnx2x_verify_sfp_module()
8318 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," bnx2x_verify_sfp_module()
8333 struct bnx2x *bp = params->bp; bnx2x_wait_for_sfp_module_initialized() local
8361 static void bnx2x_8727_power_module(struct bnx2x *bp, bnx2x_8727_power_module() argument
8387 bnx2x_cl45_write(bp, phy, bnx2x_8727_power_module()
8393 static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, bnx2x_8726_set_limiting_mode() argument
8399 bnx2x_cl45_read(bp, phy, bnx2x_8726_set_limiting_mode()
8408 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8422 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8426 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8430 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8434 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8442 static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, bnx2x_8727_set_limiting_mode() argument
8448 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_limiting_mode()
8453 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8458 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_limiting_mode()
8463 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8468 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8480 struct bnx2x *bp = params->bp; bnx2x_8727_specific_func() local
8491 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8494 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8497 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8500 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_specific_func()
8511 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8525 struct bnx2x *bp = params->bp; bnx2x_set_e1e2_module_fault_led() local
8527 u32 fault_led_gpio = REG_RD(bp, params->shmem_base + bnx2x_set_e1e2_module_fault_led()
8545 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); bnx2x_set_e1e2_module_fault_led()
8559 struct bnx2x *bp = params->bp; bnx2x_set_e3_module_fault_led() local
8560 pin_cfg = (REG_RD(bp, params->shmem_base + bnx2x_set_e3_module_fault_led()
8567 bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); bnx2x_set_e3_module_fault_led()
8573 struct bnx2x *bp = params->bp; bnx2x_set_sfp_module_fault_led() local
8575 if (CHIP_IS_E3(bp)) { bnx2x_set_sfp_module_fault_led()
8587 struct bnx2x *bp = params->bp; bnx2x_warpcore_hw_reset() local
8590 REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); bnx2x_warpcore_hw_reset()
8593 REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); bnx2x_warpcore_hw_reset()
8594 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); bnx2x_warpcore_hw_reset()
8595 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); bnx2x_warpcore_hw_reset()
8602 struct bnx2x *bp = params->bp; bnx2x_power_sfp_module() local
8608 bnx2x_8727_power_module(params->bp, phy, power); bnx2x_power_sfp_module()
8623 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_limiting_mode() local
8627 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8645 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8648 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8652 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_set_limiting_mode()
8653 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_set_limiting_mode()
8663 bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); bnx2x_set_limiting_mode()
8667 bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); bnx2x_set_limiting_mode()
8678 struct bnx2x *bp = params->bp; bnx2x_sfp_module_detection() local
8682 u32 val = REG_RD(bp, params->shmem_base + bnx2x_sfp_module_detection()
8732 struct bnx2x *bp = params->bp; bnx2x_handle_module_detect_int() local
8736 if (CHIP_IS_E3(bp)) { bnx2x_handle_module_detect_int()
8743 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, bnx2x_handle_module_detect_int()
8754 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_handle_module_detect_int()
8758 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_handle_module_detect_int()
8762 bnx2x_set_gpio_int(bp, gpio_num, bnx2x_handle_module_detect_int()
8767 if (CHIP_IS_E3(bp)) { bnx2x_handle_module_detect_int()
8773 bnx2x_cl45_read(bp, phy, bnx2x_handle_module_detect_int()
8780 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_handle_module_detect_int()
8782 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_handle_module_detect_int()
8789 bnx2x_set_gpio_int(bp, gpio_num, bnx2x_handle_module_detect_int()
8802 static void bnx2x_sfp_mask_fault(struct bnx2x *bp, bnx2x_sfp_mask_fault() argument
8808 bnx2x_cl45_read(bp, phy, bnx2x_sfp_mask_fault()
8811 bnx2x_cl45_read(bp, phy, bnx2x_sfp_mask_fault()
8815 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); bnx2x_sfp_mask_fault()
8820 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); bnx2x_sfp_mask_fault()
8831 struct bnx2x *bp = params->bp; bnx2x_8706_8726_read_status() local
8834 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8837 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, bnx2x_8706_8726_read_status()
8841 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8843 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8847 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8849 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8851 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8853 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8873 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8706_8726_read_status()
8875 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8706_8726_read_status()
8893 struct bnx2x *bp = params->bp; bnx2x_8706_config_init() local
8895 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8706_config_init()
8898 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8706_config_init()
8899 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); bnx2x_8706_config_init()
8900 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8706_config_init()
8904 bnx2x_cl45_read(bp, phy, bnx2x_8706_config_init()
8919 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); bnx2x_8706_config_init()
8926 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); bnx2x_8706_config_init()
8933 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8936 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8940 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
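bnx2x_sfp_mask_fault() (lines 8802-8820) reads the latched LASI status register twice (the first read returns and clears the latched value) and then updates one bit of the alarm control register so the fault source is masked or re-enabled. One plausible shape of that sequence, with hypothetical pma_read()/pma_write() accessors:

    #include <stdint.h>

    uint16_t pma_read(uint16_t reg);             /* hypothetical CL45 read  */
    void pma_write(uint16_t reg, uint16_t val);  /* hypothetical CL45 write */

    static void sfp_mask_fault(uint16_t status_reg, uint16_t alarm_ctrl_reg,
                               uint16_t fault_bit)
    {
        uint16_t status, ctrl;

        status = pma_read(status_reg);  /* latched value, cleared on read */
        status = pma_read(status_reg);  /* current value                  */
        ctrl = pma_read(alarm_ctrl_reg);
        if (status & fault_bit)
            ctrl &= ~fault_bit;         /* fault active: mask the alarm   */
        else
            ctrl |= fault_bit;          /* fault gone: re-enable it       */
        pma_write(alarm_ctrl_reg, ctrl);
    }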
8947 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8951 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8954 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8957 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8961 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8963 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8966 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8970 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); bnx2x_8706_config_init()
8976 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_8706_config_init()
8983 bnx2x_cl45_read(bp, phy, bnx2x_8706_config_init()
8986 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
9006 struct bnx2x *bp = params->bp; bnx2x_8726_config_loopback() local
9008 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); bnx2x_8726_config_loopback()
9014 struct bnx2x *bp = params->bp; bnx2x_8726_external_rom_boot() local
9019 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9023 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9028 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9032 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9041 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9046 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); bnx2x_8726_external_rom_boot()
9053 struct bnx2x *bp = params->bp; bnx2x_8726_read_status() local
9057 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_status()
9074 struct bnx2x *bp = params->bp; bnx2x_8726_config_init() local
9077 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_8726_config_init()
9078 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8726_config_init()
9091 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9093 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9095 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9097 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9109 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9111 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9113 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9115 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9117 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9122 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9124 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9129 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9140 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9145 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9158 struct bnx2x *bp = params->bp; bnx2x_8726_link_reset() local
9161 bnx2x_cl45_write(bp, phy, bnx2x_8726_link_reset()
9173 struct bnx2x *bp = params->bp; bnx2x_8727_set_link_led() local
9195 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_link_led()
9201 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_link_led()
9205 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_link_led()
9211 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_link_led()
9223 struct bnx2x *bp = params->bp; bnx2x_8727_hw_reset() local
9224 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8727_hw_reset()
9225 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8727_hw_reset()
9227 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_8727_hw_reset()
9234 struct bnx2x *bp = params->bp; bnx2x_8727_config_speed() local
9240 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9242 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9244 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_speed()
9251 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_speed()
9255 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9267 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9269 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9275 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9278 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9280 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9282 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9294 struct bnx2x *bp = params->bp; bnx2x_8727_config_init() local
9297 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8727_config_init()
9305 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9314 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9320 bnx2x_8727_power_module(bp, phy, 1); bnx2x_8727_config_init()
9322 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9325 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9337 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9341 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9349 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_8727_config_init()
9357 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9361 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9363 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9366 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9377 struct bnx2x *bp = params->bp; bnx2x_8727_handle_mod_abs() local
9379 u32 val = REG_RD(bp, params->shmem_base + bnx2x_8727_handle_mod_abs()
9383 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9402 bnx2x_cl45_write(bp, phy, bnx2x_8727_handle_mod_abs()
9409 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9427 bnx2x_cl45_write(bp, phy, bnx2x_8727_handle_mod_abs()
9436 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9464 struct bnx2x *bp = params->bp; bnx2x_8727_read_status() local
9470 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9477 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9483 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, bnx2x_8727_read_status()
9486 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9492 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9500 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9505 if (!CHIP_IS_E1x(bp)) bnx2x_8727_read_status()
9506 oc_port = BP_PATH(bp) + (params->port << 1); bnx2x_8727_read_status()
9510 netdev_err(bp->dev, "Error: Power fault on Port %d has " bnx2x_8727_read_status()
9519 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9523 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9528 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9532 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9535 bnx2x_8727_power_module(params->bp, phy, 0); bnx2x_8727_read_status()
9544 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9557 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9582 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_read_status()
9585 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_read_status()
9601 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9611 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9621 struct bnx2x *bp = params->bp; bnx2x_8727_link_reset() local
9629 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); bnx2x_8727_link_reset()
9637 struct bnx2x *bp, bnx2x_save_848xx_spirom_version()
9652 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); bnx2x_save_848xx_spirom_version()
9653 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, bnx2x_save_848xx_spirom_version()
9659 bnx2x_cl45_write(bp, phy, reg_set[i].devad, bnx2x_save_848xx_spirom_version()
9663 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); bnx2x_save_848xx_spirom_version()
9671 bnx2x_save_spirom_version(bp, port, 0, bnx2x_save_848xx_spirom_version()
9678 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); bnx2x_save_848xx_spirom_version()
9679 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); bnx2x_save_848xx_spirom_version()
9680 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); bnx2x_save_848xx_spirom_version()
9682 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); bnx2x_save_848xx_spirom_version()
9690 bnx2x_save_spirom_version(bp, port, 0, bnx2x_save_848xx_spirom_version()
9696 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); bnx2x_save_848xx_spirom_version()
9698 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); bnx2x_save_848xx_spirom_version()
9700 bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, bnx2x_save_848xx_spirom_version()
9705 static void bnx2x_848xx_set_led(struct bnx2x *bp, bnx2x_848xx_set_led() argument
9719 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_led()
9725 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_led()
9730 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_848xx_set_led()
9740 bnx2x_cl45_read_or_write(bp, phy, bnx2x_848xx_set_led()
9749 struct bnx2x *bp = params->bp; bnx2x_848xx_specific_func() local
9755 bnx2x_save_848xx_spirom_version(phy, bp, params->port); bnx2x_848xx_specific_func()
9761 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, bnx2x_848xx_specific_func()
9764 bnx2x_848xx_set_led(bp, phy); bnx2x_848xx_specific_func()
9773 struct bnx2x *bp = params->bp; bnx2x_848xx_cmn_config_init() local
9777 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9781 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9786 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9790 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9809 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9857 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9869 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9875 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9888 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9900 bp, phy, bnx2x_848xx_cmn_config_init()
9904 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9908 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9920 struct bnx2x *bp = params->bp; bnx2x_8481_config_init() local
9922 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8481_config_init()
9926 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8481_config_init()
9927 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8481_config_init()
9929 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_8481_config_init()
9941 struct bnx2x *bp = params->bp; bnx2x_84833_cmd_hdlr() local
9943 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9947 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9960 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9964 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9967 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9981 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9985 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
9998 struct bnx2x *bp = params->bp; bnx2x_84833_pair_swap_cfg() local
10001 pair_swap = REG_RD(bp, params->shmem_base + bnx2x_84833_pair_swap_cfg()
10020 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, bnx2x_84833_get_reset_gpios() argument
10027 if (CHIP_IS_E3(bp)) { bnx2x_84833_get_reset_gpios()
10031 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + bnx2x_84833_get_reset_gpios()
10044 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + bnx2x_84833_get_reset_gpios()
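bnx2x_84833_cmd_hdlr() (lines 9941-9985) is a firmware mailbox: wait until the PHY firmware reports idle, write the argument registers, write the command opcode, then poll the status register for completion and read the results back. A generic sketch of that protocol; the register numbers and status bits here are assumptions for illustration, not the 84833's real map:

    #include <stdint.h>

    uint16_t ctl_read(uint16_t reg);             /* hypothetical CTL-devad read  */
    void ctl_write(uint16_t reg, uint16_t val);  /* hypothetical CTL-devad write */

    #define CMD_REG         0x4005   /* assumed mailbox registers */
    #define STATUS_REG      0x4004
    #define ARG_BASE        0x4007
    #define STATUS_READY    0x0010   /* assumed status bits */
    #define STATUS_COMPLETE 0x0080

    static int phy_fw_cmd(uint16_t opcode, uint16_t *args, int nargs)
    {
        int i;

        for (i = 0; i < 300 && !(ctl_read(STATUS_REG) & STATUS_READY); i++)
            ;                                    /* wait for fw idle, bounded */
        for (i = 0; i < nargs; i++)
            ctl_write(ARG_BASE + i, args[i]);    /* stage the arguments */
        ctl_write(CMD_REG, opcode);              /* kick the command */
        for (i = 0; i < 300 && !(ctl_read(STATUS_REG) & STATUS_COMPLETE); i++)
            ;                                    /* wait for completion */
        if (!(ctl_read(STATUS_REG) & STATUS_COMPLETE))
            return -1;
        for (i = 0; i < nargs; i++)
            args[i] = ctl_read(ARG_BASE + i);    /* results overwrite args */
        return 0;
    }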
10061 struct bnx2x *bp = params->bp; bnx2x_84833_hw_reset_phy() local
10063 u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + bnx2x_84833_hw_reset_phy()
10070 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_84833_hw_reset_phy()
10073 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_84833_hw_reset_phy()
10080 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, bnx2x_84833_hw_reset_phy()
10083 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); bnx2x_84833_hw_reset_phy()
10096 struct bnx2x *bp = params->bp; bnx2x_8483x_disable_eee() local
10117 struct bnx2x *bp = params->bp; bnx2x_8483x_enable_eee() local
10135 struct bnx2x *bp = params->bp; bnx2x_848x3_config_init() local
10144 if (!(CHIP_IS_E1x(bp))) bnx2x_848x3_config_init()
10145 port = BP_PATH(bp); bnx2x_848x3_config_init()
10150 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, bnx2x_848x3_config_init()
10155 bnx2x_cl45_write(bp, phy, bnx2x_848x3_config_init()
10160 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_848x3_config_init()
10177 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10185 if (CHIP_IS_E3(bp)) { bnx2x_848x3_config_init()
10216 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10239 bnx2x_save_848xx_spirom_version(phy, bp, params->port); bnx2x_848x3_config_init()
10242 u32 cms_enable = REG_RD(bp, params->shmem_base + bnx2x_848x3_config_init()
10247 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10253 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10257 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10289 bnx2x_cl45_read_and_write(bp, phy, bnx2x_848x3_config_init()
10301 struct bnx2x *bp = params->bp; bnx2x_848xx_read_status() local
10308 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10310 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10320 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_848xx_read_status()
10325 bnx2x_cl45_write(bp, phy, bnx2x_848xx_read_status()
10330 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10361 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10368 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10383 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10401 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10411 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10439 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_8481_hw_reset()
10441 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_8481_hw_reset()
10448 bnx2x_cl45_write(params->bp, phy, bnx2x_8481_link_reset()
10450 bnx2x_cl45_write(params->bp, phy, bnx2x_8481_link_reset()
10457 struct bnx2x *bp = params->bp; bnx2x_848x3_link_reset() local
10461 if (!(CHIP_IS_E1x(bp))) bnx2x_848x3_link_reset()
10462 port = BP_PATH(bp); bnx2x_848x3_link_reset()
10467 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, bnx2x_848x3_link_reset()
10471 bnx2x_cl45_read(bp, phy, bnx2x_848x3_link_reset()
10475 bnx2x_cl45_write(bp, phy, bnx2x_848x3_link_reset()
10484 struct bnx2x *bp = params->bp; bnx2x_848xx_set_link_led() local
10488 if (!(CHIP_IS_E1x(bp))) bnx2x_848xx_set_link_led()
10489 port = BP_PATH(bp); bnx2x_848xx_set_link_led()
10502 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10507 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10512 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10517 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10523 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10538 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10543 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10548 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10553 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10559 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10568 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_848xx_set_link_led()
10575 bp, bnx2x_848xx_set_link_led()
10580 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10594 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10601 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10607 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10612 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10617 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10622 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10627 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10636 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_848xx_set_link_led()
10643 bp, bnx2x_848xx_set_link_led()
10648 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10664 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10673 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10680 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10685 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10690 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10695 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10709 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10715 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10721 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10730 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10748 if (CHIP_IS_E3(bp)) { bnx2x_848xx_set_link_led()
10749 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_848xx_set_link_led()
10761 struct bnx2x *bp = params->bp; bnx2x_54618se_specific_func() local
10767 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10770 bnx2x_cl22_read(bp, phy, bnx2x_54618se_specific_func()
10775 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10779 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10790 struct bnx2x *bp = params->bp; bnx2x_54618se_config_init() local
10803 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_54618se_config_init()
10810 bnx2x_set_cfg_pin(bp, cfg_pin, 1); bnx2x_54618se_config_init()
10816 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10818 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_54618se_config_init()
10826 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10829 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10833 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10850 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10854 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10858 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10879 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10882 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10918 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10925 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10934 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, bnx2x_54618se_config_init()
10937 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); bnx2x_54618se_config_init()
10939 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); bnx2x_54618se_config_init()
10974 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_54618se_config_init()
10979 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10986 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10996 struct bnx2x *bp = params->bp; bnx2x_5461x_set_link_led() local
10999 bnx2x_cl22_write(bp, phy, bnx2x_5461x_set_link_led()
11002 bnx2x_cl22_read(bp, phy, bnx2x_5461x_set_link_led()
11022 bnx2x_cl22_write(bp, phy, bnx2x_5461x_set_link_led()
11032 struct bnx2x *bp = params->bp; bnx2x_54618se_link_reset() local
11039 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); bnx2x_54618se_link_reset()
11044 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_54618se_link_reset()
11051 bnx2x_set_cfg_pin(bp, cfg_pin, 0); bnx2x_54618se_link_reset()
11058 struct bnx2x *bp = params->bp; bnx2x_54618se_read_status() local
11064 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11070 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11107 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11113 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11127 bnx2x_cl22_read(bp, phy, 0x5, &val); bnx2x_54618se_read_status()
11145 bnx2x_cl22_read(bp, phy, 0xa, &val); bnx2x_54618se_read_status()
11164 struct bnx2x *bp = params->bp; bnx2x_54618se_config_loopback() local
11172 bnx2x_cl22_write(bp, phy, 0x09, 3<<11); bnx2x_54618se_config_loopback()
11179 bnx2x_cl22_read(bp, phy, 0x00, &val); bnx2x_54618se_config_loopback()
11182 bnx2x_cl22_write(bp, phy, 0x00, val); bnx2x_54618se_config_loopback()
11188 bnx2x_cl22_write(bp, phy, 0x18, 7); bnx2x_54618se_config_loopback()
11189 bnx2x_cl22_read(bp, phy, 0x18, &val); bnx2x_54618se_config_loopback()
11190 bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); bnx2x_54618se_config_loopback()
11193 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); bnx2x_54618se_config_loopback()
11198 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); bnx2x_54618se_config_loopback()
11207 struct bnx2x *bp = params->bp; bnx2x_7101_config_loopback() local
11209 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_loopback()
11218 struct bnx2x *bp = params->bp; bnx2x_7101_config_init() local
11222 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_7101_config_init()
11225 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_7101_config_init()
11226 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_7101_config_init()
11228 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11231 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11236 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11239 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11243 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11246 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11248 bnx2x_save_spirom_version(bp, params->port, bnx2x_7101_config_init()
11257 struct bnx2x *bp = params->bp; bnx2x_7101_read_status() local
11260 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11262 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11266 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11268 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11275 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11282 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_7101_read_status()
11306 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_sfx7101_sp_sw_reset() argument
11310 bnx2x_cl45_read(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11317 bnx2x_cl45_write(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11322 bnx2x_cl45_read(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11334 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, bnx2x_7101_hw_reset()
11337 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_7101_hw_reset()
11345 struct bnx2x *bp = params->bp; bnx2x_7101_set_link_led() local
11358 bnx2x_cl45_write(bp, phy, bnx2x_7101_set_link_led()
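bnx2x_54618se_config_loopback() (lines 11164-11193) talks plain clause-22 MDIO: register 0x00 is the standard MII control register (BMCR), so forcing internal loopback is a read-modify-write of its loopback bit, after which the NIG is told to steer traffic to the EMAC. A sketch of the MII part, assuming cl22_read()/cl22_write() helpers:

    #include <stdint.h>

    uint16_t cl22_read(uint8_t reg);             /* hypothetical clause-22 read  */
    void cl22_write(uint8_t reg, uint16_t val);  /* hypothetical clause-22 write */

    #define MII_BMCR      0x00       /* basic mode control register */
    #define BMCR_LOOPBACK (1 << 14)  /* IEEE 802.3 loopback bit */

    static void gphy_enable_loopback(void)
    {
        uint16_t bmcr = cl22_read(MII_BMCR);

        bmcr |= BMCR_LOOPBACK;       /* loop TX back to RX inside the PHY */
        cl22_write(MII_BMCR, bmcr);
    }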
11863 static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, bnx2x_populate_preemphasis() argument
11875 rx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
11879 tx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
11883 rx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
11887 tx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
11900 static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, bnx2x_get_ext_phy_config() argument
11906 ext_phy_config = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_config()
11911 ext_phy_config = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_config()
11922 static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, bnx2x_populate_int_phy() argument
11927 u32 switch_cfg = (REG_RD(bp, shmem_base + bnx2x_populate_int_phy()
11931 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_populate_int_phy()
11932 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_populate_int_phy()
11935 if (USES_WARPCORE(bp)) { bnx2x_populate_int_phy()
11937 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
11940 if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) bnx2x_populate_int_phy()
11945 serdes_net_if = (REG_RD(bp, shmem_base + bnx2x_populate_int_phy()
12020 if (CHIP_REV(bp) == CHIP_REV_Ax) bnx2x_populate_int_phy()
12027 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
12033 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
12044 phy->mdio_ctrl = bnx2x_get_emac_base(bp, bnx2x_populate_int_phy()
12047 if (CHIP_IS_E2(bp)) bnx2x_populate_int_phy()
12055 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); bnx2x_populate_int_phy()
12059 static int bnx2x_populate_ext_phy(struct bnx2x *bp, bnx2x_populate_ext_phy() argument
12068 ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, bnx2x_populate_ext_phy()
12132 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); bnx2x_populate_ext_phy()
12138 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, bnx2x_populate_ext_phy()
12149 u32 size = REG_RD(bp, shmem2_base); bnx2x_populate_ext_phy()
12164 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); bnx2x_populate_ext_phy()
12172 u32 raw_ver = REG_RD(bp, phy->ver_addr); bnx2x_populate_ext_phy()
12186 static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, bnx2x_populate_phy() argument
12192 return bnx2x_populate_int_phy(bp, shmem_base, port, phy); bnx2x_populate_phy()
12193 status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_populate_phy()
12202 struct bnx2x *bp = params->bp; bnx2x_phy_def_cfg() local
12206 link_config = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12209 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12214 link_config = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12217 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12307 struct bnx2x *bp = params->bp; bnx2x_phy_probe() local
12327 if (bnx2x_populate_phy(bp, phy_index, params->shmem_base, bnx2x_phy_probe()
12353 media_types = REG_RD(bp, sync_offset); bnx2x_phy_probe()
12367 REG_WR(bp, sync_offset, media_types); bnx2x_phy_probe()
12380 struct bnx2x *bp = params->bp; bnx2x_init_bmac_loopback() local
12394 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_bmac_loopback()
12400 struct bnx2x *bp = params->bp; bnx2x_init_emac_loopback() local
12413 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_emac_loopback()
12419 struct bnx2x *bp = params->bp; bnx2x_init_xmac_loopback() local
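bnx2x_populate_int_phy() (lines 11931-11932) composes the chip id from two MISC registers: the 16-bit chip number in the high half and the 4-bit revision shifted into bits 15:12. Worked out with illustrative values:

    #include <stdint.h>

    /* chip_id = (CHIP_NUM << 16) | ((CHIP_REV & 0xf) << 12),
     * per lines 11931-11932. */
    static uint32_t make_chip_id(uint32_t chip_num, uint32_t chip_rev)
    {
        return (chip_num << 16) | ((chip_rev & 0xf) << 12);
    }
    /* e.g. chip_num = 0x168e, chip_rev = 0x1 -> chip_id = 0x168e1000 */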
12433 bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0); bnx2x_init_xmac_loopback()
12439 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_xmac_loopback()
12445 struct bnx2x *bp = params->bp; bnx2x_init_umac_loopback() local
12454 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_umac_loopback()
12460 struct bnx2x *bp = params->bp; bnx2x_init_xgxs_loopback() local
12473 if (!USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12478 if (USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12485 if (USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12504 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_xgxs_loopback()
12511 struct bnx2x *bp = params->bp; bnx2x_set_rx_filter() local
12515 if (!CHIP_IS_E1x(bp)) bnx2x_set_rx_filter()
12517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); bnx2x_set_rx_filter()
12519 if (!CHIP_IS_E1(bp)) { bnx2x_set_rx_filter()
12520 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, bnx2x_set_rx_filter()
12524 REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : bnx2x_set_rx_filter()
12532 struct bnx2x *bp = params->bp; bnx2x_avoid_link_flap() local
12534 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_avoid_link_flap()
12554 lfa_sts = REG_RD(bp, params->lfa_base + bnx2x_avoid_link_flap()
12561 if (CHIP_IS_E3(bp)) { bnx2x_avoid_link_flap()
12563 REG_WR(bp, GRCBASE_MISC + bnx2x_avoid_link_flap()
12567 REG_WR(bp, GRCBASE_MISC + bnx2x_avoid_link_flap()
12591 REG_WR(bp, params->lfa_base + bnx2x_avoid_link_flap()
12595 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_avoid_link_flap()
12607 struct bnx2x *bp = params->bp; bnx2x_cannot_avoid_link_flap() local
12614 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12618 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12622 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12627 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12633 tmp_val = REG_RD(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12638 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12641 lfa_sts = REG_RD(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12657 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12665 struct bnx2x *bp = params->bp; bnx2x_phy_init() local
12698 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, bnx2x_phy_init()
12734 if (!CHIP_IS_E3(bp)) { bnx2x_phy_init()
12738 bnx2x_serdes_deassert(bp, params->port); bnx2x_phy_init()
12754 struct bnx2x *bp = params->bp; bnx2x_link_reset() local
12764 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, bnx2x_link_reset()
12771 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); bnx2x_link_reset()
12774 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12775 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); bnx2x_link_reset()
12776 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); bnx2x_link_reset()
12779 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12780 bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); bnx2x_link_reset()
12786 if (!CHIP_IS_E3(bp)) bnx2x_link_reset()
12787 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_link_reset()
12794 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_link_reset()
12815 bnx2x_rearm_latch_signal(bp, port, 0); bnx2x_link_reset()
12816 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, bnx2x_link_reset()
12824 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12826 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_link_reset()
12828 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); bnx2x_link_reset()
12829 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); bnx2x_link_reset()
12833 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_link_reset()
12835 REG_WR(bp, xmac_base + XMAC_REG_CTRL, bnx2x_link_reset()
12845 struct bnx2x *bp = params->bp; bnx2x_lfa_reset() local
12855 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); bnx2x_lfa_reset()
12861 if (!CHIP_IS_E3(bp)) bnx2x_lfa_reset()
12862 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); bnx2x_lfa_reset()
12864 if (CHIP_IS_E3(bp)) { bnx2x_lfa_reset()
12882 if (!CHIP_IS_E3(bp)) bnx2x_lfa_reset()
12883 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); bnx2x_lfa_reset()
12885 if (CHIP_IS_E3(bp)) { bnx2x_lfa_reset()
12890 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_lfa_reset()
12897 static int bnx2x_8073_common_init_phy(struct bnx2x *bp, bnx2x_8073_common_init_phy() argument
12908 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8073_common_init_phy()
12909 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8073_common_init_phy()
12911 bnx2x_ext_phy_hw_reset(bp, port); bnx2x_8073_common_init_phy()
12916 if (CHIP_IS_E1x(bp)) { bnx2x_8073_common_init_phy()
12927 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8073_common_init_phy()
12934 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_8073_common_init_phy()
12944 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_common_init_phy()
12949 bnx2x_cl45_write(bp, &phy[port], bnx2x_8073_common_init_phy()
12968 if (CHIP_IS_E1x(bp)) bnx2x_8073_common_init_phy()
12975 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], bnx2x_8073_common_init_phy()
12980 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
12985 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13000 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13004 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13010 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13013 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13018 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_common_init_phy()
13023 static int bnx2x_8726_common_init_phy(struct bnx2x *bp, bnx2x_8726_common_init_phy() argument
13033 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); bnx2x_8726_common_init_phy()
13036 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_8726_common_init_phy()
13038 bnx2x_ext_phy_hw_reset(bp, 0); bnx2x_8726_common_init_phy()
13044 if (CHIP_IS_E1x(bp)) { bnx2x_8726_common_init_phy()
13052 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8726_common_init_phy()
13060 bnx2x_cl45_write(bp, &phy, bnx2x_8726_common_init_phy()
13065 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_8726_common_init_phy()
13072 static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base, bnx2x_get_ext_phy_reset_gpio() argument
13076 u32 phy_gpio_reset = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_reset_gpio()
13118 static int bnx2x_8727_common_init_phy(struct bnx2x *bp, bnx2x_8727_common_init_phy() argument
13128 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8727_common_init_phy()
13129 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8727_common_init_phy()
13137 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], bnx2x_8727_common_init_phy()
13144 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, bnx2x_8727_common_init_phy()
13147 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, bnx2x_8727_common_init_phy()
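The *_common_init_phy() helpers reset external PHYs by pulsing their reset GPIOs: drive the pin low, hold long enough to satisfy the PHY's reset-pulse width, then drive it high again, as the LOW-then-HIGH pair at lines 13144-13147 shows. A sketch assuming an active-low reset line and hypothetical gpio_set()/msleep() helpers; the 10 ms holds are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    void gpio_set(uint8_t pin, bool high); /* hypothetical GPIO driver */
    void msleep(unsigned int msecs);       /* sleep, as in the kernel */

    static void ext_phy_reset_pulse(uint8_t reset_pin)
    {
        gpio_set(reset_pin, false);  /* assert reset (active low)    */
        msleep(10);                  /* hold past required width     */
        gpio_set(reset_pin, true);   /* deassert, PHY begins booting */
        msleep(10);                  /* settle before first MDIO poll */
    }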
13157 if (CHIP_IS_E1x(bp)) { bnx2x_8727_common_init_phy()
13168 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8727_common_init_phy()
13175 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_8727_common_init_phy()
13184 bnx2x_cl45_write(bp, &phy[port], bnx2x_8727_common_init_phy()
13199 if (CHIP_IS_E1x(bp)) bnx2x_8727_common_init_phy()
13205 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], bnx2x_8727_common_init_phy()
13209 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8727_common_init_phy()
13217 static int bnx2x_84833_common_init_phy(struct bnx2x *bp, bnx2x_84833_common_init_phy() argument
13224 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); bnx2x_84833_common_init_phy()
13225 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); bnx2x_84833_common_init_phy()
13227 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); bnx2x_84833_common_init_phy()
13233 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], bnx2x_ext_phy_common_init() argument
13241 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13248 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13257 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13266 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13281 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_ext_phy_common_init()
13287 int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], bnx2x_common_init_phy() argument
13295 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0); bnx2x_common_init_phy()
13296 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1); bnx2x_common_init_phy()
13298 if (CHIP_IS_E3(bp)) { bnx2x_common_init_phy()
13300 val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); bnx2x_common_init_phy()
13301 REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); bnx2x_common_init_phy()
13304 phy_ver = REG_RD(bp, shmem_base_path[0] + bnx2x_common_init_phy()
13316 ext_phy_config = bnx2x_get_ext_phy_config(bp, bnx2x_common_init_phy()
13320 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path, bnx2x_common_init_phy()
13331 struct bnx2x *bp = params->bp; bnx2x_check_over_curr() local
13336 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_check_over_curr()
13343 if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) bnx2x_check_over_curr()
13348 netdev_err(bp->dev, "Error: Power fault on Port %d has" bnx2x_check_over_curr()
13368 struct bnx2x *bp = params->bp; bnx2x_analyze_link_error() local
13404 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); bnx2x_analyze_link_error()
13417 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_analyze_link_error()
13429 bnx2x_notify_link_changed(bp); bnx2x_analyze_link_error()
13447 struct bnx2x *bp = params->bp; bnx2x_check_half_open_conn() local
13452 (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4))) bnx2x_check_half_open_conn()
13455 if (CHIP_IS_E3(bp) && bnx2x_check_half_open_conn()
13456 (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_check_half_open_conn()
13466 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); bnx2x_check_half_open_conn()
13467 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, bnx2x_check_half_open_conn()
13470 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) bnx2x_check_half_open_conn()
13476 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_check_half_open_conn()
13484 if (CHIP_IS_E2(bp)) bnx2x_check_half_open_conn()
13489 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); bnx2x_check_half_open_conn() 13502 struct bnx2x *bp = params->bp; bnx2x_sfp_tx_fault_detection() local 13507 cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, bnx2x_sfp_tx_fault_detection() 13512 if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) { bnx2x_sfp_tx_fault_detection() 13545 struct bnx2x *bp = params->bp; bnx2x_kr2_recovery() local 13555 struct bnx2x *bp = params->bp; bnx2x_check_kr2_wa() local 13579 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_check_kr2_wa() 13581 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_check_kr2_wa() 13583 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_check_kr2_wa() 13627 struct bnx2x *bp = params->bp; bnx2x_period_func() local 13638 if (CHIP_IS_E3(bp)) { bnx2x_period_func() 13648 if ((REG_RD(bp, params->shmem_base + bnx2x_period_func() 13667 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, bnx2x_fan_failure_det_req() argument 13676 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_fan_failure_det_req() 13691 struct bnx2x *bp = params->bp; bnx2x_hw_reset_phy() local 13693 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, bnx2x_hw_reset_phy() 13710 void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, bnx2x_init_mod_abs_int() argument 13717 if (CHIP_IS_E3(bp)) { bnx2x_init_mod_abs_int() 13718 if (bnx2x_get_mod_abs_int_cfg(bp, chip_id, bnx2x_init_mod_abs_int() 13728 if (bnx2x_populate_phy(bp, phy_index, shmem_base, bnx2x_init_mod_abs_int() 13746 bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port); bnx2x_init_mod_abs_int() 13748 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_init_mod_abs_int() 13749 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_init_mod_abs_int() 13758 REG_WR(bp, sync_offset, vars->aeu_int_mask); bnx2x_init_mod_abs_int() 13769 aeu_mask = REG_RD(bp, offset); bnx2x_init_mod_abs_int() 13771 REG_WR(bp, offset, aeu_mask); bnx2x_init_mod_abs_int() 13774 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); bnx2x_init_mod_abs_int() 13776 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_init_mod_abs_int() 3088 bnx2x_bsc_read(struct link_params *params, struct bnx2x *bp, u8 sl_devid, u16 sl_addr, u8 lc_addr, u8 xfer_cnt, u32 *data_array) bnx2x_bsc_read() argument 9636 bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) bnx2x_save_848xx_spirom_version() argument
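The bnx2x_ext_phy_common_init() fragments above show a single dispatch point fanning out to per-PHY-type init routines (8073, 8726, 8727, 848x3), with a shared netdev_err() warning for unrecognized types. Below is a minimal standalone sketch of that dispatch shape; the enum values and handler bodies are illustrative stand-ins, not the driver's real PORT_HW_CFG_XGXS_EXT_PHY_TYPE_* constants or init logic.

#include <stdio.h>

/* Illustrative stand-ins for the external PHY type constants. */
enum ext_phy_type { PHY_8073, PHY_8726, PHY_8727, PHY_84833, PHY_UNKNOWN };

static int init_8073(void)  { puts("8073 common init");  return 0; }
static int init_8726(void)  { puts("8726 common init");  return 0; }
static int init_8727(void)  { puts("8727 common init");  return 0; }
static int init_84833(void) { puts("84833 common init"); return 0; }

/* Shape of bnx2x_ext_phy_common_init(): one switch, shared error path. */
static int ext_phy_common_init(enum ext_phy_type type)
{
	int rc;

	switch (type) {
	case PHY_8073:  rc = init_8073();  break;
	case PHY_8726:  rc = init_8726();  break;
	case PHY_8727:  rc = init_8727();  break;
	case PHY_84833: rc = init_84833(); break;
	default:
		fprintf(stderr, "Warning: PHY was not initialized\n");
		rc = -1;
		break;
	}
	return rc;
}

int main(void) { return ext_phy_common_init(PHY_8727); }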
|
H A D | bnx2x_dcb.h | 71 #define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\ 72 (bp)->dcbx_port_params.ets.enabled) 153 #define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \ 154 ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask) 155 #define DCBX_PFC_PRI_PAUSE_MASK(bp) \ 156 ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) 157 #define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \ 158 ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp))) 159 #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \ 160 (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri)) 161 #define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \ 162 (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)) 163 #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \ 164 (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri))) 165 #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\ 166 ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri))) 167 #define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \ 168 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ 169 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) 185 void bnx2x_dcbx_init_params(struct bnx2x *bp); 186 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); 194 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 195 void bnx2x_dcbx_pmf_update(struct bnx2x *bp); 199 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); 202 int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); 203 int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
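The PFC macros in this header partition a priority group's eight priorities into a pauseable and a non-pauseable set by masking against priority_non_pauseable_mask; "mixed" groups are those that are neither purely one nor the other. A self-contained demonstration of the same arithmetic, with plain functions replacing the bp-based macros and arbitrary example masks:

#include <stdint.h>
#include <stdio.h>

/* Mirrors DCBX_PFC_PRI_PAUSE_MASK(): pauseable = complement of non-pauseable. */
static uint8_t pri_pause_mask(uint8_t non_pause_mask)
{
	return (uint8_t)~non_pause_mask;
}

int main(void)
{
	uint8_t non_pause = 0x0f;  /* arbitrary: priorities 0-3 non-pauseable */
	uint8_t pg_pri    = 0x3c;  /* priorities 2-5 belong to this group */

	uint8_t pause    = pg_pri & pri_pause_mask(non_pause); /* ..._GET_PAUSE */
	uint8_t no_pause = pg_pri & non_pause;                 /* ..._GET_NON_PAUSE */

	/* IS_DCBX_PFC_PRI_MIX_PAUSE(): neither purely pauseable nor purely not. */
	int mixed = !(pg_pri == pause || pg_pri == no_pause);

	printf("pause=0x%02x non_pause=0x%02x mixed=%d\n", pause, no_pause, mixed);
	return 0;
}

With these inputs the group is mixed: priorities 4-5 are pauseable (0x30) while priorities 2-3 are not (0x0c).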
|
H A D | bnx2x_sp.c | 46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp, bnx2x_exe_queue_init() argument 77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, bnx2x_exe_queue_free_elem() argument 102 * @bp: driver handle 109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp, bnx2x_exe_queue_add() argument 120 rc = o->optimize(bp, o->owner, elem); bnx2x_exe_queue_add() 125 rc = o->validate(bp, o->owner, elem); bnx2x_exe_queue_add() 140 bnx2x_exe_queue_free_elem(bp, elem); bnx2x_exe_queue_add() 148 struct bnx2x *bp, __bnx2x_exe_queue_reset_pending() 158 bnx2x_exe_queue_free_elem(bp, elem); __bnx2x_exe_queue_reset_pending() 165 * @bp: driver handle 171 static inline int bnx2x_exe_queue_step(struct bnx2x *bp, bnx2x_exe_queue_step() argument 189 __bnx2x_exe_queue_reset_pending(bp, o); bnx2x_exe_queue_step() 221 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); bnx2x_exe_queue_step() 231 __bnx2x_exe_queue_reset_pending(bp, o); bnx2x_exe_queue_step() 247 struct bnx2x *bp) bnx2x_exe_queue_alloc_elem() 276 * @bp: device handle 281 static inline int bnx2x_state_wait(struct bnx2x *bp, int state, bnx2x_state_wait() argument 287 if (CHIP_REV_IS_EMUL(bp)) bnx2x_state_wait() 303 if (bp->panic) bnx2x_state_wait() 316 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) bnx2x_raw_wait() argument 318 return bnx2x_state_wait(bp, raw->state, raw->pstate); bnx2x_raw_wait() 389 * @bp: device handle 395 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, __bnx2x_vlan_mac_h_write_trylock() argument 410 * @bp: device handle 416 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, __bnx2x_vlan_mac_h_exec_pending() argument 426 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); __bnx2x_vlan_mac_h_exec_pending() 439 * @bp: device handle 445 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, __bnx2x_vlan_mac_h_pend() argument 458 * @bp: device handle 465 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, __bnx2x_vlan_mac_h_write_unlock() argument 473 __bnx2x_vlan_mac_h_exec_pending(bp, o); __bnx2x_vlan_mac_h_write_unlock() 481 * @bp: device handle 487 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, __bnx2x_vlan_mac_h_read_lock() argument 501 * @bp: device handle 506 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, bnx2x_vlan_mac_h_read_lock() argument 512 rc = __bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_vlan_mac_h_read_lock() 521 * @bp: device handle 528 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, __bnx2x_vlan_mac_h_read_unlock() argument 549 __bnx2x_vlan_mac_h_write_unlock(bp, o); __bnx2x_vlan_mac_h_read_unlock() 556 * @bp: device handle 563 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, bnx2x_vlan_mac_h_read_unlock() argument 567 __bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_h_read_unlock() 571 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, bnx2x_get_n_elements() argument 580 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_get_n_elements() 597 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_get_n_elements() 604 static int bnx2x_check_mac_add(struct bnx2x *bp, bnx2x_check_mac_add() argument 624 static int bnx2x_check_vlan_add(struct bnx2x *bp, bnx2x_check_vlan_add() argument 641 bnx2x_check_mac_del(struct bnx2x *bp, bnx2x_check_mac_del() argument 658 bnx2x_check_vlan_del(struct bnx2x *bp, bnx2x_check_vlan_del() argument 674 static bool bnx2x_check_move(struct bnx2x *bp, bnx2x_check_move() argument 685 pos = src_o->check_del(bp, src_o, data); 
bnx2x_check_move() 688 rc = dst_o->check_add(bp, dst_o, data); bnx2x_check_move() 700 struct bnx2x *bp, bnx2x_check_move_always_err() 724 static void bnx2x_set_mac_in_nig(struct bnx2x *bp, bnx2x_set_mac_in_nig() argument 728 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : bnx2x_set_mac_in_nig() 731 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) bnx2x_set_mac_in_nig() 748 REG_WR_DMAE(bp, reg_offset, wb_data, 2); bnx2x_set_mac_in_nig() 751 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : bnx2x_set_mac_in_nig() 758 * @bp: device handle 765 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, bnx2x_vlan_mac_set_cmd_hdr_e2() argument 805 static void bnx2x_set_one_mac_e2(struct bnx2x *bp, bnx2x_set_one_mac_e2() argument 838 bnx2x_set_mac_in_nig(bp, add, mac, bnx2x_set_one_mac_e2() 841 bnx2x_set_mac_in_nig(bp, add, mac, bnx2x_set_one_mac_e2() 850 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, bnx2x_set_one_mac_e2() 869 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, bnx2x_set_one_mac_e2() 893 * @bp: device handle 901 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_rdata_hdr_e1x() argument 914 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_cfg_entry_e1x() argument 940 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_rdata_e1x() argument 947 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, bnx2x_vlan_mac_set_rdata_e1x() 949 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, bnx2x_vlan_mac_set_rdata_e1x() 960 * @bp: device handle 966 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, bnx2x_set_one_mac_e1x() argument 983 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, bnx2x_set_one_mac_e1x() 989 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, bnx2x_set_one_vlan_e2() argument 1008 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, bnx2x_set_one_vlan_e2() 1023 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, bnx2x_set_one_vlan_e2() 1042 * @bp: device handle 1058 static int bnx2x_vlan_mac_restore(struct bnx2x *bp, bnx2x_vlan_mac_restore() argument 1097 return bnx2x_config_vlan_mac(bp, p); bnx2x_vlan_mac_restore() 1141 * @bp: device handle 1151 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, bnx2x_validate_vlan_mac_add() argument 1160 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_validate_vlan_mac_add() 1190 * @bp: device handle 1199 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, bnx2x_validate_vlan_mac_del() argument 1211 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_validate_vlan_mac_del() 1249 * @bp: device handle 1258 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, bnx2x_validate_vlan_mac_move() argument 1271 if (!src_o->check_move(bp, src_o, dest_o, bnx2x_validate_vlan_mac_move() 1320 static int bnx2x_validate_vlan_mac(struct bnx2x *bp, bnx2x_validate_vlan_mac() argument 1326 return bnx2x_validate_vlan_mac_add(bp, qo, elem); bnx2x_validate_vlan_mac() 1328 return bnx2x_validate_vlan_mac_del(bp, qo, elem); bnx2x_validate_vlan_mac() 1330 return bnx2x_validate_vlan_mac_move(bp, qo, elem); bnx2x_validate_vlan_mac() 1336 static int bnx2x_remove_vlan_mac(struct bnx2x *bp, bnx2x_remove_vlan_mac() argument 1368 * @bp: device handle 1372 static int bnx2x_wait_vlan_mac(struct bnx2x *bp, bnx2x_wait_vlan_mac() argument 1381 rc = raw->wait_comp(bp, raw); bnx2x_wait_vlan_mac() 1395 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, 
__bnx2x_vlan_mac_execute_step() argument 1404 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); __bnx2x_vlan_mac_execute_step() 1407 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); __bnx2x_vlan_mac_execute_step() 1414 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); __bnx2x_vlan_mac_execute_step() 1424 * @bp: device handle 1430 static int bnx2x_complete_vlan_mac(struct bnx2x *bp, bnx2x_complete_vlan_mac() argument 1444 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); bnx2x_complete_vlan_mac() 1457 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); bnx2x_complete_vlan_mac() 1473 * @bp: device handle 1477 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, bnx2x_optimize_vlan_mac() argument 1521 bnx2x_exe_queue_free_elem(bp, pos); bnx2x_optimize_vlan_mac() 1531 * @bp: device handle 1540 struct bnx2x *bp, bnx2x_vlan_mac_get_registry_elem() 1576 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_vlan_mac_get_registry_elem() 1585 * @bp: device handle 1592 static int bnx2x_execute_vlan_mac(struct bnx2x *bp, bnx2x_execute_vlan_mac() argument 1626 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, list_for_each_entry() 1641 o->set_one_rule(bp, o, elem, idx, list_for_each_entry() 1658 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, 1671 reg_elem = o->check_del(bp, o, list_for_each_entry() 1703 reg_elem = o->check_del(bp, cam_obj, list_for_each_entry() 1716 struct bnx2x *bp, bnx2x_vlan_mac_push_new_cmd() 1724 elem = bnx2x_exe_queue_alloc_elem(bp); bnx2x_vlan_mac_push_new_cmd() 1741 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); bnx2x_vlan_mac_push_new_cmd() 1747 * @bp: device handle 1751 int bnx2x_config_vlan_mac(struct bnx2x *bp, bnx2x_config_vlan_mac() argument 1764 rc = bnx2x_vlan_mac_push_new_cmd(bp, p); bnx2x_config_vlan_mac() 1783 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, bnx2x_config_vlan_mac() 1802 rc = raw->wait_comp(bp, raw); bnx2x_config_vlan_mac() 1807 rc = __bnx2x_vlan_mac_execute_step(bp, bnx2x_config_vlan_mac() 1823 * @bp: device handle 1833 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, bnx2x_vlan_mac_del_all() argument 1854 rc = exeq->remove(bp, exeq->owner, exeq_pos); bnx2x_vlan_mac_del_all() 1861 bnx2x_exe_queue_free_elem(bp, exeq_pos); bnx2x_vlan_mac_del_all() 1881 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_vlan_mac_del_all() 1891 rc = bnx2x_config_vlan_mac(bp, &p); bnx2x_vlan_mac_del_all() 1894 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_del_all() 1901 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_del_all() 1906 return bnx2x_config_vlan_mac(bp, &p); bnx2x_vlan_mac_del_all() 1950 void bnx2x_init_mac_obj(struct bnx2x *bp, bnx2x_init_mac_obj() argument 1969 if (CHIP_IS_E1x(bp)) { bnx2x_init_mac_obj() 1977 bnx2x_exe_queue_init(bp, bnx2x_init_mac_obj() 1994 bnx2x_exe_queue_init(bp, bnx2x_init_mac_obj() 2004 void bnx2x_init_vlan_obj(struct bnx2x *bp, bnx2x_init_vlan_obj() argument 2022 if (CHIP_IS_E1x(bp)) { bnx2x_init_vlan_obj() 2035 bnx2x_exe_queue_init(bp, bnx2x_init_vlan_obj() 2046 static inline void __storm_memset_mac_filters(struct bnx2x *bp, __storm_memset_mac_filters() argument 2055 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); __storm_memset_mac_filters() 2058 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, bnx2x_set_rx_mode_e1x() argument 2061 /* update the bp MAC filter structure */ bnx2x_set_rx_mode_e1x() 2130 __storm_memset_mac_filters(bp, mac_filters, p->func_id); bnx2x_set_rx_mode_e1x() 2148 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, 
bnx2x_rx_mode_set_cmd_state_e2() argument 2197 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, bnx2x_set_rx_mode_e2() argument 2217 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, bnx2x_set_rx_mode_e2() 2230 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, bnx2x_set_rx_mode_e2() 2244 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); bnx2x_set_rx_mode_e2() 2250 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, bnx2x_set_rx_mode_e2() 2258 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); bnx2x_set_rx_mode_e2() 2264 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, bnx2x_set_rx_mode_e2() 2288 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, bnx2x_set_rx_mode_e2() 2299 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, bnx2x_wait_rx_mode_comp_e2() argument 2302 return bnx2x_state_wait(bp, p->state, p->pstate); bnx2x_wait_rx_mode_comp_e2() 2305 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, bnx2x_empty_rx_mode_wait() argument 2312 int bnx2x_config_rx_mode(struct bnx2x *bp, bnx2x_config_rx_mode() argument 2318 rc = p->rx_mode_obj->config_rx_mode(bp, p); bnx2x_config_rx_mode() 2324 rc = p->rx_mode_obj->wait_comp(bp, p); bnx2x_config_rx_mode() 2332 void bnx2x_init_rx_mode_obj(struct bnx2x *bp, bnx2x_init_rx_mode_obj() argument 2335 if (CHIP_IS_E1x(bp)) { bnx2x_init_rx_mode_obj() 2373 static int bnx2x_mcast_wait(struct bnx2x *bp, bnx2x_mcast_wait() argument 2376 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || bnx2x_mcast_wait() 2377 o->raw.wait_comp(bp, &o->raw)) bnx2x_mcast_wait() 2383 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, bnx2x_mcast_enqueue_cmd() argument 2515 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, bnx2x_mcast_set_one_rule_e2() argument 2570 * @bp: device handle 2578 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, bnx2x_mcast_handle_restore_cmd_e2() 2589 o->set_one_rule(bp, o, cnt, &cfg_data, bnx2x_mcast_handle_restore_cmd_e2() 2608 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_add_e2() argument 2620 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); bnx2x_mcast_hdl_pending_add_e2() 2643 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_del_e2() argument 2650 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); bnx2x_mcast_hdl_pending_del_e2() 2673 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_restore_e2() argument 2677 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, bnx2x_mcast_hdl_pending_restore_e2() 2688 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, bnx2x_mcast_handle_pending_cmds_e2() argument 2699 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); bnx2x_mcast_handle_pending_cmds_e2() 2703 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); bnx2x_mcast_handle_pending_cmds_e2() 2707 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, bnx2x_mcast_handle_pending_cmds_e2() 2732 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, bnx2x_mcast_hdl_add() argument 2742 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); bnx2x_mcast_hdl_add() 2753 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, bnx2x_mcast_hdl_del() argument 2760 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); bnx2x_mcast_hdl_del() 2774 * @bp: device handle 2783 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, bnx2x_mcast_handle_current_cmd() argument 2795 
bnx2x_mcast_hdl_add(bp, o, p, &cnt); bnx2x_mcast_handle_current_cmd() 2799 bnx2x_mcast_hdl_del(bp, o, p, &cnt); bnx2x_mcast_handle_current_cmd() 2803 o->hdl_restore(bp, o, 0, &cnt); bnx2x_mcast_handle_current_cmd() 2817 static int bnx2x_mcast_validate_e2(struct bnx2x *bp, bnx2x_mcast_validate_e2() argument 2862 static void bnx2x_mcast_revert_e2(struct bnx2x *bp, bnx2x_mcast_revert_e2() argument 2875 * @bp: device handle 2879 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, bnx2x_mcast_set_rdata_hdr_e2() argument 2896 * @bp: device handle 2904 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e2() argument 2921 static int bnx2x_mcast_setup_e2(struct bnx2x *bp, bnx2x_mcast_setup_e2() argument 2934 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); bnx2x_mcast_setup_e2() 2947 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); bnx2x_mcast_setup_e2() 2958 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); bnx2x_mcast_setup_e2() 2976 bnx2x_mcast_refresh_registry_e2(bp, o); bnx2x_mcast_setup_e2() 2993 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, bnx2x_mcast_setup_e2() 3005 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, bnx2x_mcast_validate_e1h() argument 3016 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, bnx2x_mcast_revert_e1h() argument 3028 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, bnx2x_mcast_hdl_add_e1h() argument 3049 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, bnx2x_mcast_hdl_restore_e1h() argument 3067 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, bnx2x_mcast_setup_e1h() argument 3086 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); bnx2x_mcast_setup_e1h() 3099 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); bnx2x_mcast_setup_e1h() 3109 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); bnx2x_mcast_setup_e1h() 3121 static int bnx2x_mcast_validate_e1(struct bnx2x *bp, bnx2x_mcast_validate_e1() argument 3176 static void bnx2x_mcast_revert_e1(struct bnx2x *bp, bnx2x_mcast_revert_e1() argument 3192 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, bnx2x_mcast_set_one_rule_e1() argument 3222 * @bp: device handle 3226 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, bnx2x_mcast_set_rdata_hdr_e1() argument 3234 u8 offset = (CHIP_REV_IS_SLOW(bp) ? 
bnx2x_mcast_set_rdata_hdr_e1() 3249 * @bp: device handle 3260 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, bnx2x_mcast_handle_restore_cmd_e1() 3270 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); bnx2x_mcast_handle_restore_cmd_e1() 3284 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) bnx2x_mcast_handle_pending_cmds_e1() 3304 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); bnx2x_mcast_handle_pending_cmds_e1() 3319 o->hdl_restore(bp, o, 0, &cnt); bnx2x_mcast_handle_pending_cmds_e1() 3355 * @bp: device handle 3363 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e1() argument 3410 static int bnx2x_mcast_setup_e1(struct bnx2x *bp, bnx2x_mcast_setup_e1() argument 3430 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); bnx2x_mcast_setup_e1() 3438 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); bnx2x_mcast_setup_e1() 3450 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); bnx2x_mcast_setup_e1() 3458 rc = bnx2x_mcast_refresh_registry_e1(bp, o); bnx2x_mcast_setup_e1() 3477 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, bnx2x_mcast_setup_e1() 3511 int bnx2x_config_mcast(struct bnx2x *bp, bnx2x_config_mcast() argument 3525 rc = o->validate(bp, p, cmd); bnx2x_config_mcast() 3541 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); bnx2x_config_mcast() 3557 rc = o->config_mcast(bp, p, cmd); bnx2x_config_mcast() 3563 rc = o->wait_comp(bp, o); bnx2x_config_mcast() 3572 o->revert(bp, p, old_reg_size); bnx2x_config_mcast() 3601 void bnx2x_init_mcast_obj(struct bnx2x *bp, bnx2x_init_mcast_obj() argument 3621 if (CHIP_IS_E1(bp)) { bnx2x_init_mcast_obj() 3628 if (CHIP_REV_IS_SLOW(bp)) bnx2x_init_mcast_obj() 3647 } else if (CHIP_IS_E1H(bp)) { bnx2x_init_mcast_obj() 3899 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, bnx2x_init_mac_credit_pool() argument 3908 if (CHIP_IS_E1(bp)) { bnx2x_init_mac_credit_pool() 3910 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool() 3917 } else if (CHIP_IS_E1H(bp)) { bnx2x_init_mac_credit_pool() 3922 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool() 3938 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool() 3954 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, bnx2x_init_vlan_credit_pool() argument 3959 if (CHIP_IS_E1x(bp)) { bnx2x_init_vlan_credit_pool() 3981 * @bp: driver handle 3986 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, bnx2x_debug_print_ind_table() argument 4010 * @bp: device handle 4015 static int bnx2x_setup_rss(struct bnx2x *bp, bnx2x_setup_rss() argument 4091 if (netif_msg_ifup(bp)) bnx2x_setup_rss() 4092 bnx2x_debug_print_ind_table(bp, p); bnx2x_setup_rss() 4102 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, bnx2x_setup_rss() 4119 int bnx2x_config_rss(struct bnx2x *bp, bnx2x_config_rss() argument 4135 rc = o->config_rss(bp, p); bnx2x_config_rss() 4142 rc = r->wait_comp(bp, r); bnx2x_config_rss() 4147 void bnx2x_init_rss_config_obj(struct bnx2x *bp, bnx2x_init_rss_config_obj() argument 4166 * @bp: device handle 4175 int bnx2x_queue_state_change(struct bnx2x *bp, bnx2x_queue_state_change() argument 4183 rc = o->check_transition(bp, o, params); bnx2x_queue_state_change() 4196 o->complete_cmd(bp, o, pending_bit); bnx2x_queue_state_change() 4199 rc = o->send_cmd(bp, params); bnx2x_queue_state_change() 4208 rc = o->wait_comp(bp, o, pending_bit); bnx2x_queue_state_change() 4237 static int bnx2x_queue_wait_comp(struct bnx2x *bp, bnx2x_queue_wait_comp() argument 4241 return bnx2x_state_wait(bp, cmd, &o->pending); 
bnx2x_queue_wait_comp() 4247 * @bp: device handle 4253 static int bnx2x_queue_comp_cmd(struct bnx2x *bp, bnx2x_queue_comp_cmd() argument 4296 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_e2() argument 4309 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, bnx2x_q_fill_init_general_data() argument 4465 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn() argument 4469 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, bnx2x_q_fill_setup_data_cmn() 4490 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, bnx2x_q_fill_setup_tx_only() argument 4494 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, bnx2x_q_fill_setup_tx_only() 4513 * @bp: device handle 4521 static inline int bnx2x_q_init(struct bnx2x *bp, bnx2x_q_init() argument 4534 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, bnx2x_q_init() 4545 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, bnx2x_q_init() 4556 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); bnx2x_q_init() 4560 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); bnx2x_q_init() 4568 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, bnx2x_q_send_setup_e1x() argument 4581 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_send_setup_e1x() 4589 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], bnx2x_q_send_setup_e1x() 4594 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_send_setup_e2() argument 4607 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_send_setup_e2() 4608 bnx2x_q_fill_setup_data_e2(bp, params, rdata); bnx2x_q_send_setup_e2() 4616 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], bnx2x_q_send_setup_e2() 4621 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, bnx2x_q_send_setup_tx_only() argument 4647 bnx2x_q_fill_setup_tx_only(bp, params, rdata); bnx2x_q_send_setup_tx_only() 4659 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], bnx2x_q_send_setup_tx_only() 4664 static void bnx2x_q_fill_update_data(struct bnx2x *bp, bnx2x_q_fill_update_data() argument 4736 static inline int bnx2x_q_send_update(struct bnx2x *bp, bnx2x_q_send_update() argument 4757 bnx2x_q_fill_update_data(bp, o, update_params, rdata); bnx2x_q_send_update() 4765 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, bnx2x_q_send_update() 4773 * @bp: device handle 4778 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, bnx2x_q_send_deactivate() argument 4787 return bnx2x_q_send_update(bp, params); bnx2x_q_send_deactivate() 4793 * @bp: device handle 4798 static inline int bnx2x_q_send_activate(struct bnx2x *bp, bnx2x_q_send_activate() argument 4808 return bnx2x_q_send_update(bp, params); bnx2x_q_send_activate() 4811 static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, bnx2x_q_fill_update_tpa_data() argument 4833 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, bnx2x_q_send_update_tpa() argument 4848 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); bnx2x_q_send_update_tpa() 4863 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, bnx2x_q_send_update_tpa() 4869 static inline int bnx2x_q_send_halt(struct bnx2x *bp, bnx2x_q_send_halt() argument 4874 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, bnx2x_q_send_halt() 4879 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, bnx2x_q_send_cfc_del() argument 4891 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, bnx2x_q_send_cfc_del() 4895 static inline int bnx2x_q_send_terminate(struct bnx2x *bp, 
bnx2x_q_send_terminate() argument 4907 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, bnx2x_q_send_terminate() 4911 static inline int bnx2x_q_send_empty(struct bnx2x *bp, bnx2x_q_send_empty() argument 4916 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, bnx2x_q_send_empty() 4921 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, bnx2x_queue_send_cmd_cmn() argument 4926 return bnx2x_q_init(bp, params); bnx2x_queue_send_cmd_cmn() 4928 return bnx2x_q_send_setup_tx_only(bp, params); bnx2x_queue_send_cmd_cmn() 4930 return bnx2x_q_send_deactivate(bp, params); bnx2x_queue_send_cmd_cmn() 4932 return bnx2x_q_send_activate(bp, params); bnx2x_queue_send_cmd_cmn() 4934 return bnx2x_q_send_update(bp, params); bnx2x_queue_send_cmd_cmn() 4936 return bnx2x_q_send_update_tpa(bp, params); bnx2x_queue_send_cmd_cmn() 4938 return bnx2x_q_send_halt(bp, params); bnx2x_queue_send_cmd_cmn() 4940 return bnx2x_q_send_cfc_del(bp, params); bnx2x_queue_send_cmd_cmn() 4942 return bnx2x_q_send_terminate(bp, params); bnx2x_queue_send_cmd_cmn() 4944 return bnx2x_q_send_empty(bp, params); bnx2x_queue_send_cmd_cmn() 4951 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, bnx2x_queue_send_cmd_e1x() argument 4956 return bnx2x_q_send_setup_e1x(bp, params); bnx2x_queue_send_cmd_e1x() 4967 return bnx2x_queue_send_cmd_cmn(bp, params); bnx2x_queue_send_cmd_e1x() 4974 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, bnx2x_queue_send_cmd_e2() argument 4979 return bnx2x_q_send_setup_e2(bp, params); bnx2x_queue_send_cmd_e2() 4990 return bnx2x_queue_send_cmd_cmn(bp, params); bnx2x_queue_send_cmd_e2() 5000 * @bp: device handle 5013 static int bnx2x_queue_chk_transition(struct bnx2x *bp, bnx2x_queue_chk_transition() argument 5179 void bnx2x_init_queue_obj(struct bnx2x *bp, bnx2x_init_queue_obj() argument 5199 if (CHIP_IS_E1x(bp)) bnx2x_init_queue_obj() 5212 int bnx2x_get_q_logical_state(struct bnx2x *bp, bnx2x_get_q_logical_state() argument 5233 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, bnx2x_func_get_state() argument 5248 static int bnx2x_func_wait_comp(struct bnx2x *bp, bnx2x_func_wait_comp() argument 5252 return bnx2x_state_wait(bp, cmd, &o->pending); bnx2x_func_wait_comp() 5258 * @bp: device handle 5265 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, bnx2x_func_state_change_comp() argument 5273 cmd, BP_FUNC(bp), o->state, bnx2x_func_state_change_comp() 5280 cmd, BP_FUNC(bp), o->next_state); bnx2x_func_state_change_comp() 5299 * @bp: device handle 5305 static int bnx2x_func_comp_cmd(struct bnx2x *bp, bnx2x_func_comp_cmd() argument 5312 int rc = bnx2x_func_state_change_comp(bp, o, cmd); bnx2x_func_comp_cmd() 5319 * @bp: device handle 5331 static int bnx2x_func_chk_transition(struct bnx2x *bp, bnx2x_func_chk_transition() argument 5430 * @bp: device handle 5437 static inline int bnx2x_func_init_func(struct bnx2x *bp, bnx2x_func_init_func() argument 5440 return drv->init_hw_func(bp); bnx2x_func_init_func() 5446 * @bp: device handle 5454 static inline int bnx2x_func_init_port(struct bnx2x *bp, bnx2x_func_init_port() argument 5457 int rc = drv->init_hw_port(bp); bnx2x_func_init_port() 5461 return bnx2x_func_init_func(bp, drv); bnx2x_func_init_port() 5467 * @bp: device handle 5474 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, bnx2x_func_init_cmn_chip() argument 5477 int rc = drv->init_hw_cmn_chip(bp); bnx2x_func_init_cmn_chip() 5481 return bnx2x_func_init_port(bp, drv); bnx2x_func_init_cmn_chip() 5487 * @bp: device handle 5494 static inline int 
bnx2x_func_init_cmn(struct bnx2x *bp, bnx2x_func_init_cmn() argument 5497 int rc = drv->init_hw_cmn(bp); bnx2x_func_init_cmn() 5501 return bnx2x_func_init_port(bp, drv); bnx2x_func_init_cmn() 5504 static int bnx2x_func_hw_init(struct bnx2x *bp, bnx2x_func_hw_init() argument 5513 BP_ABS_FUNC(bp), load_code); bnx2x_func_hw_init() 5516 rc = drv->gunzip_init(bp); bnx2x_func_hw_init() 5521 rc = drv->init_fw(bp); bnx2x_func_hw_init() 5530 rc = bnx2x_func_init_cmn_chip(bp, drv); bnx2x_func_hw_init() 5536 rc = bnx2x_func_init_cmn(bp, drv); bnx2x_func_hw_init() 5542 rc = bnx2x_func_init_port(bp, drv); bnx2x_func_hw_init() 5548 rc = bnx2x_func_init_func(bp, drv); bnx2x_func_hw_init() 5559 drv->gunzip_end(bp); bnx2x_func_hw_init() 5565 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); bnx2x_func_hw_init() 5573 * @bp: device handle 5579 static inline void bnx2x_func_reset_func(struct bnx2x *bp, bnx2x_func_reset_func() argument 5582 drv->reset_hw_func(bp); bnx2x_func_reset_func() 5588 * @bp: device handle 5600 static inline void bnx2x_func_reset_port(struct bnx2x *bp, bnx2x_func_reset_port() argument 5603 drv->reset_hw_port(bp); bnx2x_func_reset_port() 5604 bnx2x_func_reset_func(bp, drv); bnx2x_func_reset_port() 5610 * @bp: device handle 5617 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, bnx2x_func_reset_cmn() argument 5620 bnx2x_func_reset_port(bp, drv); bnx2x_func_reset_cmn() 5621 drv->reset_hw_cmn(bp); bnx2x_func_reset_cmn() 5624 static inline int bnx2x_func_hw_reset(struct bnx2x *bp, bnx2x_func_hw_reset() argument 5631 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), bnx2x_func_hw_reset() 5636 bnx2x_func_reset_cmn(bp, drv); bnx2x_func_hw_reset() 5639 bnx2x_func_reset_port(bp, drv); bnx2x_func_hw_reset() 5642 bnx2x_func_reset_func(bp, drv); bnx2x_func_hw_reset() 5651 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); bnx2x_func_hw_reset() 5656 static inline int bnx2x_func_send_start(struct bnx2x *bp, bnx2x_func_send_start() argument 5670 rdata->path_id = BP_PATH(bp); bnx2x_func_send_start() 5700 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, bnx2x_func_send_start() 5705 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, bnx2x_func_send_switch_update() argument 5772 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, bnx2x_func_send_switch_update() 5777 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, bnx2x_func_send_afex_update() argument 5810 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, bnx2x_func_send_afex_update() 5816 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, bnx2x_func_send_afex_viflists() argument 5849 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, bnx2x_func_send_afex_viflists() 5854 static inline int bnx2x_func_send_stop(struct bnx2x *bp, bnx2x_func_send_stop() argument 5857 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, bnx2x_func_send_stop() 5861 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, bnx2x_func_send_tx_stop() argument 5864 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, bnx2x_func_send_tx_stop() 5867 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, bnx2x_func_send_tx_start() argument 5894 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, bnx2x_func_send_tx_start() 5900 int bnx2x_func_send_set_timesync(struct bnx2x *bp, bnx2x_func_send_set_timesync() argument 5930 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, bnx2x_func_send_set_timesync() 5935 
static int bnx2x_func_send_cmd(struct bnx2x *bp, bnx2x_func_send_cmd() argument 5940 return bnx2x_func_hw_init(bp, params); bnx2x_func_send_cmd() 5942 return bnx2x_func_send_start(bp, params); bnx2x_func_send_cmd() 5944 return bnx2x_func_send_stop(bp, params); bnx2x_func_send_cmd() 5946 return bnx2x_func_hw_reset(bp, params); bnx2x_func_send_cmd() 5948 return bnx2x_func_send_afex_update(bp, params); bnx2x_func_send_cmd() 5950 return bnx2x_func_send_afex_viflists(bp, params); bnx2x_func_send_cmd() 5952 return bnx2x_func_send_tx_stop(bp, params); bnx2x_func_send_cmd() 5954 return bnx2x_func_send_tx_start(bp, params); bnx2x_func_send_cmd() 5956 return bnx2x_func_send_switch_update(bp, params); bnx2x_func_send_cmd() 5958 return bnx2x_func_send_set_timesync(bp, params); bnx2x_func_send_cmd() 5965 void bnx2x_init_func_obj(struct bnx2x *bp, bnx2x_init_func_obj() argument 5990 * @bp: device handle 6000 int bnx2x_func_state_change(struct bnx2x *bp, bnx2x_func_state_change() argument 6011 rc = o->check_transition(bp, o, params); bnx2x_func_state_change() 6018 rc = o->check_transition(bp, o, params); bnx2x_func_state_change() 6035 bnx2x_func_state_change_comp(bp, o, cmd); bnx2x_func_state_change() 6039 rc = o->send_cmd(bp, params); bnx2x_func_state_change() 6051 rc = o->wait_comp(bp, o, cmd); bnx2x_func_state_change() 147 __bnx2x_exe_queue_reset_pending( struct bnx2x *bp, struct bnx2x_exe_queue_obj *o) __bnx2x_exe_queue_reset_pending() argument 246 bnx2x_exe_queue_alloc_elem( struct bnx2x *bp) bnx2x_exe_queue_alloc_elem() argument 699 bnx2x_check_move_always_err( struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) bnx2x_check_move_always_err() argument 1539 bnx2x_vlan_mac_get_registry_elem( struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, struct bnx2x_exeq_elem *elem, bool restore, struct bnx2x_vlan_mac_registry_elem **re) bnx2x_vlan_mac_get_registry_elem() argument 1715 bnx2x_vlan_mac_push_new_cmd( struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p) bnx2x_vlan_mac_push_new_cmd() argument 2577 bnx2x_mcast_handle_restore_cmd_e2( struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, int *rdata_idx) bnx2x_mcast_handle_restore_cmd_e2() argument 3259 bnx2x_mcast_handle_restore_cmd_e1( struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, int *rdata_idx) bnx2x_mcast_handle_restore_cmd_e1() argument 3283 bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) bnx2x_mcast_handle_pending_cmds_e1() argument
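The execution-queue fragments at the top of this file (bnx2x_exe_queue_add(), bnx2x_exe_queue_step(), __bnx2x_exe_queue_reset_pending()) describe a two-list command pipeline: a new element is optimized and validated before being linked into the exe queue, and each step splices a chunk onto a pending list, runs execute() on it, and clears pending on completion. A reduced userspace model of that flow, assuming fixed-size arrays and a toy validate() rule in place of the kernel lists and real ramrod posting:

#include <stdio.h>

#define QLEN 8

/* Illustrative model of bnx2x_exe_queue_obj: an exe list feeding a
 * pending list; validate() gates admission, "execute" drains pending. */
struct exe_queue {
	int exe[QLEN];
	int exe_len;
	int pending[QLEN];
	int pending_len;
};

static int validate(int cmd) { return cmd >= 0; }	/* toy rule */

static int queue_add(struct exe_queue *q, int cmd)
{
	if (!validate(cmd) || q->exe_len == QLEN)
		return -1;	/* the real code frees the element on failure */
	q->exe[q->exe_len++] = cmd;
	return 0;
}

static void queue_step(struct exe_queue *q)
{
	int i;

	if (q->pending_len)	/* a chunk is still in flight */
		return;

	/* Splice exe -> pending, then "post the ramrod" for the chunk. */
	for (i = 0; i < q->exe_len; i++)
		q->pending[q->pending_len++] = q->exe[i];
	q->exe_len = 0;

	for (i = 0; i < q->pending_len; i++)
		printf("execute cmd %d\n", q->pending[i]);
	q->pending_len = 0;	/* completion resets the pending list */
}

int main(void)
{
	struct exe_queue q = { .exe_len = 0, .pending_len = 0 };

	queue_add(&q, 1);
	queue_add(&q, 2);
	queue_step(&q);
	return 0;
}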
|
H A D | bnx2x_init.h | 205 static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) bnx2x_map_q_cos() argument 208 u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); bnx2x_map_q_cos() 216 if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { bnx2x_map_q_cos() 218 if (BP_PORT(bp)) { bnx2x_map_q_cos() 227 BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); bnx2x_map_q_cos() 231 REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); bnx2x_map_q_cos() 235 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos() 236 REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); bnx2x_map_q_cos() 240 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos() 241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); bnx2x_map_q_cos() 246 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { bnx2x_map_q_cos() 248 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos() 253 REG_WR(bp, reg_addr, reg_bit_map); bnx2x_map_q_cos() 260 static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, bnx2x_dcb_config_qm() argument 263 bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, bnx2x_dcb_config_qm() 265 bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, bnx2x_dcb_config_qm() 267 bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, bnx2x_dcb_config_qm() 271 bnx2x_map_q_cos(bp, BNX2X_ETH_Q, bnx2x_dcb_config_qm() 273 bnx2x_map_q_cos(bp, BNX2X_TOE_Q, bnx2x_dcb_config_qm() 275 bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, bnx2x_dcb_config_qm() 574 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ 576 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ 578 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ 579 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ 580 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ 674 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) bnx2x_set_mcp_parity() argument 680 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); bnx2x_set_mcp_parity() 687 REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); bnx2x_set_mcp_parity() 691 static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) bnx2x_parity_reg_mask() argument 693 if (CHIP_IS_E1(bp)) bnx2x_parity_reg_mask() 695 else if (CHIP_IS_E1H(bp)) bnx2x_parity_reg_mask() 697 else if (CHIP_IS_E2(bp)) bnx2x_parity_reg_mask() 703 static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) bnx2x_disable_blocks_parity() argument 708 u32 dis_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_disable_blocks_parity() 711 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, bnx2x_disable_blocks_parity() 720 bnx2x_set_mcp_parity(bp, false); bnx2x_disable_blocks_parity() 724 static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) bnx2x_clear_blocks_parity() argument 734 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity() 735 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity() 736 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity() 737 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity() 740 u32 reg_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_clear_blocks_parity() 743 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 
bnx2x_clear_blocks_parity() 754 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); bnx2x_clear_blocks_parity() 765 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); bnx2x_clear_blocks_parity() 768 static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) bnx2x_enable_blocks_parity() argument 773 u32 reg_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_enable_blocks_parity() 776 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, bnx2x_enable_blocks_parity() 781 bnx2x_set_mcp_parity(bp, true); bnx2x_enable_blocks_parity()
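bnx2x_map_q_cos() above migrates a queue between class-of-service VOQs with paired read-modify-write steps: read the old COS bitmap and clear the queue's bit, then set that bit in the new COS bitmap. The same bit migration modeled standalone, with a plain array standing in for the QM/PBF registers and purely illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define NUM_COS 4

static uint32_t voq_bitmap[NUM_COS];	/* stand-in for the per-VOQ registers */

/* Model of bnx2x_map_q_cos(): clear q's bit under old_cos, set it under new. */
static void map_q_cos(unsigned int q, unsigned int old_cos, unsigned int new_cos)
{
	uint32_t q_bit = 1u << q;

	voq_bitmap[old_cos] &= ~q_bit;	/* REG_RD + REG_WR, clear phase */
	voq_bitmap[new_cos] |= q_bit;	/* REG_RD + REG_WR, set phase */
}

int main(void)
{
	voq_bitmap[0] = 1u << 5;	/* queue 5 starts on COS 0 */
	map_q_cos(5, 0, 2);
	printf("cos0=0x%x cos2=0x%x\n",
	       (unsigned int)voq_bitmap[0], (unsigned int)voq_bitmap[2]);
	return 0;
}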
|
H A D | bnx2x_sp.h | 91 int (*wait_comp)(struct bnx2x *bp, 168 typedef int (*exe_q_validate)(struct bnx2x *bp, 172 typedef int (*exe_q_remove)(struct bnx2x *bp, 179 typedef int (*exe_q_optimize)(struct bnx2x *bp, 182 typedef int (*exe_q_execute)(struct bnx2x *bp, 324 int (*get_n_elements)(struct bnx2x *bp, 334 int (*check_add)(struct bnx2x *bp, 344 (*check_del)(struct bnx2x *bp, 353 bool (*check_move)(struct bnx2x *bp, 370 void (*set_one_rule)(struct bnx2x *bp, 383 * @param bp 392 int (*delete_all)(struct bnx2x *bp, 401 * @param bp 412 int (*restore)(struct bnx2x *bp, 419 * @param bp 433 int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 442 int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); 495 int (*config_rx_mode)(struct bnx2x *bp, 498 int (*wait_comp)(struct bnx2x *bp, 580 int (*config_mcast)(struct bnx2x *bp, 587 * @param bp 595 int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, 598 int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, 602 void (*set_one_rule)(struct bnx2x *bp, 620 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); 627 int (*validate)(struct bnx2x *bp, 634 void (*revert)(struct bnx2x *bp, 752 int (*config_rss)(struct bnx2x *bp, 1069 int (*send_cmd)(struct bnx2x *bp, 1081 int (*check_transition)(struct bnx2x *bp, 1088 int (*complete_cmd)(struct bnx2x *bp, 1092 int (*wait_comp)(struct bnx2x *bp, 1281 int (*init_hw_cmn_chip)(struct bnx2x *bp); 1282 int (*init_hw_cmn)(struct bnx2x *bp); 1283 int (*init_hw_port)(struct bnx2x *bp); 1284 int (*init_hw_func)(struct bnx2x *bp); 1287 void (*reset_hw_cmn)(struct bnx2x *bp); 1288 void (*reset_hw_port)(struct bnx2x *bp); 1289 void (*reset_hw_func)(struct bnx2x *bp); 1292 int (*gunzip_init)(struct bnx2x *bp); 1293 void (*gunzip_end)(struct bnx2x *bp); 1296 int (*init_fw)(struct bnx2x *bp); 1297 void (*release_fw)(struct bnx2x *bp); 1334 int (*send_cmd)(struct bnx2x *bp, 1340 int (*check_transition)(struct bnx2x *bp, 1347 int (*complete_cmd)(struct bnx2x *bp, 1351 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o, 1361 void bnx2x_init_func_obj(struct bnx2x *bp, 1367 int bnx2x_func_state_change(struct bnx2x *bp, 1370 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, 1373 void bnx2x_init_queue_obj(struct bnx2x *bp, 1378 int bnx2x_queue_state_change(struct bnx2x *bp, 1381 int bnx2x_get_q_logical_state(struct bnx2x *bp, 1385 void bnx2x_init_mac_obj(struct bnx2x *bp, 1392 void bnx2x_init_vlan_obj(struct bnx2x *bp, 1399 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, 1401 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, 1403 int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, 1405 int bnx2x_config_vlan_mac(struct bnx2x *bp, 1408 int bnx2x_vlan_mac_move(struct bnx2x *bp, 1414 void bnx2x_init_rx_mode_obj(struct bnx2x *bp, 1426 int bnx2x_config_rx_mode(struct bnx2x *bp, 1431 void bnx2x_init_mcast_obj(struct bnx2x *bp, 1458 int bnx2x_config_mcast(struct bnx2x *bp, 1463 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, 1466 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, 1471 void bnx2x_init_rss_config_obj(struct bnx2x *bp, 1483 int bnx2x_config_rss(struct bnx2x *bp,
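This header is organized around per-object tables of function pointers (exe_q_validate/remove/optimize/execute, config_rx_mode/wait_comp, send_cmd/check_transition/complete_cmd, ...) so one state machine can drive E1x and E2/E3 silicon through different callbacks. A compact sketch of the pattern, reduced to a hypothetical two-entry rx-mode ops table whose variant behaviors are invented for illustration:

#include <stdio.h>

struct dev;

/* Reduced analogue of the ops tables declared in bnx2x_sp.h. */
struct rx_mode_ops {
	int (*config_rx_mode)(struct dev *d);
	int (*wait_comp)(struct dev *d);
};

struct dev {
	const struct rx_mode_ops *ops;
};

static int cfg_e1x(struct dev *d)  { (void)d; puts("e1x: write MAC filters"); return 0; }
static int wait_e1x(struct dev *d) { (void)d; return 0; }	/* completes inline */
static int cfg_e2(struct dev *d)   { (void)d; puts("e2: post FILTER_RULES ramrod"); return 0; }
static int wait_e2(struct dev *d)  { (void)d; puts("e2: wait for completion"); return 0; }

static const struct rx_mode_ops e1x_ops = { cfg_e1x, wait_e1x };
static const struct rx_mode_ops e2_ops  = { cfg_e2,  wait_e2  };

/* Mirrors bnx2x_config_rx_mode(): issue, then wait, through the same table. */
static int config_rx_mode(struct dev *d)
{
	int rc = d->ops->config_rx_mode(d);

	return rc ? rc : d->ops->wait_comp(d);
}

int main(void)
{
	struct dev d1 = { .ops = &e1x_ops };	/* the driver picks by CHIP_IS_E1x() */
	struct dev d2 = { .ops = &e2_ops  };

	return config_rx_mode(&d1) || config_rx_mode(&d2);
}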
|
H A D | bnx2x_stats.h | 539 void bnx2x_memset_stats(struct bnx2x *bp); 540 void bnx2x_stats_init(struct bnx2x *bp); 541 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 542 int bnx2x_stats_safe_exec(struct bnx2x *bp, 549 * @bp: driver handle 551 void bnx2x_save_statistics(struct bnx2x *bp); 553 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
|
H A D | bnx2x_link.h | 319 struct bnx2x *bp; member in struct:link_params 425 int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], 429 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); 432 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); 448 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, 538 void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
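The "struct bnx2x *bp; member in struct:link_params" entry above (line 319) is why nearly every link routine in this listing opens with "struct bnx2x *bp = params->bp;": the params structure carries the device handle, so a single argument gives a helper both its configuration and the handle it needs for register access. A trimmed illustration of that back-pointer idiom, with placeholder structure contents:

#include <stdio.h>

struct bnx2x {
	int dummy;	/* placeholder device state */
};

struct link_params {
	struct bnx2x *bp;	/* back-pointer, as in bnx2x_link.h line 319 */
	unsigned int port;
};

/* Shape of e.g. bnx2x_check_over_curr(): unpack bp first, then work. */
static void check_over_curr(struct link_params *params)
{
	struct bnx2x *bp = params->bp;

	printf("checking port %u on device %p\n", params->port, (void *)bp);
}

int main(void)
{
	struct bnx2x dev = { 0 };
	struct link_params p = { .bp = &dev, .port = 0 };

	check_over_curr(&p);
	return 0;
}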
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_buf.c | 48 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) 49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) 50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder) 52 # define XB_SET_OWNER(bp) do { } while (0) 53 # define XB_CLEAR_OWNER(bp) do { } while (0) 54 # define XB_GET_OWNER(bp) do { } while (0) 63 struct xfs_buf *bp) xfs_buf_is_vmapped() 70 * to be both for b_addr and bp->b_page_count > 1. xfs_buf_is_vmapped() 72 return bp->b_addr && bp->b_page_count > 1; xfs_buf_is_vmapped() 77 struct xfs_buf *bp) xfs_buf_vmap_len() 79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; xfs_buf_vmap_len() 92 struct xfs_buf *bp) xfs_buf_stale() 94 ASSERT(xfs_buf_islocked(bp)); xfs_buf_stale() 96 bp->b_flags |= XBF_STALE; xfs_buf_stale() 103 bp->b_flags &= ~_XBF_DELWRI_Q; xfs_buf_stale() 105 spin_lock(&bp->b_lock); xfs_buf_stale() 106 atomic_set(&bp->b_lru_ref, 0); xfs_buf_stale() 107 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && xfs_buf_stale() 108 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) xfs_buf_stale() 109 atomic_dec(&bp->b_hold); xfs_buf_stale() 111 ASSERT(atomic_read(&bp->b_hold) >= 1); xfs_buf_stale() 112 spin_unlock(&bp->b_lock); xfs_buf_stale() 117 struct xfs_buf *bp, xfs_buf_get_maps() 120 ASSERT(bp->b_maps == NULL); xfs_buf_get_maps() 121 bp->b_map_count = map_count; xfs_buf_get_maps() 124 bp->b_maps = &bp->__b_map; xfs_buf_get_maps() 128 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), xfs_buf_get_maps() 130 if (!bp->b_maps) xfs_buf_get_maps() 140 struct xfs_buf *bp) xfs_buf_free_maps() 142 if (bp->b_maps != &bp->__b_map) { xfs_buf_free_maps() 143 kmem_free(bp->b_maps); xfs_buf_free_maps() 144 bp->b_maps = NULL; xfs_buf_free_maps() 155 struct xfs_buf *bp; _xfs_buf_alloc() local 159 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); _xfs_buf_alloc() 160 if (unlikely(!bp)) _xfs_buf_alloc() 169 atomic_set(&bp->b_hold, 1); _xfs_buf_alloc() 170 atomic_set(&bp->b_lru_ref, 1); _xfs_buf_alloc() 171 init_completion(&bp->b_iowait); _xfs_buf_alloc() 172 INIT_LIST_HEAD(&bp->b_lru); _xfs_buf_alloc() 173 INIT_LIST_HEAD(&bp->b_list); _xfs_buf_alloc() 174 RB_CLEAR_NODE(&bp->b_rbnode); _xfs_buf_alloc() 175 sema_init(&bp->b_sema, 0); /* held, no waiters */ _xfs_buf_alloc() 176 spin_lock_init(&bp->b_lock); _xfs_buf_alloc() 177 XB_SET_OWNER(bp); _xfs_buf_alloc() 178 bp->b_target = target; _xfs_buf_alloc() 179 bp->b_flags = flags; _xfs_buf_alloc() 186 error = xfs_buf_get_maps(bp, nmaps); _xfs_buf_alloc() 188 kmem_zone_free(xfs_buf_zone, bp); _xfs_buf_alloc() 192 bp->b_bn = map[0].bm_bn; _xfs_buf_alloc() 193 bp->b_length = 0; _xfs_buf_alloc() 195 bp->b_maps[i].bm_bn = map[i].bm_bn; _xfs_buf_alloc() 196 bp->b_maps[i].bm_len = map[i].bm_len; _xfs_buf_alloc() 197 bp->b_length += map[i].bm_len; _xfs_buf_alloc() 199 bp->b_io_length = bp->b_length; _xfs_buf_alloc() 201 atomic_set(&bp->b_pin_count, 0); _xfs_buf_alloc() 202 init_waitqueue_head(&bp->b_waiters); _xfs_buf_alloc() 205 trace_xfs_buf_init(bp, _RET_IP_); _xfs_buf_alloc() 207 return bp; _xfs_buf_alloc() 216 xfs_buf_t *bp, _xfs_buf_get_pages() 220 if (bp->b_pages == NULL) { _xfs_buf_get_pages() 221 bp->b_page_count = page_count; _xfs_buf_get_pages() 223 bp->b_pages = bp->b_page_array; _xfs_buf_get_pages() 225 bp->b_pages = kmem_alloc(sizeof(struct page *) * _xfs_buf_get_pages() 227 if (bp->b_pages == NULL) _xfs_buf_get_pages() 230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); _xfs_buf_get_pages() 240 xfs_buf_t *bp) _xfs_buf_free_pages() 242 if (bp->b_pages != bp->b_page_array) { 
_xfs_buf_free_pages() 243 kmem_free(bp->b_pages); _xfs_buf_free_pages() 244 bp->b_pages = NULL; _xfs_buf_free_pages() 257 xfs_buf_t *bp) xfs_buf_free() 259 trace_xfs_buf_free(bp, _RET_IP_); xfs_buf_free() 261 ASSERT(list_empty(&bp->b_lru)); xfs_buf_free() 263 if (bp->b_flags & _XBF_PAGES) { xfs_buf_free() 266 if (xfs_buf_is_vmapped(bp)) xfs_buf_free() 267 vm_unmap_ram(bp->b_addr - bp->b_offset, xfs_buf_free() 268 bp->b_page_count); xfs_buf_free() 270 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_free() 271 struct page *page = bp->b_pages[i]; xfs_buf_free() 275 } else if (bp->b_flags & _XBF_KMEM) xfs_buf_free() 276 kmem_free(bp->b_addr); xfs_buf_free() 277 _xfs_buf_free_pages(bp); xfs_buf_free() 278 xfs_buf_free_maps(bp); xfs_buf_free() 279 kmem_zone_free(xfs_buf_zone, bp); xfs_buf_free() 287 xfs_buf_t *bp, xfs_buf_allocate_memory() 302 size = BBTOB(bp->b_length); xfs_buf_allocate_memory() 304 bp->b_addr = kmem_alloc(size, KM_NOFS); xfs_buf_allocate_memory() 305 if (!bp->b_addr) { xfs_buf_allocate_memory() 310 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != xfs_buf_allocate_memory() 311 ((unsigned long)bp->b_addr & PAGE_MASK)) { xfs_buf_allocate_memory() 313 kmem_free(bp->b_addr); xfs_buf_allocate_memory() 314 bp->b_addr = NULL; xfs_buf_allocate_memory() 317 bp->b_offset = offset_in_page(bp->b_addr); xfs_buf_allocate_memory() 318 bp->b_pages = bp->b_page_array; xfs_buf_allocate_memory() 319 bp->b_pages[0] = virt_to_page(bp->b_addr); xfs_buf_allocate_memory() 320 bp->b_page_count = 1; xfs_buf_allocate_memory() 321 bp->b_flags |= _XBF_KMEM; xfs_buf_allocate_memory() 326 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; xfs_buf_allocate_memory() 327 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) xfs_buf_allocate_memory() 330 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_allocate_memory() 334 offset = bp->b_offset; xfs_buf_allocate_memory() 335 bp->b_flags |= _XBF_PAGES; xfs_buf_allocate_memory() 337 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_allocate_memory() 344 bp->b_page_count = i; xfs_buf_allocate_memory() 369 bp->b_pages[i] = page; xfs_buf_allocate_memory() 375 for (i = 0; i < bp->b_page_count; i++) xfs_buf_allocate_memory() 376 __free_page(bp->b_pages[i]); xfs_buf_allocate_memory() 385 xfs_buf_t *bp, _xfs_buf_map_pages() 388 ASSERT(bp->b_flags & _XBF_PAGES); _xfs_buf_map_pages() 389 if (bp->b_page_count == 1) { _xfs_buf_map_pages() 391 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; _xfs_buf_map_pages() 393 bp->b_addr = NULL; _xfs_buf_map_pages() 408 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, _xfs_buf_map_pages() 410 if (bp->b_addr) _xfs_buf_map_pages() 416 if (!bp->b_addr) _xfs_buf_map_pages() 418 bp->b_addr += bp->b_offset; _xfs_buf_map_pages() 445 xfs_buf_t *bp; _xfs_buf_find() local 485 bp = NULL; _xfs_buf_find() 488 bp = rb_entry(parent, struct xfs_buf, b_rbnode); _xfs_buf_find() 490 if (blkno < bp->b_bn) _xfs_buf_find() 492 else if (blkno > bp->b_bn) _xfs_buf_find() 503 if (bp->b_length != numblks) { _xfs_buf_find() 504 ASSERT(bp->b_flags & XBF_STALE); _xfs_buf_find() 508 atomic_inc(&bp->b_hold); _xfs_buf_find() 531 if (!xfs_buf_trylock(bp)) { _xfs_buf_find() 533 xfs_buf_rele(bp); _xfs_buf_find() 537 xfs_buf_lock(bp); _xfs_buf_find() 546 if (bp->b_flags & XBF_STALE) { _xfs_buf_find() 547 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); _xfs_buf_find() 548 ASSERT(bp->b_iodone == NULL); _xfs_buf_find() 549 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; _xfs_buf_find() 550 bp->b_ops = NULL; _xfs_buf_find() 553 
trace_xfs_buf_find(bp, flags, _RET_IP_); _xfs_buf_find() 555 return bp; _xfs_buf_find() 570 struct xfs_buf *bp; xfs_buf_get_map() local 574 bp = _xfs_buf_find(target, map, nmaps, flags, NULL); xfs_buf_get_map() 575 if (likely(bp)) xfs_buf_get_map() 588 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); xfs_buf_get_map() 589 if (!bp) { xfs_buf_get_map() 594 if (bp != new_bp) xfs_buf_get_map() 598 if (!bp->b_addr) { xfs_buf_get_map() 599 error = _xfs_buf_map_pages(bp, flags); xfs_buf_get_map() 603 xfs_buf_relse(bp); xfs_buf_get_map() 613 xfs_buf_ioerror(bp, 0); xfs_buf_get_map() 616 trace_xfs_buf_get(bp, flags, _RET_IP_); xfs_buf_get_map() 617 return bp; xfs_buf_get_map() 622 xfs_buf_t *bp, _xfs_buf_read() 626 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); _xfs_buf_read() 628 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); _xfs_buf_read() 629 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); _xfs_buf_read() 632 xfs_buf_submit(bp); _xfs_buf_read() 635 return xfs_buf_submit_wait(bp); _xfs_buf_read() 646 struct xfs_buf *bp; xfs_buf_read_map() local 650 bp = xfs_buf_get_map(target, map, nmaps, flags); xfs_buf_read_map() 651 if (bp) { xfs_buf_read_map() 652 trace_xfs_buf_read(bp, flags, _RET_IP_); xfs_buf_read_map() 654 if (!XFS_BUF_ISDONE(bp)) { xfs_buf_read_map() 656 bp->b_ops = ops; xfs_buf_read_map() 657 _xfs_buf_read(bp, flags); xfs_buf_read_map() 663 xfs_buf_relse(bp); xfs_buf_read_map() 667 bp->b_flags &= ~XBF_READ; xfs_buf_read_map() 671 return bp; xfs_buf_read_map() 705 struct xfs_buf *bp; xfs_buf_read_uncached() local 709 bp = xfs_buf_get_uncached(target, numblks, flags); xfs_buf_read_uncached() 710 if (!bp) xfs_buf_read_uncached() 714 ASSERT(bp->b_map_count == 1); xfs_buf_read_uncached() 715 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ xfs_buf_read_uncached() 716 bp->b_maps[0].bm_bn = daddr; xfs_buf_read_uncached() 717 bp->b_flags |= XBF_READ; xfs_buf_read_uncached() 718 bp->b_ops = ops; xfs_buf_read_uncached() 720 xfs_buf_submit_wait(bp); xfs_buf_read_uncached() 721 if (bp->b_error) { xfs_buf_read_uncached() 722 int error = bp->b_error; xfs_buf_read_uncached() 723 xfs_buf_relse(bp); xfs_buf_read_uncached() 727 *bpp = bp; xfs_buf_read_uncached() 737 struct xfs_buf *bp, xfs_buf_set_empty() 740 if (bp->b_pages) xfs_buf_set_empty() 741 _xfs_buf_free_pages(bp); xfs_buf_set_empty() 743 bp->b_pages = NULL; xfs_buf_set_empty() 744 bp->b_page_count = 0; xfs_buf_set_empty() 745 bp->b_addr = NULL; xfs_buf_set_empty() 746 bp->b_length = numblks; xfs_buf_set_empty() 747 bp->b_io_length = numblks; xfs_buf_set_empty() 749 ASSERT(bp->b_map_count == 1); xfs_buf_set_empty() 750 bp->b_bn = XFS_BUF_DADDR_NULL; xfs_buf_set_empty() 751 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; xfs_buf_set_empty() 752 bp->b_maps[0].bm_len = bp->b_length; xfs_buf_set_empty() 768 xfs_buf_t *bp, xfs_buf_associate_memory() 785 if (bp->b_pages) xfs_buf_associate_memory() 786 _xfs_buf_free_pages(bp); xfs_buf_associate_memory() 788 bp->b_pages = NULL; xfs_buf_associate_memory() 789 bp->b_addr = mem; xfs_buf_associate_memory() 791 rval = _xfs_buf_get_pages(bp, page_count); xfs_buf_associate_memory() 795 bp->b_offset = offset; xfs_buf_associate_memory() 797 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_associate_memory() 798 bp->b_pages[i] = mem_to_page((void *)pageaddr); xfs_buf_associate_memory() 802 bp->b_io_length = BTOBB(len); xfs_buf_associate_memory() 803 bp->b_length = BTOBB(buflen); xfs_buf_associate_memory() 816 struct xfs_buf *bp; xfs_buf_get_uncached() local 819 
bp = _xfs_buf_alloc(target, &map, 1, 0); xfs_buf_get_uncached() 820 if (unlikely(bp == NULL)) xfs_buf_get_uncached() 824 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_get_uncached() 829 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); xfs_buf_get_uncached() 830 if (!bp->b_pages[i]) xfs_buf_get_uncached() 833 bp->b_flags |= _XBF_PAGES; xfs_buf_get_uncached() 835 error = _xfs_buf_map_pages(bp, 0); xfs_buf_get_uncached() 842 trace_xfs_buf_get_uncached(bp, _RET_IP_); xfs_buf_get_uncached() 843 return bp; xfs_buf_get_uncached() 847 __free_page(bp->b_pages[i]); xfs_buf_get_uncached() 848 _xfs_buf_free_pages(bp); xfs_buf_get_uncached() 850 xfs_buf_free_maps(bp); xfs_buf_get_uncached() 851 kmem_zone_free(xfs_buf_zone, bp); xfs_buf_get_uncached() 863 xfs_buf_t *bp) xfs_buf_hold() 865 trace_xfs_buf_hold(bp, _RET_IP_); xfs_buf_hold() 866 atomic_inc(&bp->b_hold); xfs_buf_hold() 875 xfs_buf_t *bp) xfs_buf_rele() 877 struct xfs_perag *pag = bp->b_pag; xfs_buf_rele() 879 trace_xfs_buf_rele(bp, _RET_IP_); xfs_buf_rele() 882 ASSERT(list_empty(&bp->b_lru)); xfs_buf_rele() 883 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); xfs_buf_rele() 884 if (atomic_dec_and_test(&bp->b_hold)) xfs_buf_rele() 885 xfs_buf_free(bp); xfs_buf_rele() 889 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); xfs_buf_rele() 891 ASSERT(atomic_read(&bp->b_hold) > 0); xfs_buf_rele() 892 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { xfs_buf_rele() 893 spin_lock(&bp->b_lock); xfs_buf_rele() 894 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { xfs_buf_rele() 900 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { xfs_buf_rele() 901 bp->b_state &= ~XFS_BSTATE_DISPOSE; xfs_buf_rele() 902 atomic_inc(&bp->b_hold); xfs_buf_rele() 904 spin_unlock(&bp->b_lock); xfs_buf_rele() 913 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { xfs_buf_rele() 914 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); xfs_buf_rele() 916 ASSERT(list_empty(&bp->b_lru)); xfs_buf_rele() 918 spin_unlock(&bp->b_lock); xfs_buf_rele() 920 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); xfs_buf_rele() 921 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); xfs_buf_rele() 924 xfs_buf_free(bp); xfs_buf_rele() 943 struct xfs_buf *bp) xfs_buf_trylock() 947 locked = down_trylock(&bp->b_sema) == 0; xfs_buf_trylock() 949 XB_SET_OWNER(bp); xfs_buf_trylock() 951 trace_xfs_buf_trylock(bp, _RET_IP_); xfs_buf_trylock() 966 struct xfs_buf *bp) xfs_buf_lock() 968 trace_xfs_buf_lock(bp, _RET_IP_); xfs_buf_lock() 970 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_buf_lock() 971 xfs_log_force(bp->b_target->bt_mount, 0); xfs_buf_lock() 972 down(&bp->b_sema); xfs_buf_lock() 973 XB_SET_OWNER(bp); xfs_buf_lock() 975 trace_xfs_buf_lock_done(bp, _RET_IP_); xfs_buf_lock() 980 struct xfs_buf *bp) xfs_buf_unlock() 982 XB_CLEAR_OWNER(bp); xfs_buf_unlock() 983 up(&bp->b_sema); xfs_buf_unlock() 985 trace_xfs_buf_unlock(bp, _RET_IP_); xfs_buf_unlock() 990 xfs_buf_t *bp) xfs_buf_wait_unpin() 994 if (atomic_read(&bp->b_pin_count) == 0) xfs_buf_wait_unpin() 997 add_wait_queue(&bp->b_waiters, &wait); xfs_buf_wait_unpin() 1000 if (atomic_read(&bp->b_pin_count) == 0) xfs_buf_wait_unpin() 1004 remove_wait_queue(&bp->b_waiters, &wait); xfs_buf_wait_unpin() 1014 struct xfs_buf *bp) xfs_buf_ioend() 1016 bool read = bp->b_flags & XBF_READ; xfs_buf_ioend() 1018 trace_xfs_buf_iodone(bp, _RET_IP_); xfs_buf_ioend() 1020 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); xfs_buf_ioend() 1026 if (!bp->b_error && bp->b_io_error) xfs_buf_ioend() 1027 xfs_buf_ioerror(bp, bp->b_io_error); xfs_buf_ioend() 
1030 if (read && !bp->b_error && bp->b_ops) { xfs_buf_ioend() 1031 ASSERT(!bp->b_iodone); xfs_buf_ioend() 1032 bp->b_ops->verify_read(bp); xfs_buf_ioend() 1035 if (!bp->b_error) xfs_buf_ioend() 1036 bp->b_flags |= XBF_DONE; xfs_buf_ioend() 1038 if (bp->b_iodone) xfs_buf_ioend() 1039 (*(bp->b_iodone))(bp); xfs_buf_ioend() 1040 else if (bp->b_flags & XBF_ASYNC) xfs_buf_ioend() 1041 xfs_buf_relse(bp); xfs_buf_ioend() 1043 complete(&bp->b_iowait); xfs_buf_ioend() 1050 struct xfs_buf *bp = xfs_buf_ioend_work() local 1053 xfs_buf_ioend(bp); xfs_buf_ioend_work() 1058 struct xfs_buf *bp) xfs_buf_ioend_async() 1060 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); xfs_buf_ioend_async() 1061 queue_work(bp->b_ioend_wq, &bp->b_ioend_work); xfs_buf_ioend_async() 1066 xfs_buf_t *bp, xfs_buf_ioerror() 1070 bp->b_error = error; xfs_buf_ioerror() 1071 trace_xfs_buf_ioerror(bp, error, _RET_IP_); xfs_buf_ioerror() 1076 struct xfs_buf *bp, xfs_buf_ioerror_alert() 1079 xfs_alert(bp->b_target->bt_mount, xfs_buf_ioerror_alert() 1081 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); xfs_buf_ioerror_alert() 1086 struct xfs_buf *bp) xfs_bwrite() 1090 ASSERT(xfs_buf_islocked(bp)); xfs_bwrite() 1092 bp->b_flags |= XBF_WRITE; xfs_bwrite() 1093 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | xfs_bwrite() 1096 error = xfs_buf_submit_wait(bp); xfs_bwrite() 1098 xfs_force_shutdown(bp->b_target->bt_mount, xfs_bwrite() 1109 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; xfs_buf_bio_end_io() local 1116 spin_lock(&bp->b_lock); xfs_buf_bio_end_io() 1117 if (!bp->b_io_error) xfs_buf_bio_end_io() 1118 bp->b_io_error = error; xfs_buf_bio_end_io() 1119 spin_unlock(&bp->b_lock); xfs_buf_bio_end_io() 1122 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) xfs_buf_bio_end_io() 1123 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); xfs_buf_bio_end_io() 1125 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_bio_end_io() 1126 xfs_buf_ioend_async(bp); xfs_buf_bio_end_io() 1132 struct xfs_buf *bp, xfs_buf_ioapply_map() 1139 int total_nr_pages = bp->b_page_count; xfs_buf_ioapply_map() 1142 sector_t sector = bp->b_maps[map].bm_bn; xfs_buf_ioapply_map() 1146 total_nr_pages = bp->b_page_count; xfs_buf_ioapply_map() 1160 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); xfs_buf_ioapply_map() 1165 atomic_inc(&bp->b_io_remaining); xfs_buf_ioapply_map() 1171 bio->bi_bdev = bp->b_target->bt_bdev; xfs_buf_ioapply_map() 1174 bio->bi_private = bp; xfs_buf_ioapply_map() 1183 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, xfs_buf_ioapply_map() 1195 if (xfs_buf_is_vmapped(bp)) { xfs_buf_ioapply_map() 1196 flush_kernel_vmap_range(bp->b_addr, xfs_buf_ioapply_map() 1197 xfs_buf_vmap_len(bp)); xfs_buf_ioapply_map() 1207 atomic_dec(&bp->b_io_remaining); xfs_buf_ioapply_map() 1208 xfs_buf_ioerror(bp, -EIO); xfs_buf_ioapply_map() 1216 struct xfs_buf *bp) _xfs_buf_ioapply() 1228 bp->b_error = 0; _xfs_buf_ioapply() 1234 if (!bp->b_ioend_wq) _xfs_buf_ioapply() 1235 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; _xfs_buf_ioapply() 1237 if (bp->b_flags & XBF_WRITE) { _xfs_buf_ioapply() 1238 if (bp->b_flags & XBF_SYNCIO) _xfs_buf_ioapply() 1242 if (bp->b_flags & XBF_FUA) _xfs_buf_ioapply() 1244 if (bp->b_flags & XBF_FLUSH) _xfs_buf_ioapply() 1252 if (bp->b_ops) { _xfs_buf_ioapply() 1253 bp->b_ops->verify_write(bp); _xfs_buf_ioapply() 1254 if (bp->b_error) { _xfs_buf_ioapply() 1255 xfs_force_shutdown(bp->b_target->bt_mount, _xfs_buf_ioapply() 1259 } else if (bp->b_bn != 
XFS_BUF_DADDR_NULL) { _xfs_buf_ioapply() 1260 struct xfs_mount *mp = bp->b_target->bt_mount; _xfs_buf_ioapply() 1269 __func__, bp->b_bn, bp->b_length); _xfs_buf_ioapply() 1270 xfs_hex_dump(bp->b_addr, 64); _xfs_buf_ioapply() 1274 } else if (bp->b_flags & XBF_READ_AHEAD) { _xfs_buf_ioapply() 1289 offset = bp->b_offset; _xfs_buf_ioapply() 1290 size = BBTOB(bp->b_io_length); _xfs_buf_ioapply() 1292 for (i = 0; i < bp->b_map_count; i++) { _xfs_buf_ioapply() 1293 xfs_buf_ioapply_map(bp, i, &offset, &size, rw); _xfs_buf_ioapply() 1294 if (bp->b_error) _xfs_buf_ioapply() 1310 struct xfs_buf *bp) xfs_buf_submit() 1312 trace_xfs_buf_submit(bp, _RET_IP_); xfs_buf_submit() 1314 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); xfs_buf_submit() 1315 ASSERT(bp->b_flags & XBF_ASYNC); xfs_buf_submit() 1318 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xfs_buf_submit() 1319 xfs_buf_ioerror(bp, -EIO); xfs_buf_submit() 1320 bp->b_flags &= ~XBF_DONE; xfs_buf_submit() 1321 xfs_buf_stale(bp); xfs_buf_submit() 1322 xfs_buf_ioend(bp); xfs_buf_submit() 1326 if (bp->b_flags & XBF_WRITE) xfs_buf_submit() 1327 xfs_buf_wait_unpin(bp); xfs_buf_submit() 1330 bp->b_io_error = 0; xfs_buf_submit() 1340 xfs_buf_hold(bp); xfs_buf_submit() 1347 atomic_set(&bp->b_io_remaining, 1); xfs_buf_submit() 1348 _xfs_buf_ioapply(bp); xfs_buf_submit() 1355 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { xfs_buf_submit() 1356 if (bp->b_error) xfs_buf_submit() 1357 xfs_buf_ioend(bp); xfs_buf_submit() 1359 xfs_buf_ioend_async(bp); xfs_buf_submit() 1362 xfs_buf_rele(bp); xfs_buf_submit() 1363 /* Note: it is not safe to reference bp now we've dropped our ref */ xfs_buf_submit() 1371 struct xfs_buf *bp) xfs_buf_submit_wait() 1375 trace_xfs_buf_submit_wait(bp, _RET_IP_); xfs_buf_submit_wait() 1377 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC))); xfs_buf_submit_wait() 1379 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xfs_buf_submit_wait() 1380 xfs_buf_ioerror(bp, -EIO); xfs_buf_submit_wait() 1381 xfs_buf_stale(bp); xfs_buf_submit_wait() 1382 bp->b_flags &= ~XBF_DONE; xfs_buf_submit_wait() 1386 if (bp->b_flags & XBF_WRITE) xfs_buf_submit_wait() 1387 xfs_buf_wait_unpin(bp); xfs_buf_submit_wait() 1390 bp->b_io_error = 0; xfs_buf_submit_wait() 1398 xfs_buf_hold(bp); xfs_buf_submit_wait() 1405 atomic_set(&bp->b_io_remaining, 1); xfs_buf_submit_wait() 1406 _xfs_buf_ioapply(bp); xfs_buf_submit_wait() 1412 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_submit_wait() 1413 xfs_buf_ioend(bp); xfs_buf_submit_wait() 1416 trace_xfs_buf_iowait(bp, _RET_IP_); xfs_buf_submit_wait() 1417 wait_for_completion(&bp->b_iowait); xfs_buf_submit_wait() 1418 trace_xfs_buf_iowait_done(bp, _RET_IP_); xfs_buf_submit_wait() 1419 error = bp->b_error; xfs_buf_submit_wait() 1425 xfs_buf_rele(bp); xfs_buf_submit_wait() 1431 xfs_buf_t *bp, xfs_buf_offset() 1436 if (bp->b_addr) xfs_buf_offset() 1437 return bp->b_addr + offset; xfs_buf_offset() 1439 offset += bp->b_offset; xfs_buf_offset() 1440 page = bp->b_pages[offset >> PAGE_SHIFT]; xfs_buf_offset() 1449 xfs_buf_t *bp, /* buffer to process */ xfs_buf_iomove() 1462 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; xfs_buf_iomove() 1463 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; xfs_buf_iomove() 1464 page = bp->b_pages[page_index]; xfs_buf_iomove() 1466 BBTOB(bp->b_io_length) - boff); xfs_buf_iomove() 1503 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); xfs_buftarg_wait_rele() local 1506 if (atomic_read(&bp->b_hold) > 1) { xfs_buftarg_wait_rele() 1508 trace_xfs_buf_wait_buftarg(bp, 
_RET_IP_); xfs_buftarg_wait_rele() 1511 if (!spin_trylock(&bp->b_lock)) xfs_buftarg_wait_rele() 1518 atomic_set(&bp->b_lru_ref, 0); xfs_buftarg_wait_rele() 1519 bp->b_state |= XFS_BSTATE_DISPOSE; xfs_buftarg_wait_rele() 1521 spin_unlock(&bp->b_lock); xfs_buftarg_wait_rele() 1548 struct xfs_buf *bp; xfs_wait_buftarg() local 1549 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); xfs_wait_buftarg() 1550 list_del_init(&bp->b_lru); xfs_wait_buftarg() 1551 if (bp->b_flags & XBF_WRITE_FAIL) { xfs_wait_buftarg() 1555 (long long)bp->b_bn); xfs_wait_buftarg() 1557 xfs_buf_rele(bp); xfs_wait_buftarg() 1571 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); xfs_buftarg_isolate() local 1575 * we are inverting the lru lock/bp->b_lock here, so use a trylock. xfs_buftarg_isolate() 1578 if (!spin_trylock(&bp->b_lock)) xfs_buftarg_isolate() 1585 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { xfs_buftarg_isolate() 1586 spin_unlock(&bp->b_lock); xfs_buftarg_isolate() 1590 bp->b_state |= XFS_BSTATE_DISPOSE; xfs_buftarg_isolate() 1592 spin_unlock(&bp->b_lock); xfs_buftarg_isolate() 1610 struct xfs_buf *bp; xfs_buftarg_shrink_scan() local 1611 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); xfs_buftarg_shrink_scan() 1612 list_del_init(&bp->b_lru); xfs_buftarg_shrink_scan() 1613 xfs_buf_rele(bp); xfs_buftarg_shrink_scan() 1728 struct xfs_buf *bp, xfs_buf_delwri_queue() 1731 ASSERT(xfs_buf_islocked(bp)); xfs_buf_delwri_queue() 1732 ASSERT(!(bp->b_flags & XBF_READ)); xfs_buf_delwri_queue() 1739 if (bp->b_flags & _XBF_DELWRI_Q) { xfs_buf_delwri_queue() 1740 trace_xfs_buf_delwri_queued(bp, _RET_IP_); xfs_buf_delwri_queue() 1744 trace_xfs_buf_delwri_queue(bp, _RET_IP_); xfs_buf_delwri_queue() 1754 bp->b_flags |= _XBF_DELWRI_Q; xfs_buf_delwri_queue() 1755 if (list_empty(&bp->b_list)) { xfs_buf_delwri_queue() 1756 atomic_inc(&bp->b_hold); xfs_buf_delwri_queue() 1757 list_add_tail(&bp->b_list, list); xfs_buf_delwri_queue() 1775 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); xfs_buf_cmp() local 1778 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; xfs_buf_cmp() 1793 struct xfs_buf *bp, *n; __xfs_buf_delwri_submit() local 1796 list_for_each_entry_safe(bp, n, buffer_list, b_list) { list_for_each_entry_safe() 1798 if (xfs_buf_ispinned(bp)) { list_for_each_entry_safe() 1802 if (!xfs_buf_trylock(bp)) list_for_each_entry_safe() 1805 xfs_buf_lock(bp); list_for_each_entry_safe() 1814 if (!(bp->b_flags & _XBF_DELWRI_Q)) { list_for_each_entry_safe() 1815 list_del_init(&bp->b_list); list_for_each_entry_safe() 1816 xfs_buf_relse(bp); list_for_each_entry_safe() 1820 list_move_tail(&bp->b_list, io_list); list_for_each_entry_safe() 1821 trace_xfs_buf_delwri_split(bp, _RET_IP_); list_for_each_entry_safe() 1827 list_for_each_entry_safe(bp, n, io_list, b_list) { list_for_each_entry_safe() 1828 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); list_for_each_entry_safe() 1829 bp->b_flags |= XBF_WRITE | XBF_ASYNC; list_for_each_entry_safe() 1837 xfs_buf_hold(bp); list_for_each_entry_safe() 1839 list_del_init(&bp->b_list); list_for_each_entry_safe() 1841 xfs_buf_submit(bp); list_for_each_entry_safe() 1879 struct xfs_buf *bp; xfs_buf_delwri_submit() local 1885 bp = list_first_entry(&io_list, struct xfs_buf, b_list); xfs_buf_delwri_submit() 1887 list_del_init(&bp->b_list); xfs_buf_delwri_submit() 1890 xfs_buf_lock(bp); xfs_buf_delwri_submit() 1891 error2 = bp->b_error; xfs_buf_delwri_submit() 1892 xfs_buf_relse(bp); xfs_buf_delwri_submit() 62 xfs_buf_is_vmapped( struct xfs_buf *bp) 
xfs_buf_is_vmapped() argument 76 xfs_buf_vmap_len( struct xfs_buf *bp) xfs_buf_vmap_len() argument 91 xfs_buf_stale( struct xfs_buf *bp) xfs_buf_stale() argument 116 xfs_buf_get_maps( struct xfs_buf *bp, int map_count) xfs_buf_get_maps() argument 139 xfs_buf_free_maps( struct xfs_buf *bp) xfs_buf_free_maps() argument 215 _xfs_buf_get_pages( xfs_buf_t *bp, int page_count) _xfs_buf_get_pages() argument 239 _xfs_buf_free_pages( xfs_buf_t *bp) _xfs_buf_free_pages() argument 256 xfs_buf_free( xfs_buf_t *bp) xfs_buf_free() argument 286 xfs_buf_allocate_memory( xfs_buf_t *bp, uint flags) xfs_buf_allocate_memory() argument 384 _xfs_buf_map_pages( xfs_buf_t *bp, uint flags) _xfs_buf_map_pages() argument 621 _xfs_buf_read( xfs_buf_t *bp, xfs_buf_flags_t flags) _xfs_buf_read() argument 736 xfs_buf_set_empty( struct xfs_buf *bp, size_t numblks) xfs_buf_set_empty() argument 767 xfs_buf_associate_memory( xfs_buf_t *bp, void *mem, size_t len) xfs_buf_associate_memory() argument 862 xfs_buf_hold( xfs_buf_t *bp) xfs_buf_hold() argument 874 xfs_buf_rele( xfs_buf_t *bp) xfs_buf_rele() argument 942 xfs_buf_trylock( struct xfs_buf *bp) xfs_buf_trylock() argument 965 xfs_buf_lock( struct xfs_buf *bp) xfs_buf_lock() argument 979 xfs_buf_unlock( struct xfs_buf *bp) xfs_buf_unlock() argument 989 xfs_buf_wait_unpin( xfs_buf_t *bp) xfs_buf_wait_unpin() argument 1013 xfs_buf_ioend( struct xfs_buf *bp) xfs_buf_ioend() argument 1057 xfs_buf_ioend_async( struct xfs_buf *bp) xfs_buf_ioend_async() argument 1065 xfs_buf_ioerror( xfs_buf_t *bp, int error) xfs_buf_ioerror() argument 1075 xfs_buf_ioerror_alert( struct xfs_buf *bp, const char *func) xfs_buf_ioerror_alert() argument 1085 xfs_bwrite( struct xfs_buf *bp) xfs_bwrite() argument 1131 xfs_buf_ioapply_map( struct xfs_buf *bp, int map, int *buf_offset, int *count, int rw) xfs_buf_ioapply_map() argument 1215 _xfs_buf_ioapply( struct xfs_buf *bp) _xfs_buf_ioapply() argument 1309 xfs_buf_submit( struct xfs_buf *bp) xfs_buf_submit() argument 1370 xfs_buf_submit_wait( struct xfs_buf *bp) xfs_buf_submit_wait() argument 1430 xfs_buf_offset( xfs_buf_t *bp, size_t offset) xfs_buf_offset() argument 1448 xfs_buf_iomove( xfs_buf_t *bp, size_t boff, size_t bsize, void *data, xfs_buf_rw_t mode) xfs_buf_iomove() argument 1727 xfs_buf_delwri_queue( struct xfs_buf *bp, struct list_head *list) xfs_buf_delwri_queue() argument
|
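The xfs_buf.c excerpts above cover the whole buffer lifecycle: lookup (_xfs_buf_find), get/read, hold/rele, lock/unlock, submit and ioend. As a minimal sketch of the synchronous uncached read path, mirroring the xfs_buf_read_uncached() steps excerpted above (the helper name my_read_one_block is hypothetical, and the code assumes the fs/xfs internal headers):

static int
my_read_one_block(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	const struct xfs_buf_ops *ops,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, 1, 0);	/* one basic block */
	if (!bp)
		return -ENOMEM;

	/* uncached buffers are not indexed; only the map is meaningful */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;			/* read verifier, may be NULL */

	xfs_buf_submit_wait(bp);		/* synchronous I/O */
	if (bp->b_error) {
		int error = bp->b_error;

		xfs_buf_relse(bp);		/* unlock and drop the hold */
		return error;
	}

	*bpp = bp;				/* caller must xfs_buf_relse() */
	return 0;
}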
H A D | xfs_buf.h | 268 void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks); 269 int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length); 276 void xfs_buf_hold(struct xfs_buf *bp); 286 #define xfs_buf_islocked(bp) \ 287 ((bp)->b_sema.count <= 0) 290 extern int xfs_bwrite(struct xfs_buf *bp); 291 extern void xfs_buf_ioend(struct xfs_buf *bp); 294 extern void xfs_buf_submit(struct xfs_buf *bp); 295 extern int xfs_buf_submit_wait(struct xfs_buf *bp); 298 #define xfs_buf_zero(bp, off, len) \ 299 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) 313 #define XFS_BUF_ZEROFLAGS(bp) \ 314 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ 318 void xfs_buf_stale(struct xfs_buf *bp); 319 #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) 320 #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) 322 #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) 323 #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) 324 #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) 326 #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) 327 #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) 328 #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) 330 #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) 331 #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) 332 #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) 334 #define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE) 335 #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) 336 #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) 348 #define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn) 349 #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno)) 351 static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) xfs_buf_set_ref() argument 353 atomic_set(&bp->b_lru_ref, lru_ref); xfs_buf_set_ref() 356 static inline int xfs_buf_ispinned(struct xfs_buf *bp) xfs_buf_ispinned() argument 358 return atomic_read(&bp->b_pin_count); xfs_buf_ispinned() 361 static inline void xfs_buf_relse(xfs_buf_t *bp) xfs_buf_relse() argument 363 xfs_buf_unlock(bp); xfs_buf_relse() 364 xfs_buf_rele(bp); xfs_buf_relse() 368 xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset) xfs_buf_verify_cksum() argument 370 return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), xfs_buf_verify_cksum() 375 xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset) xfs_buf_update_cksum() argument 377 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), xfs_buf_update_cksum()
|
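A short illustrative sketch of how the xfs_buf.h helpers above compose; the function is hypothetical and only strings the excerpted macros together:

static void
my_mark_done_and_release(struct xfs_buf *bp)	/* hypothetical */
{
	ASSERT(xfs_buf_islocked(bp));		/* b_sema.count <= 0 */
	if (!XFS_BUF_ISDONE(bp))		/* tests XBF_DONE */
		XFS_BUF_DONE(bp);		/* sets XBF_DONE */
	xfs_buf_relse(bp);	/* xfs_buf_unlock() then xfs_buf_rele() */
}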
H A D | xfs_fsops.c | 127 struct xfs_buf *bp; xfs_growfs_get_hdr_buf() local 129 bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags); xfs_growfs_get_hdr_buf() 130 if (!bp) xfs_growfs_get_hdr_buf() 133 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); xfs_growfs_get_hdr_buf() 134 bp->b_bn = blkno; xfs_growfs_get_hdr_buf() 135 bp->b_maps[0].bm_bn = blkno; xfs_growfs_get_hdr_buf() 136 bp->b_ops = ops; xfs_growfs_get_hdr_buf() 138 return bp; xfs_growfs_get_hdr_buf() 153 xfs_buf_t *bp; xfs_growfs_data_private() local 175 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL); xfs_growfs_data_private() 178 xfs_buf_relse(bp); xfs_growfs_data_private() 220 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 224 if (!bp) { xfs_growfs_data_private() 229 agf = XFS_BUF_TO_AGF(bp); xfs_growfs_data_private() 253 error = xfs_bwrite(bp); xfs_growfs_data_private() 254 xfs_buf_relse(bp); xfs_growfs_data_private() 261 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 265 if (!bp) { xfs_growfs_data_private() 270 agfl = XFS_BUF_TO_AGFL(bp); xfs_growfs_data_private() 277 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); xfs_growfs_data_private() 281 error = xfs_bwrite(bp); xfs_growfs_data_private() 282 xfs_buf_relse(bp); xfs_growfs_data_private() 289 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 293 if (!bp) { xfs_growfs_data_private() 298 agi = XFS_BUF_TO_AGI(bp); xfs_growfs_data_private() 318 error = xfs_bwrite(bp); xfs_growfs_data_private() 319 xfs_buf_relse(bp); xfs_growfs_data_private() 326 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 331 if (!bp) { xfs_growfs_data_private() 337 xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1, xfs_growfs_data_private() 340 xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1, xfs_growfs_data_private() 343 arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1); xfs_growfs_data_private() 348 error = xfs_bwrite(bp); xfs_growfs_data_private() 349 xfs_buf_relse(bp); xfs_growfs_data_private() 356 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 360 if (!bp) { xfs_growfs_data_private() 366 xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1, xfs_growfs_data_private() 369 xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1, xfs_growfs_data_private() 372 arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1); xfs_growfs_data_private() 378 error = xfs_bwrite(bp); xfs_growfs_data_private() 379 xfs_buf_relse(bp); xfs_growfs_data_private() 386 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 390 if (!bp) { xfs_growfs_data_private() 396 xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0, xfs_growfs_data_private() 399 xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0, xfs_growfs_data_private() 402 error = xfs_bwrite(bp); xfs_growfs_data_private() 403 xfs_buf_relse(bp); xfs_growfs_data_private() 411 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private() 415 if (!bp) { xfs_growfs_data_private() 421 xfs_btree_init_block(mp, bp, XFS_FIBT_CRC_MAGIC, xfs_growfs_data_private() 425 xfs_btree_init_block(mp, bp, XFS_FIBT_MAGIC, 0, xfs_growfs_data_private() 428 error = xfs_bwrite(bp); xfs_growfs_data_private() 429 xfs_buf_relse(bp); xfs_growfs_data_private() 443 error = xfs_ialloc_read_agi(mp, tp, agno, &bp); xfs_growfs_data_private() 447 ASSERT(bp); xfs_growfs_data_private() 448 agi = XFS_BUF_TO_AGI(bp); xfs_growfs_data_private() 452 xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); xfs_growfs_data_private() 456 error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp); xfs_growfs_data_private() 460 ASSERT(bp); xfs_growfs_data_private() 461 agf = XFS_BUF_TO_AGF(bp); 
xfs_growfs_data_private() 466 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); xfs_growfs_data_private() 518 XFS_FSS_TO_BB(mp, 1), 0, &bp, xfs_growfs_data_private() 521 bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp, xfs_growfs_data_private() 524 if (bp) { xfs_growfs_data_private() 525 bp->b_ops = &xfs_sb_buf_ops; xfs_growfs_data_private() 526 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); xfs_growfs_data_private() 545 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); xfs_growfs_data_private() 547 error = xfs_bwrite(bp); xfs_growfs_data_private() 548 xfs_buf_relse(bp); xfs_growfs_data_private()
|
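The growfs path above builds each new AG header the same way. A sketch of that pattern, assuming the same arguments as the excerpted xfs_growfs_get_hdr_buf():

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));	/* start from zeroes */
	bp->b_bn = blkno;			/* aim the uncached buffer */
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;			/* write verifier for the header */

The caller then formats the AGF/AGFL/AGI through bp->b_addr, writes it with xfs_bwrite(bp) and drops it with xfs_buf_relse(bp), exactly as each header block is handled in the excerpt.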
H A D | xfs_trans_buf.c | 77 struct xfs_buf *bp, _xfs_trans_bjoin() 82 ASSERT(bp->b_transp == NULL); _xfs_trans_bjoin() 89 xfs_buf_item_init(bp, tp->t_mountp); _xfs_trans_bjoin() 90 bip = bp->b_fspriv; _xfs_trans_bjoin() 111 bp->b_transp = tp; _xfs_trans_bjoin() 118 struct xfs_buf *bp) xfs_trans_bjoin() 120 _xfs_trans_bjoin(tp, bp, 0); xfs_trans_bjoin() 121 trace_xfs_trans_bjoin(bp->b_fspriv); xfs_trans_bjoin() 141 xfs_buf_t *bp; xfs_trans_get_buf_map() local 153 bp = xfs_trans_buf_item_match(tp, target, map, nmaps); xfs_trans_get_buf_map() 154 if (bp != NULL) { xfs_trans_get_buf_map() 155 ASSERT(xfs_buf_islocked(bp)); xfs_trans_get_buf_map() 157 xfs_buf_stale(bp); xfs_trans_get_buf_map() 158 XFS_BUF_DONE(bp); xfs_trans_get_buf_map() 161 ASSERT(bp->b_transp == tp); xfs_trans_get_buf_map() 162 bip = bp->b_fspriv; xfs_trans_get_buf_map() 167 return bp; xfs_trans_get_buf_map() 170 bp = xfs_buf_get_map(target, map, nmaps, flags); xfs_trans_get_buf_map() 171 if (bp == NULL) { xfs_trans_get_buf_map() 175 ASSERT(!bp->b_error); xfs_trans_get_buf_map() 177 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_get_buf_map() 178 trace_xfs_trans_get_buf(bp->b_fspriv); xfs_trans_get_buf_map() 179 return bp; xfs_trans_get_buf_map() 195 xfs_buf_t *bp; xfs_trans_getsb() local 211 bp = mp->m_sb_bp; xfs_trans_getsb() 212 if (bp->b_transp == tp) { xfs_trans_getsb() 213 bip = bp->b_fspriv; xfs_trans_getsb() 218 return bp; xfs_trans_getsb() 221 bp = xfs_getsb(mp, flags); xfs_trans_getsb() 222 if (bp == NULL) xfs_trans_getsb() 225 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_getsb() 226 trace_xfs_trans_getsb(bp->b_fspriv); xfs_trans_getsb() 227 return bp; xfs_trans_getsb() 251 struct xfs_buf *bp = NULL; xfs_trans_read_buf_map() local 265 bp = xfs_trans_buf_item_match(tp, target, map, nmaps); xfs_trans_read_buf_map() 266 if (bp) { xfs_trans_read_buf_map() 267 ASSERT(xfs_buf_islocked(bp)); xfs_trans_read_buf_map() 268 ASSERT(bp->b_transp == tp); xfs_trans_read_buf_map() 269 ASSERT(bp->b_fspriv != NULL); xfs_trans_read_buf_map() 270 ASSERT(!bp->b_error); xfs_trans_read_buf_map() 271 ASSERT(bp->b_flags & XBF_DONE); xfs_trans_read_buf_map() 278 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); xfs_trans_read_buf_map() 282 bip = bp->b_fspriv; xfs_trans_read_buf_map() 287 *bpp = bp; xfs_trans_read_buf_map() 291 bp = xfs_buf_read_map(target, map, nmaps, flags, ops); xfs_trans_read_buf_map() 292 if (!bp) { xfs_trans_read_buf_map() 307 if (bp->b_error) { xfs_trans_read_buf_map() 308 error = bp->b_error; xfs_trans_read_buf_map() 310 xfs_buf_ioerror_alert(bp, __func__); xfs_trans_read_buf_map() 311 bp->b_flags &= ~XBF_DONE; xfs_trans_read_buf_map() 312 xfs_buf_stale(bp); xfs_trans_read_buf_map() 316 xfs_buf_relse(bp); xfs_trans_read_buf_map() 325 xfs_buf_relse(bp); xfs_trans_read_buf_map() 326 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); xfs_trans_read_buf_map() 331 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_read_buf_map() 332 trace_xfs_trans_read_buf(bp->b_fspriv); xfs_trans_read_buf_map() 334 *bpp = bp; xfs_trans_read_buf_map() 340 * Release the buffer bp which was previously acquired with one of the 356 xfs_buf_t *bp) xfs_trans_brelse() 364 ASSERT(bp->b_transp == NULL); xfs_trans_brelse() 365 xfs_buf_relse(bp); xfs_trans_brelse() 369 ASSERT(bp->b_transp == tp); xfs_trans_brelse() 370 bip = bp->b_fspriv; xfs_trans_brelse() 433 ASSERT(bp->b_pincount == 0); xfs_trans_brelse() 438 xfs_buf_item_relse(bp); xfs_trans_brelse() 441 bp->b_transp = NULL; xfs_trans_brelse() 442 xfs_buf_relse(bp); xfs_trans_brelse() 453 xfs_buf_t *bp) xfs_trans_bhold() 455 
xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_bhold() 457 ASSERT(bp->b_transp == tp); xfs_trans_bhold() 473 xfs_buf_t *bp) xfs_trans_bhold_release() 475 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_bhold_release() 477 ASSERT(bp->b_transp == tp); xfs_trans_bhold_release() 499 xfs_buf_t *bp, xfs_trans_log_buf() 503 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_log_buf() 505 ASSERT(bp->b_transp == tp); xfs_trans_log_buf() 507 ASSERT(first <= last && last < BBTOB(bp->b_length)); xfs_trans_log_buf() 508 ASSERT(bp->b_iodone == NULL || xfs_trans_log_buf() 509 bp->b_iodone == xfs_buf_iodone_callbacks); xfs_trans_log_buf() 521 XFS_BUF_DONE(bp); xfs_trans_log_buf() 524 bp->b_iodone = xfs_buf_iodone_callbacks; xfs_trans_log_buf() 537 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_trans_log_buf() 538 XFS_BUF_UNSTALE(bp); xfs_trans_log_buf() 587 xfs_buf_t *bp) xfs_trans_binval() 589 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_binval() 592 ASSERT(bp->b_transp == tp); xfs_trans_binval() 603 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_trans_binval() 613 xfs_buf_stale(bp); xfs_trans_binval() 642 xfs_buf_t *bp) xfs_trans_inode_buf() 644 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_inode_buf() 646 ASSERT(bp->b_transp == tp); xfs_trans_inode_buf() 651 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_inode_buf() 666 xfs_buf_t *bp) xfs_trans_stale_inode_buf() 668 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_stale_inode_buf() 670 ASSERT(bp->b_transp == tp); xfs_trans_stale_inode_buf() 676 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_stale_inode_buf() 691 xfs_buf_t *bp) xfs_trans_inode_alloc_buf() 693 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_inode_alloc_buf() 695 ASSERT(bp->b_transp == tp); xfs_trans_inode_alloc_buf() 700 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_inode_alloc_buf() 714 struct xfs_buf *bp) xfs_trans_ordered_buf() 716 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_ordered_buf() 718 ASSERT(bp->b_transp == tp); xfs_trans_ordered_buf() 733 struct xfs_buf *bp, xfs_trans_buf_set_type() 736 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_buf_set_type() 741 ASSERT(bp->b_transp == tp); xfs_trans_buf_set_type() 775 xfs_buf_t *bp, xfs_trans_dquot_buf() 778 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_dquot_buf() 801 xfs_trans_buf_set_type(tp, bp, type); xfs_trans_dquot_buf() 75 _xfs_trans_bjoin( struct xfs_trans *tp, struct xfs_buf *bp, int reset_recur) _xfs_trans_bjoin() argument 116 xfs_trans_bjoin( struct xfs_trans *tp, struct xfs_buf *bp) xfs_trans_bjoin() argument 355 xfs_trans_brelse(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_brelse() argument 452 xfs_trans_bhold(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_bhold() argument 472 xfs_trans_bhold_release(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_bhold_release() argument 498 xfs_trans_log_buf(xfs_trans_t *tp, xfs_buf_t *bp, uint first, uint last) xfs_trans_log_buf() argument 585 xfs_trans_binval( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_binval() argument 640 xfs_trans_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_inode_buf() argument 664 xfs_trans_stale_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_stale_inode_buf() argument 689 xfs_trans_inode_alloc_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_inode_alloc_buf() argument 712 xfs_trans_ordered_buf( struct xfs_trans *tp, struct xfs_buf *bp) xfs_trans_ordered_buf() argument 731 xfs_trans_buf_set_type( struct xfs_trans *tp, struct xfs_buf *bp, enum xfs_blft type) xfs_trans_buf_set_type() argument 773 
xfs_trans_dquot_buf( xfs_trans_t *tp, xfs_buf_t *bp, uint type) xfs_trans_dquot_buf() argument
|
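A sketch of the transactional buffer protocol shown above, assuming an open transaction; the wrapper name is hypothetical, and first/last are byte offsets as in xfs_trans_log_buf():

static int
my_log_range(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	int			numblks,
	uint			first,
	uint			last)
{
	struct xfs_buf		*bp;

	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
	if (!bp)
		return -ENOMEM;
	ASSERT(!bp->b_error);

	/* ... modify bytes [first, last] through bp->b_addr ... */
	xfs_trans_log_buf(tp, bp, first, last);

	/* the buffer stays joined to tp; commit or cancel unlocks it */
	return 0;
}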
H A D | xfs_buf_item.c | 41 STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp); 68 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_size_segment() local 102 } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) != xfs_buf_item_size_segment() 103 (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) + xfs_buf_item_size_segment() 189 struct xfs_buf *bp, xfs_buf_item_copy_iovec() 196 xfs_buf_offset(bp, offset), xfs_buf_item_copy_iovec() 202 struct xfs_buf *bp, xfs_buf_item_straddle() 207 return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) != xfs_buf_item_straddle() 208 (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) + xfs_buf_item_straddle() 220 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_format_segment() local 283 xfs_buf_item_copy_iovec(lv, vecp, bp, offset, xfs_buf_item_format_segment() 288 xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) { xfs_buf_item_format_segment() 289 xfs_buf_item_copy_iovec(lv, vecp, bp, offset, xfs_buf_item_format_segment() 314 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_format() local 362 offset += bp->b_maps[i].bm_len; xfs_buf_item_format() 416 xfs_buf_t *bp = bip->bli_buf; xfs_buf_item_unpin() local 421 ASSERT(bp->b_fspriv == bip); xfs_buf_item_unpin() 428 if (atomic_dec_and_test(&bp->b_pin_count)) xfs_buf_item_unpin() 429 wake_up_all(&bp->b_waiters); xfs_buf_item_unpin() 433 ASSERT(xfs_buf_islocked(bp)); xfs_buf_item_unpin() 434 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_buf_item_unpin() 455 bp->b_transp = NULL; xfs_buf_item_unpin() 465 xfs_buf_do_callbacks(bp); xfs_buf_item_unpin() 466 bp->b_fspriv = NULL; xfs_buf_item_unpin() 467 bp->b_iodone = NULL; xfs_buf_item_unpin() 471 xfs_buf_item_relse(bp); xfs_buf_item_unpin() 472 ASSERT(bp->b_fspriv == NULL); xfs_buf_item_unpin() 474 xfs_buf_relse(bp); xfs_buf_item_unpin() 485 * processing (via the bp->b_iodone callback), and then finally xfs_buf_item_unpin() 492 xfs_buf_lock(bp); xfs_buf_item_unpin() 493 xfs_buf_hold(bp); xfs_buf_item_unpin() 494 bp->b_flags |= XBF_ASYNC; xfs_buf_item_unpin() 495 xfs_buf_ioerror(bp, -EIO); xfs_buf_item_unpin() 496 XFS_BUF_UNDONE(bp); xfs_buf_item_unpin() 497 xfs_buf_stale(bp); xfs_buf_item_unpin() 498 xfs_buf_ioend(bp); xfs_buf_item_unpin() 516 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_push() local 519 if (xfs_buf_ispinned(bp)) xfs_buf_item_push() 521 if (!xfs_buf_trylock(bp)) { xfs_buf_item_push() 529 if (xfs_buf_ispinned(bp)) xfs_buf_item_push() 539 if ((bp->b_flags & XBF_WRITE_FAIL) && xfs_buf_item_push() 541 xfs_warn(bp->b_target->bt_mount, xfs_buf_item_push() 543 (long long)bp->b_bn); xfs_buf_item_push() 546 if (!xfs_buf_delwri_queue(bp, buffer_list)) xfs_buf_item_push() 548 xfs_buf_unlock(bp); xfs_buf_item_push() 576 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_unlock() local 582 bp->b_transp = NULL; xfs_buf_item_unlock() 647 xfs_buf_item_relse(bp); xfs_buf_item_unlock() 655 xfs_buf_item_relse(bp); xfs_buf_item_unlock() 660 xfs_buf_relse(bp); xfs_buf_item_unlock() 755 xfs_buf_t *bp, xfs_buf_item_init() 758 xfs_log_item_t *lip = bp->b_fspriv; xfs_buf_item_init() 771 ASSERT(bp->b_target->bt_mount == mp); xfs_buf_item_init() 777 bip->bli_buf = bp; xfs_buf_item_init() 778 xfs_buf_hold(bp); xfs_buf_item_init() 789 error = xfs_buf_item_get_format(bip, bp->b_map_count); xfs_buf_item_init() 793 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len), xfs_buf_item_init() 798 bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn; xfs_buf_item_init() 799 bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len; xfs_buf_item_init() 807 if (bp->b_fspriv) 
xfs_buf_item_init() 808 bip->bli_item.li_bio_list = bp->b_fspriv; xfs_buf_item_init() 809 bp->b_fspriv = bip; xfs_buf_item_init() 907 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_log() local 916 end = start + BBTOB(bp->b_maps[i].bm_len); xfs_buf_item_log() 918 start += BBTOB(bp->b_maps[i].bm_len); xfs_buf_item_log() 929 start += bp->b_maps[i].bm_len; xfs_buf_item_log() 962 xfs_buf_t *bp) xfs_buf_item_relse() 964 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_buf_item_relse() 966 trace_xfs_buf_item_relse(bp, _RET_IP_); xfs_buf_item_relse() 969 bp->b_fspriv = bip->bli_item.li_bio_list; xfs_buf_item_relse() 970 if (bp->b_fspriv == NULL) xfs_buf_item_relse() 971 bp->b_iodone = NULL; xfs_buf_item_relse() 973 xfs_buf_rele(bp); xfs_buf_item_relse() 989 xfs_buf_t *bp, xfs_buf_attach_iodone() 995 ASSERT(xfs_buf_islocked(bp)); xfs_buf_attach_iodone() 998 head_lip = bp->b_fspriv; xfs_buf_attach_iodone() 1003 bp->b_fspriv = lip; xfs_buf_attach_iodone() 1006 ASSERT(bp->b_iodone == NULL || xfs_buf_attach_iodone() 1007 bp->b_iodone == xfs_buf_iodone_callbacks); xfs_buf_attach_iodone() 1008 bp->b_iodone = xfs_buf_iodone_callbacks; xfs_buf_attach_iodone() 1025 struct xfs_buf *bp) xfs_buf_do_callbacks() 1029 while ((lip = bp->b_fspriv) != NULL) { xfs_buf_do_callbacks() 1030 bp->b_fspriv = lip->li_bio_list; xfs_buf_do_callbacks() 1039 lip->li_cb(bp, lip); xfs_buf_do_callbacks() 1052 struct xfs_buf *bp) xfs_buf_iodone_callbacks() 1054 struct xfs_log_item *lip = bp->b_fspriv; xfs_buf_iodone_callbacks() 1059 if (likely(!bp->b_error)) xfs_buf_iodone_callbacks() 1067 xfs_buf_stale(bp); xfs_buf_iodone_callbacks() 1068 XFS_BUF_DONE(bp); xfs_buf_iodone_callbacks() 1069 trace_xfs_buf_item_iodone(bp, _RET_IP_); xfs_buf_iodone_callbacks() 1073 if (bp->b_target != lasttarg || xfs_buf_iodone_callbacks() 1076 xfs_buf_ioerror_alert(bp, __func__); xfs_buf_iodone_callbacks() 1078 lasttarg = bp->b_target; xfs_buf_iodone_callbacks() 1091 if (XFS_BUF_ISASYNC(bp)) { xfs_buf_iodone_callbacks() 1092 ASSERT(bp->b_iodone != NULL); xfs_buf_iodone_callbacks() 1094 trace_xfs_buf_item_iodone_async(bp, _RET_IP_); xfs_buf_iodone_callbacks() 1096 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ xfs_buf_iodone_callbacks() 1098 if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { xfs_buf_iodone_callbacks() 1099 bp->b_flags |= XBF_WRITE | XBF_ASYNC | xfs_buf_iodone_callbacks() 1101 xfs_buf_submit(bp); xfs_buf_iodone_callbacks() 1103 xfs_buf_relse(bp); xfs_buf_iodone_callbacks() 1113 xfs_buf_stale(bp); xfs_buf_iodone_callbacks() 1114 XFS_BUF_DONE(bp); xfs_buf_iodone_callbacks() 1116 trace_xfs_buf_error_relse(bp, _RET_IP_); xfs_buf_iodone_callbacks() 1119 xfs_buf_do_callbacks(bp); xfs_buf_iodone_callbacks() 1120 bp->b_fspriv = NULL; xfs_buf_iodone_callbacks() 1121 bp->b_iodone = NULL; xfs_buf_iodone_callbacks() 1122 xfs_buf_ioend(bp); xfs_buf_iodone_callbacks() 1134 struct xfs_buf *bp, xfs_buf_iodone() 1139 ASSERT(BUF_ITEM(lip)->bli_buf == bp); xfs_buf_iodone() 1141 xfs_buf_rele(bp); xfs_buf_iodone() 186 xfs_buf_item_copy_iovec( struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, struct xfs_buf *bp, uint offset, int first_bit, uint nbits) xfs_buf_item_copy_iovec() argument 201 xfs_buf_item_straddle( struct xfs_buf *bp, uint offset, int next_bit, int last_bit) xfs_buf_item_straddle() argument 754 xfs_buf_item_init( xfs_buf_t *bp, xfs_mount_t *mp) xfs_buf_item_init() argument 961 xfs_buf_item_relse( xfs_buf_t *bp) xfs_buf_item_relse() argument 988 xfs_buf_attach_iodone( xfs_buf_t *bp, void (*cb)(xfs_buf_t *, xfs_log_item_t *), 
xfs_log_item_t *lip) xfs_buf_attach_iodone() argument 1024 xfs_buf_do_callbacks( struct xfs_buf *bp) xfs_buf_do_callbacks() argument 1051 xfs_buf_iodone_callbacks( struct xfs_buf *bp) xfs_buf_iodone_callbacks() argument 1133 xfs_buf_iodone( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_buf_iodone() argument
|
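A sketch of the callback chaining done by xfs_buf_attach_iodone() above; my_item_done() and my_attach() are hypothetical stand-ins for real callbacks such as xfs_buf_iodone() or xfs_qm_dqflush_done():

static void
my_item_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	/* runs from xfs_buf_do_callbacks() once the I/O completes */
}

static void
my_attach(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	ASSERT(xfs_buf_islocked(bp));	/* attach only under the lock */
	xfs_buf_attach_iodone(bp, my_item_done, lip);
	/* bp->b_iodone is now xfs_buf_iodone_callbacks */
}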
H A D | xfs_attr_list.c | 228 struct xfs_buf *bp; xfs_attr_node_list() local 242 bp = NULL; xfs_attr_node_list() 245 &bp, XFS_ATTR_FORK); xfs_attr_node_list() 248 if (bp) { xfs_attr_node_list() 251 node = bp->b_addr; xfs_attr_node_list() 256 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 257 bp = NULL; xfs_attr_node_list() 261 leaf = bp->b_addr; xfs_attr_node_list() 268 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 269 bp = NULL; xfs_attr_node_list() 273 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 274 bp = NULL; xfs_attr_node_list() 279 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 280 bp = NULL; xfs_attr_node_list() 290 if (bp == NULL) { xfs_attr_node_list() 296 cursor->blkno, -1, &bp, xfs_attr_node_list() 300 node = bp->b_addr; xfs_attr_node_list() 311 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 327 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 330 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 333 ASSERT(bp != NULL); xfs_attr_node_list() 341 leaf = bp->b_addr; xfs_attr_node_list() 342 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_node_list() 344 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 351 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 352 error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp); xfs_attr_node_list() 356 xfs_trans_brelse(NULL, bp); xfs_attr_node_list() 365 struct xfs_buf *bp, xfs_attr3_leaf_list_int() 379 leaf = bp->b_addr; xfs_attr3_leaf_list_int() 493 struct xfs_buf *bp; xfs_attr_leaf_list() local 498 error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); xfs_attr_leaf_list() 502 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_leaf_list() 503 xfs_trans_brelse(NULL, bp); xfs_attr_leaf_list() 364 xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int() argument
|
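The listing code above repeats one idiom: reads outside any transaction pass a NULL tp, and the buffer is dropped with xfs_trans_brelse(NULL, bp), which (per the xfs_trans_buf.c excerpt) reduces to a plain xfs_buf_relse(). A hypothetical sketch of one leaf visit:

static int
my_visit_leaf(
	struct xfs_inode	*dp,
	xfs_dablk_t		blkno)
{
	struct xfs_buf		*bp;
	struct xfs_attr_leafblock *leaf;
	int			error;

	error = xfs_attr3_leaf_read(NULL, dp, blkno, -1, &bp);
	if (error)
		return error;

	leaf = bp->b_addr;
	/* ... walk the entries in "leaf" ... */
	xfs_trans_brelse(NULL, bp);	/* tp == NULL: plain release */
	return 0;
}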
H A D | xfs_attr_inactive.c | 54 struct xfs_buf *bp; xfs_attr3_leaf_freextent() local 91 bp = xfs_trans_get_buf(*trans, xfs_attr3_leaf_freextent() 94 if (!bp) xfs_attr3_leaf_freextent() 96 xfs_trans_binval(*trans, bp); xfs_attr3_leaf_freextent() 122 struct xfs_buf *bp) xfs_attr3_leaf_inactive() 135 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_inactive() 137 leaf = bp->b_addr; xfs_attr3_leaf_inactive() 158 xfs_trans_brelse(*trans, bp); xfs_attr3_leaf_inactive() 185 xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */ xfs_attr3_leaf_inactive() 211 struct xfs_buf *bp, xfs_attr3_node_inactive() 227 xfs_trans_brelse(*trans, bp); /* no locks for later trans */ xfs_attr3_node_inactive() 231 node = bp->b_addr; xfs_attr3_node_inactive() 233 parent_blkno = bp->b_bn; xfs_attr3_node_inactive() 235 xfs_trans_brelse(*trans, bp); xfs_attr3_node_inactive() 240 xfs_trans_brelse(*trans, bp); /* no locks for later trans */ xfs_attr3_node_inactive() 302 &bp, XFS_ATTR_FORK); xfs_attr3_node_inactive() 306 xfs_trans_brelse(*trans, bp); xfs_attr3_node_inactive() 331 struct xfs_buf *bp; xfs_attr3_root_inactive() local 341 error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); xfs_attr3_root_inactive() 344 blkno = bp->b_bn; xfs_attr3_root_inactive() 350 info = bp->b_addr; xfs_attr3_root_inactive() 354 error = xfs_attr3_node_inactive(trans, dp, bp, 1); xfs_attr3_root_inactive() 358 error = xfs_attr3_leaf_inactive(trans, dp, bp); xfs_attr3_root_inactive() 362 xfs_trans_brelse(*trans, bp); xfs_attr3_root_inactive() 371 error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); xfs_attr3_root_inactive() 374 xfs_trans_binval(*trans, bp); /* remove from cache */ xfs_attr3_root_inactive() 119 xfs_attr3_leaf_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp) xfs_attr3_leaf_inactive() argument 208 xfs_attr3_node_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp, int level) xfs_attr3_node_inactive() argument
|
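A sketch of the invalidation step in xfs_attr3_leaf_freextent() above: a block that is about to be freed is pulled into the transaction and staled so it can never be written back over reused space. The wrapper and the dblkno/dblkcnt names are hypothetical:

static int
my_invalidate_block(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp,
	xfs_daddr_t		dblkno,
	int			dblkcnt)
{
	struct xfs_buf		*bp;

	bp = xfs_trans_get_buf(*trans, dp->i_mount->m_ddev_targp,
			       dblkno, dblkcnt, 0);
	if (!bp)
		return -ENOMEM;

	xfs_trans_binval(*trans, bp);	/* stale; discarded, not rewritten */
	return 0;
}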
H A D | xfs_symlink.c | 52 struct xfs_buf *bp; xfs_readlink_bmap() local 73 bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0, xfs_readlink_bmap() 75 if (!bp) xfs_readlink_bmap() 77 error = bp->b_error; xfs_readlink_bmap() 79 xfs_buf_ioerror_alert(bp, __func__); xfs_readlink_bmap() 80 xfs_buf_relse(bp); xfs_readlink_bmap() 91 cur_chunk = bp->b_addr; xfs_readlink_bmap() 94 byte_cnt, bp)) { xfs_readlink_bmap() 99 xfs_buf_relse(bp); xfs_readlink_bmap() 112 xfs_buf_relse(bp); xfs_readlink_bmap() 191 xfs_buf_t *bp; xfs_symlink() local 346 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, xfs_symlink() 348 if (!bp) { xfs_symlink() 352 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink() 357 buf = bp->b_addr; xfs_symlink() 359 byte_cnt, bp); xfs_symlink() 367 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF); xfs_symlink() 368 xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) - xfs_symlink() 369 (char *)bp->b_addr); xfs_symlink() 440 xfs_buf_t *bp; xfs_inactive_symlink_rmt() local 497 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, xfs_inactive_symlink_rmt() 500 if (!bp) { xfs_inactive_symlink_rmt() 504 xfs_trans_binval(tp, bp); xfs_inactive_symlink_rmt()
|
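A fragment-style sketch of the remote-block write in xfs_symlink() above, reusing the excerpt's own locals (tp, mp, d, byte_cnt, buf): stage one chunk of the target path in a transactional buffer, tag its type for log recovery, then log only the bytes used:

	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, BTOBB(byte_cnt), 0);
	if (!bp)
		return -ENOMEM;
	bp->b_ops = &xfs_symlink_buf_ops;

	buf = bp->b_addr;
	/* ... copy byte_cnt bytes of the link target into buf ... */

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
	xfs_trans_log_buf(tp, bp, 0,
			  (buf + byte_cnt - 1) - (char *)bp->b_addr);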
H A D | xfs_log_recover.c | 104 struct xfs_buf *bp; xlog_get_bp() local 133 bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0); xlog_get_bp() 134 if (bp) xlog_get_bp() 135 xfs_buf_unlock(bp); xlog_get_bp() 136 return bp; xlog_get_bp() 141 xfs_buf_t *bp) xlog_put_bp() 143 xfs_buf_free(bp); xlog_put_bp() 155 struct xfs_buf *bp) xlog_align() 159 ASSERT(offset + nbblks <= bp->b_length); xlog_align() 160 return bp->b_addr + BBTOB(offset); xlog_align() 172 struct xfs_buf *bp) xlog_bread_noalign() 187 ASSERT(nbblks <= bp->b_length); xlog_bread_noalign() 189 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); xlog_bread_noalign() 190 XFS_BUF_READ(bp); xlog_bread_noalign() 191 bp->b_io_length = nbblks; xlog_bread_noalign() 192 bp->b_error = 0; xlog_bread_noalign() 194 error = xfs_buf_submit_wait(bp); xlog_bread_noalign() 196 xfs_buf_ioerror_alert(bp, __func__); xlog_bread_noalign() 205 struct xfs_buf *bp, xlog_bread() 210 error = xlog_bread_noalign(log, blk_no, nbblks, bp); xlog_bread() 214 *offset = xlog_align(log, blk_no, nbblks, bp); xlog_bread() 227 struct xfs_buf *bp, xlog_bread_offset() 230 xfs_caddr_t orig_offset = bp->b_addr; xlog_bread_offset() 231 int orig_len = BBTOB(bp->b_length); xlog_bread_offset() 234 error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks)); xlog_bread_offset() 238 error = xlog_bread_noalign(log, blk_no, nbblks, bp); xlog_bread_offset() 241 error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len); xlog_bread_offset() 257 struct xfs_buf *bp) xlog_bwrite() 272 ASSERT(nbblks <= bp->b_length); xlog_bwrite() 274 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); xlog_bwrite() 275 XFS_BUF_ZEROFLAGS(bp); xlog_bwrite() 276 xfs_buf_hold(bp); xlog_bwrite() 277 xfs_buf_lock(bp); xlog_bwrite() 278 bp->b_io_length = nbblks; xlog_bwrite() 279 bp->b_error = 0; xlog_bwrite() 281 error = xfs_bwrite(bp); xlog_bwrite() 283 xfs_buf_ioerror_alert(bp, __func__); xlog_bwrite() 284 xfs_buf_relse(bp); xlog_bwrite() 368 struct xfs_buf *bp) xlog_recover_iodone() 370 if (bp->b_error) { xlog_recover_iodone() 375 if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xlog_recover_iodone() 376 xfs_buf_ioerror_alert(bp, __func__); xlog_recover_iodone() 377 xfs_force_shutdown(bp->b_target->bt_mount, xlog_recover_iodone() 381 bp->b_iodone = NULL; xlog_recover_iodone() 382 xfs_buf_ioend(bp); xlog_recover_iodone() 394 struct xfs_buf *bp, xlog_find_cycle_start() 408 error = xlog_bread(log, mid_blk, 1, bp, &offset); xlog_find_cycle_start() 444 xfs_buf_t *bp; xlog_find_verify_cycle() local 458 while (!(bp = xlog_get_bp(log, bufblks))) { xlog_find_verify_cycle() 469 error = xlog_bread(log, i, bcount, bp, &buf); xlog_find_verify_cycle() 487 xlog_put_bp(bp); xlog_find_verify_cycle() 511 xfs_buf_t *bp; xlog_find_verify_log_record() local 521 if (!(bp = xlog_get_bp(log, num_blks))) { xlog_find_verify_log_record() 522 if (!(bp = xlog_get_bp(log, 1))) xlog_find_verify_log_record() 526 error = xlog_bread(log, start_blk, num_blks, bp, &offset); xlog_find_verify_log_record() 543 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_verify_log_record() 596 xlog_put_bp(bp); xlog_find_verify_log_record() 618 xfs_buf_t *bp; xlog_find_head() local 648 bp = xlog_get_bp(log, 1); xlog_find_head() 649 if (!bp) xlog_find_head() 652 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_head() 659 error = xlog_bread(log, last_blk, 1, bp, &offset); xlog_find_head() 729 if ((error = xlog_find_cycle_start(log, bp, first_blk, xlog_find_head() 849 xlog_put_bp(bp); xlog_find_head() 863 xlog_put_bp(bp); xlog_find_head() 
895 xfs_buf_t *bp; xlog_find_tail() local 910 bp = xlog_get_bp(log, 1); xlog_find_tail() 911 if (!bp) xlog_find_tail() 914 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_tail() 930 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_tail() 947 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_tail() 960 xlog_put_bp(bp); xlog_find_tail() 1023 error = xlog_bread(log, umount_data_blk, 1, bp, &offset); xlog_find_tail() 1073 xlog_put_bp(bp); xlog_find_tail() 1101 xfs_buf_t *bp; xlog_find_zeroed() local 1111 bp = xlog_get_bp(log, 1); xlog_find_zeroed() 1112 if (!bp) xlog_find_zeroed() 1114 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_zeroed() 1121 xlog_put_bp(bp); xlog_find_zeroed() 1126 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset); xlog_find_zeroed() 1132 xlog_put_bp(bp); xlog_find_zeroed() 1148 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) xlog_find_zeroed() 1188 xlog_put_bp(bp); xlog_find_zeroed() 1231 xfs_buf_t *bp; xlog_write_log_records() local 1248 while (!(bp = xlog_get_bp(log, bufblks))) { xlog_write_log_records() 1260 error = xlog_bread_noalign(log, start_block, 1, bp); xlog_write_log_records() 1279 offset = bp->b_addr + BBTOB(ealign - start_block); xlog_write_log_records() 1281 bp, offset); xlog_write_log_records() 1287 offset = xlog_align(log, start_block, endcount, bp); xlog_write_log_records() 1293 error = xlog_bwrite(log, start_block, endcount, bp); xlog_write_log_records() 1301 xlog_put_bp(bp); xlog_write_log_records() 1707 struct xfs_buf *bp, xlog_recover_do_inode_buffer() 1728 bp->b_ops = &xfs_inode_buf_ops; xlog_recover_do_inode_buffer() 1730 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; xlog_recover_do_inode_buffer() 1773 BBTOB(bp->b_io_length)); xlog_recover_do_inode_buffer() 1784 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). 
" xlog_recover_do_inode_buffer() 1786 item, bp); xlog_recover_do_inode_buffer() 1792 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, xlog_recover_do_inode_buffer() 1802 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); xlog_recover_do_inode_buffer() 1832 struct xfs_buf *bp) xlog_recover_get_buf_lsn() 1837 void *blk = bp->b_addr; xlog_recover_get_buf_lsn() 1968 struct xfs_buf *bp, xlog_recover_validate_buf_type() 1971 struct xfs_da_blkinfo *info = bp->b_addr; xlog_recover_validate_buf_type() 1987 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); xlog_recover_validate_buf_type() 1988 magic16 = be16_to_cpu(*(__be16*)bp->b_addr); xlog_recover_validate_buf_type() 1997 bp->b_ops = &xfs_allocbt_buf_ops; xlog_recover_validate_buf_type() 2003 bp->b_ops = &xfs_inobt_buf_ops; xlog_recover_validate_buf_type() 2007 bp->b_ops = &xfs_bmbt_buf_ops; xlog_recover_validate_buf_type() 2021 bp->b_ops = &xfs_agf_buf_ops; xlog_recover_validate_buf_type() 2029 bp->b_ops = &xfs_agfl_buf_ops; xlog_recover_validate_buf_type() 2037 bp->b_ops = &xfs_agi_buf_ops; xlog_recover_validate_buf_type() 2048 bp->b_ops = &xfs_dquot_buf_ops; xlog_recover_validate_buf_type() 2061 bp->b_ops = &xfs_inode_buf_ops; xlog_recover_validate_buf_type() 2069 bp->b_ops = &xfs_symlink_buf_ops; xlog_recover_validate_buf_type() 2078 bp->b_ops = &xfs_dir3_block_buf_ops; xlog_recover_validate_buf_type() 2087 bp->b_ops = &xfs_dir3_data_buf_ops; xlog_recover_validate_buf_type() 2096 bp->b_ops = &xfs_dir3_free_buf_ops; xlog_recover_validate_buf_type() 2105 bp->b_ops = &xfs_dir3_leaf1_buf_ops; xlog_recover_validate_buf_type() 2114 bp->b_ops = &xfs_dir3_leafn_buf_ops; xlog_recover_validate_buf_type() 2123 bp->b_ops = &xfs_da3_node_buf_ops; xlog_recover_validate_buf_type() 2132 bp->b_ops = &xfs_attr3_leaf_buf_ops; xlog_recover_validate_buf_type() 2140 bp->b_ops = &xfs_attr3_rmt_buf_ops; xlog_recover_validate_buf_type() 2148 bp->b_ops = &xfs_sb_buf_ops; xlog_recover_validate_buf_type() 2167 struct xfs_buf *bp, xlog_recover_do_reg_buffer() 2189 ASSERT(BBTOB(bp->b_io_length) >= xlog_recover_do_reg_buffer() 2229 memcpy(xfs_buf_offset(bp, xlog_recover_do_reg_buffer() 2241 xlog_recover_validate_buf_type(mp, bp, buf_f); xlog_recover_do_reg_buffer() 2258 struct xfs_buf *bp, xlog_recover_do_dquot_buffer() 2284 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); xlog_recover_do_dquot_buffer() 2320 xfs_buf_t *bp; xlog_recover_buffer_pass2() local 2341 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, xlog_recover_buffer_pass2() 2343 if (!bp) xlog_recover_buffer_pass2() 2345 error = bp->b_error; xlog_recover_buffer_pass2() 2347 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); xlog_recover_buffer_pass2() 2370 lsn = xlog_recover_get_buf_lsn(mp, bp); xlog_recover_buffer_pass2() 2372 xlog_recover_validate_buf_type(mp, bp, buf_f); xlog_recover_buffer_pass2() 2377 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); xlog_recover_buffer_pass2() 2384 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); xlog_recover_buffer_pass2() 2388 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); xlog_recover_buffer_pass2() 2407 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && xlog_recover_buffer_pass2() 2408 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, xlog_recover_buffer_pass2() 2410 xfs_buf_stale(bp); xlog_recover_buffer_pass2() 2411 error = xfs_bwrite(bp); xlog_recover_buffer_pass2() 2413 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_buffer_pass2() 2414 bp->b_iodone = xlog_recover_iodone; xlog_recover_buffer_pass2() 2415 
xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_buffer_pass2() 2419 xfs_buf_relse(bp); xlog_recover_buffer_pass2() 2508 xfs_buf_t *bp; xlog_recover_inode_pass2() local 2542 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, xlog_recover_inode_pass2() 2544 if (!bp) { xlog_recover_inode_pass2() 2548 error = bp->b_error; xlog_recover_inode_pass2() 2550 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); xlog_recover_inode_pass2() 2554 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); xlog_recover_inode_pass2() 2562 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", xlog_recover_inode_pass2() 2563 __func__, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2() 2631 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", xlog_recover_inode_pass2() 2632 __func__, item, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2() 2644 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", xlog_recover_inode_pass2() 2645 __func__, item, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2() 2655 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", xlog_recover_inode_pass2() 2656 __func__, item, dip, bp, in_f->ilf_ino, xlog_recover_inode_pass2() 2667 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, xlog_recover_inode_pass2() 2668 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); xlog_recover_inode_pass2() 2779 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_inode_pass2() 2780 bp->b_iodone = xlog_recover_iodone; xlog_recover_inode_pass2() 2781 xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_inode_pass2() 2784 xfs_buf_relse(bp); xlog_recover_inode_pass2() 2829 xfs_buf_t *bp; xlog_recover_dquot_pass2() local 2887 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, xlog_recover_dquot_pass2() 2892 ASSERT(bp); xlog_recover_dquot_pass2() 2893 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); xlog_recover_dquot_pass2() 2915 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_dquot_pass2() 2916 bp->b_iodone = xlog_recover_iodone; xlog_recover_dquot_pass2() 2917 xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_dquot_pass2() 2920 xfs_buf_relse(bp); xlog_recover_dquot_pass2() 4422 xfs_buf_t *bp; xlog_do_recover() local 4454 bp = xfs_getsb(log->l_mp, 0); xlog_do_recover() 4455 XFS_BUF_UNDONE(bp); xlog_do_recover() 4456 ASSERT(!(XFS_BUF_ISWRITE(bp))); xlog_do_recover() 4457 XFS_BUF_READ(bp); xlog_do_recover() 4458 XFS_BUF_UNASYNC(bp); xlog_do_recover() 4459 bp->b_ops = &xfs_sb_buf_ops; xlog_do_recover() 4461 error = xfs_buf_submit_wait(bp); xlog_do_recover() 4464 xfs_buf_ioerror_alert(bp, __func__); xlog_do_recover() 4467 xfs_buf_relse(bp); xlog_do_recover() 4473 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); xlog_do_recover() 4478 xfs_buf_relse(bp); xlog_do_recover() 140 xlog_put_bp( xfs_buf_t *bp) xlog_put_bp() argument 151 xlog_align( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_align() argument 168 xlog_bread_noalign( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_bread_noalign() argument 201 xlog_bread( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp, xfs_caddr_t *offset) xlog_bread() argument 223 xlog_bread_offset( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp, xfs_caddr_t offset) xlog_bread_offset() argument 253 xlog_bwrite( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_bwrite() argument 367 xlog_recover_iodone( struct xfs_buf *bp) xlog_recover_iodone() argument 392 xlog_find_cycle_start( struct xlog *log, struct xfs_buf *bp, xfs_daddr_t 
first_blk, xfs_daddr_t *last_blk, uint cycle) xlog_find_cycle_start() argument 1704 xlog_recover_do_inode_buffer( struct xfs_mount *mp, xlog_recover_item_t *item, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_do_inode_buffer() argument 1830 xlog_recover_get_buf_lsn( struct xfs_mount *mp, struct xfs_buf *bp) xlog_recover_get_buf_lsn() argument 1966 xlog_recover_validate_buf_type( struct xfs_mount *mp, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_validate_buf_type() argument 2164 xlog_recover_do_reg_buffer( struct xfs_mount *mp, xlog_recover_item_t *item, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_do_reg_buffer() argument 2254 xlog_recover_do_dquot_buffer( struct xfs_mount *mp, struct xlog *log, struct xlog_recover_item *item, struct xfs_buf *bp, struct xfs_buf_log_format *buf_f) xlog_recover_do_dquot_buffer() argument
|
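The recovery helpers above (static to xfs_log_recover.c) wrap the buffer cache for raw log I/O. A fragment-style sketch, assuming the excerpted locals log and blk_no:

	struct xfs_buf	*bp;
	xfs_caddr_t	offset;
	int		error;

	bp = xlog_get_bp(log, 1);	/* uncached buffer for one block */
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, blk_no, 1, bp, &offset);
	if (!error) {
		/* ... inspect the record header at "offset", which ... */
		/* ... xlog_align() adjusted for sector rounding ... */
	}

	xlog_put_bp(bp);		/* xfs_buf_free() underneath */
	return error;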
H A D | xfs_error.c | 163 struct xfs_buf *bp) xfs_verifier_error() 165 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_verifier_error() 168 bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", xfs_verifier_error() 169 __return_address, bp->b_bn); xfs_verifier_error() 175 xfs_hex_dump(xfs_buf_offset(bp, 0), 64); xfs_verifier_error() 162 xfs_verifier_error( struct xfs_buf *bp) xfs_verifier_error() argument
|
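A sketch of how a read verifier feeds xfs_verifier_error() above; my_hdr_ok() is a placeholder for the real structure checks:

static bool
my_hdr_ok(void *hdr)			/* placeholder check */
{
	return true;
}

static void
my_read_verify(struct xfs_buf *bp)	/* hypothetical b_ops->verify_read */
{
	if (!my_hdr_ok(bp->b_addr)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);	/* alert plus 64-byte hex dump */
	}
}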
H A D | xfs_dquot.c | 231 xfs_buf_t *bp) xfs_qm_init_dquot_blk() 238 ASSERT(xfs_buf_islocked(bp)); xfs_qm_init_dquot_blk() 240 d = bp->b_addr; xfs_qm_init_dquot_blk() 260 xfs_trans_dquot_buf(tp, bp, xfs_qm_init_dquot_blk() 264 xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); xfs_qm_init_dquot_blk() 310 xfs_buf_t *bp; xfs_qm_dqalloc() local 350 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, xfs_qm_dqalloc() 354 if (!bp) { xfs_qm_dqalloc() 358 bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_dqalloc() 365 dqp->dq_flags & XFS_DQ_ALLTYPES, bp); xfs_qm_dqalloc() 380 xfs_trans_bhold(tp, bp); xfs_qm_dqalloc() 388 xfs_trans_bjoin(tp, bp); xfs_qm_dqalloc() 390 xfs_trans_bhold_release(tp, bp); xfs_qm_dqalloc() 393 *O_bpp = bp; xfs_qm_dqalloc() 465 struct xfs_buf *bp; xfs_qm_dqtobp() local 513 dqp->q_fileoffset, &bp); xfs_qm_dqtobp() 529 0, &bp, &xfs_dquot_buf_ops); xfs_qm_dqtobp() 534 ASSERT(bp == NULL); xfs_qm_dqtobp() 535 error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp); xfs_qm_dqtobp() 539 ASSERT(bp == NULL); xfs_qm_dqtobp() 544 ASSERT(xfs_buf_islocked(bp)); xfs_qm_dqtobp() 545 *O_bpp = bp; xfs_qm_dqtobp() 546 *O_ddpp = bp->b_addr + dqp->q_bufoffset; xfs_qm_dqtobp() 568 struct xfs_buf *bp; xfs_qm_dqread() local 627 error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags); xfs_qm_dqread() 655 xfs_buf_set_ref(bp, XFS_DQUOT_REF); xfs_qm_dqread() 669 ASSERT(xfs_buf_islocked(bp)); xfs_qm_dqread() 670 xfs_trans_brelse(tp, bp); xfs_qm_dqread() 890 struct xfs_buf *bp, xfs_qm_dqflush_done() 936 struct xfs_buf *bp; xfs_qm_dqflush() local 975 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqflush() 983 ddqp = bp->b_addr + dqp->q_bufoffset; xfs_qm_dqflush() 991 xfs_buf_relse(bp); xfs_qm_dqflush() 1029 xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, xfs_qm_dqflush() 1036 if (xfs_buf_ispinned(bp)) { xfs_qm_dqflush() 1042 *bpp = bp; xfs_qm_dqflush() 226 xfs_qm_init_dquot_blk( xfs_trans_t *tp, xfs_mount_t *mp, xfs_dqid_t id, uint type, xfs_buf_t *bp) xfs_qm_init_dquot_blk() argument 889 xfs_qm_dqflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_qm_dqflush_done() argument
|
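A sketch of the flush protocol above: xfs_qm_dqflush() copies the in-core dquot into its backing buffer, attaches xfs_qm_dqflush_done() through xfs_buf_attach_iodone(), and hands the buffer back for the caller to write and release. This is the synchronous variant used by xfs_qm_dqpurge() in the xfs_qm.c excerpt:

	struct xfs_buf	*bp = NULL;
	int		error;

	error = xfs_qm_dqflush(dqp, &bp);	/* dqp: a locked dquot */
	if (error)
		return error;

	error = xfs_bwrite(bp);			/* write it now */
	xfs_buf_relse(bp);
	return error;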
H A D | xfs_dir2_readdir.c | 164 struct xfs_buf *bp; /* buffer for block */ xfs_dir2_block_getdents() local 181 error = xfs_dir3_block_read(NULL, dp, &bp); xfs_dir2_block_getdents() 190 hdr = bp->b_addr; xfs_dir2_block_getdents() 191 xfs_dir3_data_check(dp, bp); xfs_dir2_block_getdents() 238 xfs_trans_brelse(NULL, bp); xfs_dir2_block_getdents() 249 xfs_trans_brelse(NULL, bp); xfs_dir2_block_getdents() 276 struct xfs_buf *bp = *bpp; xfs_dir2_leaf_readbuf() local 290 if (bp) { xfs_dir2_leaf_readbuf() 291 xfs_trans_brelse(NULL, bp); xfs_dir2_leaf_readbuf() 292 bp = NULL; xfs_dir2_leaf_readbuf() 390 -1, &bp); xfs_dir2_leaf_readbuf() 461 *bpp = bp; xfs_dir2_leaf_readbuf() 476 struct xfs_buf *bp = NULL; /* data block buffer */ xfs_dir2_leaf_getdents() local 531 if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) { xfs_dir2_leaf_getdents() 534 &curoff, &bp); xfs_dir2_leaf_getdents() 554 hdr = bp->b_addr; xfs_dir2_leaf_getdents() 555 xfs_dir3_data_check(dp, bp); xfs_dir2_leaf_getdents() 639 if (bp) xfs_dir2_leaf_getdents() 640 xfs_trans_brelse(NULL, bp); xfs_dir2_leaf_getdents()
|
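The block-form getdents path above uses the same NULL-transaction read idiom. A fragment-style sketch, assuming dp is the directory inode:

	struct xfs_buf		*bp;
	struct xfs_dir2_data_hdr *hdr;
	int			error;

	error = xfs_dir3_block_read(NULL, dp, &bp);
	if (error)
		return error;

	hdr = bp->b_addr;
	/* ... emit entries found between "hdr" and the block tail ... */
	xfs_trans_brelse(NULL, bp);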
H A D | xfs_mount.c | 264 struct xfs_buf *bp; xfs_readsb() local 290 BTOBB(sector_size), 0, &bp, buf_ops); xfs_readsb() 303 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); xfs_readsb() 332 xfs_buf_relse(bp); xfs_readsb() 341 bp->b_ops = &xfs_sb_buf_ops; xfs_readsb() 343 mp->m_sb_bp = bp; xfs_readsb() 344 xfs_buf_unlock(bp); xfs_readsb() 348 xfs_buf_relse(bp); xfs_readsb() 529 struct xfs_buf *bp; xfs_check_sizes() local 540 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL); xfs_check_sizes() 545 xfs_buf_relse(bp); xfs_check_sizes() 557 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_check_sizes() 562 xfs_buf_relse(bp); xfs_check_sizes() 1239 struct xfs_buf *bp = mp->m_sb_bp; xfs_getsb() local 1241 if (!xfs_buf_trylock(bp)) { xfs_getsb() 1244 xfs_buf_lock(bp); xfs_getsb() 1247 xfs_buf_hold(bp); xfs_getsb() 1248 ASSERT(XFS_BUF_ISDONE(bp)); xfs_getsb() 1249 return bp; xfs_getsb() 1259 struct xfs_buf *bp = mp->m_sb_bp; xfs_freesb() local 1261 xfs_buf_lock(bp); xfs_freesb() 1263 xfs_buf_relse(bp); xfs_freesb()
|
H A D | xfs_qm.c | 152 struct xfs_buf *bp = NULL; xfs_qm_dqpurge() local 159 error = xfs_qm_dqflush(dqp, &bp); xfs_qm_dqpurge() 164 error = xfs_bwrite(bp); xfs_qm_dqpurge() 165 xfs_buf_relse(bp); xfs_qm_dqpurge() 470 struct xfs_buf *bp = NULL; __releases() local 478 error = xfs_qm_dqflush(dqp, &bp); __releases() 485 xfs_buf_delwri_queue(bp, &isol->buffers); __releases() 486 xfs_buf_relse(bp); __releases() 813 xfs_buf_t *bp, xfs_qm_reset_dqcounts() 820 trace_xfs_reset_dqcounts(bp, _RET_IP_); xfs_qm_reset_dqcounts() 831 dqb = bp->b_addr; xfs_qm_reset_dqcounts() 876 struct xfs_buf *bp; xfs_qm_dqiter_bufs() local 897 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqiter_bufs() 910 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqiter_bufs() 922 bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_dqiter_bufs() 923 xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_qm_dqiter_bufs() 924 xfs_buf_delwri_queue(bp, buffer_list); xfs_qm_dqiter_bufs() 925 xfs_buf_relse(bp); xfs_qm_dqiter_bufs() 1223 struct xfs_buf *bp = NULL; xfs_qm_flush_one() local 1233 error = xfs_qm_dqflush(dqp, &bp); xfs_qm_flush_one() 1237 xfs_buf_delwri_queue(bp, buffer_list); xfs_qm_flush_one() 1238 xfs_buf_relse(bp); xfs_qm_flush_one() 1360 struct xfs_buf *bp = xfs_qm_quotacheck() local 1362 list_del_init(&bp->b_list); xfs_qm_quotacheck() 1363 xfs_buf_relse(bp); xfs_qm_quotacheck() 811 xfs_qm_reset_dqcounts( xfs_mount_t *mp, xfs_buf_t *bp, xfs_dqid_t id, uint type) xfs_qm_reset_dqcounts() argument
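xfs_qm_dqpurge(), the isolate callback, and xfs_qm_flush_one() above all share one shape: xfs_qm_dqflush() writes the dquot into its backing buffer, the buffer goes onto a caller-supplied delwri list via xfs_buf_delwri_queue(), the hold is dropped with xfs_buf_relse(), and everything queued is written in one later pass. A toy version of such a delayed-write queue:

        #include <stdio.h>

        struct buf {
                int             id;
                struct buf      *b_list;        /* delwri chaining, cf. bp->b_list */
        };

        static void delwri_queue(struct buf *bp, struct buf **list)
        {
                bp->b_list = *list;             /* queue now ... */
                *list = bp;
        }

        static void delwri_submit(struct buf **list)
        {
                for (struct buf *bp = *list; bp; bp = bp->b_list)
                        printf("writing buffer %d\n", bp->id);  /* ... write once, later */
                *list = NULL;
        }

        int main(void)
        {
                struct buf a = { 1 }, b = { 2 };
                struct buf *buffer_list = NULL;

                delwri_queue(&a, &buffer_list); /* cf. xfs_buf_delwri_queue(bp, buffer_list) */
                delwri_queue(&b, &buffer_list);
                delwri_submit(&buffer_list);
                return 0;
        }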
|
H A D | xfs_trace.h | 309 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), 310 TP_ARGS(bp, caller_ip), 322 __entry->dev = bp->b_target->bt_dev; 323 __entry->bno = bp->b_bn; 324 __entry->nblks = bp->b_length; 325 __entry->hold = atomic_read(&bp->b_hold); 326 __entry->pincount = atomic_read(&bp->b_pin_count); 327 __entry->lockval = bp->b_sema.count; 328 __entry->flags = bp->b_flags; 345 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ 346 TP_ARGS(bp, caller_ip)) 382 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), 383 TP_ARGS(bp, flags, caller_ip), 395 __entry->dev = bp->b_target->bt_dev; 396 __entry->bno = bp->b_bn; 397 __entry->buffer_length = BBTOB(bp->b_length); 399 __entry->hold = atomic_read(&bp->b_hold); 400 __entry->pincount = atomic_read(&bp->b_pin_count); 401 __entry->lockval = bp->b_sema.count; 418 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ 419 TP_ARGS(bp, flags, caller_ip)) 425 TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip), 426 TP_ARGS(bp, error, caller_ip), 439 __entry->dev = bp->b_target->bt_dev; 440 __entry->bno = bp->b_bn; 441 __entry->buffer_length = BBTOB(bp->b_length); 442 __entry->hold = atomic_read(&bp->b_hold); 443 __entry->pincount = atomic_read(&bp->b_pin_count); 444 __entry->lockval = bp->b_sema.count; 446 __entry->flags = bp->b_flags;
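The TP_fast_assign bodies above copy a fixed set of fields out of the buffer at the instant the tracepoint fires, so the recorded event stays stable even if bp changes afterwards. The same snapshot idea modelled as a plain struct; fake_buf is a stand-in for struct xfs_buf, and only the captured field names follow the excerpt:

        #include <stdio.h>

        struct fake_buf {                       /* stand-in for struct xfs_buf */
                unsigned long long b_bn;
                int b_length, b_hold, b_pin_count, b_sema_count;
                unsigned b_flags;
        };

        struct buf_trace_rec {                  /* what TP_fast_assign captures */
                unsigned long long bno;
                int nblks, hold, pincount, lockval;
                unsigned flags;
        };

        static struct buf_trace_rec snap(const struct fake_buf *bp)
        {
                struct buf_trace_rec r = {
                        .bno = bp->b_bn,             .nblks = bp->b_length,
                        .hold = bp->b_hold,          .pincount = bp->b_pin_count,
                        .lockval = bp->b_sema_count, .flags = bp->b_flags,
                };
                return r;                       /* copied by value: a stable snapshot */
        }

        int main(void)
        {
                struct fake_buf b = { 8192, 16, 1, 0, 1, 0x2 };
                struct buf_trace_rec r = snap(&b);
                b.b_hold = 0;                   /* later changes don't touch the record */
                printf("bno %llu hold %d\n", r.bno, r.hold);
                return 0;
        }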
|
H A D | xfs_log.c | 1167 xlog_iodone(xfs_buf_t *bp) xlog_iodone() argument 1169 struct xlog_in_core *iclog = bp->b_fspriv; xlog_iodone() 1176 if (XFS_TEST_ERROR(bp->b_error, l->l_mp, xlog_iodone() 1178 xfs_buf_ioerror_alert(bp, __func__); xlog_iodone() 1179 xfs_buf_stale(bp); xlog_iodone() 1192 ASSERT(XFS_BUF_ISASYNC(bp)); xlog_iodone() 1199 * (bp) after the unlock as we could race with it being freed. xlog_iodone() 1201 xfs_buf_unlock(bp); xlog_iodone() 1333 xfs_buf_t *bp; xlog_alloc_log() local 1397 bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL, xlog_alloc_log() 1399 if (!bp) xlog_alloc_log() 1407 ASSERT(xfs_buf_islocked(bp)); xlog_alloc_log() 1408 xfs_buf_unlock(bp); xlog_alloc_log() 1411 bp->b_ioend_wq = mp->m_log_workqueue; xlog_alloc_log() 1412 bp->b_iodone = xlog_iodone; xlog_alloc_log() 1413 log->l_xbuf = bp; xlog_alloc_log() 1436 bp = xfs_buf_get_uncached(mp->m_logdev_targp, xlog_alloc_log() 1438 if (!bp) xlog_alloc_log() 1441 ASSERT(xfs_buf_islocked(bp)); xlog_alloc_log() 1442 xfs_buf_unlock(bp); xlog_alloc_log() 1445 bp->b_ioend_wq = mp->m_log_workqueue; xlog_alloc_log() 1446 bp->b_iodone = xlog_iodone; xlog_alloc_log() 1447 iclog->ic_bp = bp; xlog_alloc_log() 1448 iclog->ic_data = bp->b_addr; xlog_alloc_log() 1462 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; xlog_alloc_log() 1687 struct xfs_buf *bp) xlog_bdstrat() 1689 struct xlog_in_core *iclog = bp->b_fspriv; xlog_bdstrat() 1691 xfs_buf_lock(bp); xlog_bdstrat() 1693 xfs_buf_ioerror(bp, -EIO); xlog_bdstrat() 1694 xfs_buf_stale(bp); xlog_bdstrat() 1695 xfs_buf_ioend(bp); xlog_bdstrat() 1705 xfs_buf_submit(bp); xlog_bdstrat() 1739 xfs_buf_t *bp; xlog_sync() local 1783 bp = iclog->ic_bp; xlog_sync() 1784 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); xlog_sync() 1789 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { xlog_sync() 1792 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); xlog_sync() 1793 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); xlog_sync() 1820 bp->b_io_length = BTOBB(count); xlog_sync() 1821 bp->b_fspriv = iclog; xlog_sync() 1822 XFS_BUF_ZEROFLAGS(bp); xlog_sync() 1823 XFS_BUF_ASYNC(bp); xlog_sync() 1824 bp->b_flags |= XBF_SYNCIO; xlog_sync() 1827 bp->b_flags |= XBF_FUA; xlog_sync() 1841 bp->b_flags |= XBF_FLUSH; xlog_sync() 1844 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); xlog_sync() 1845 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); xlog_sync() 1850 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); xlog_sync() 1855 XFS_BUF_WRITE(bp); xlog_sync() 1857 error = xlog_bdstrat(bp); xlog_sync() 1859 xfs_buf_ioerror_alert(bp, "xlog_sync"); xlog_sync() 1863 bp = iclog->ic_log->l_xbuf; xlog_sync() 1864 XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ xlog_sync() 1865 xfs_buf_associate_memory(bp, xlog_sync() 1867 bp->b_fspriv = iclog; xlog_sync() 1868 XFS_BUF_ZEROFLAGS(bp); xlog_sync() 1869 XFS_BUF_ASYNC(bp); xlog_sync() 1870 bp->b_flags |= XBF_SYNCIO; xlog_sync() 1872 bp->b_flags |= XBF_FUA; xlog_sync() 1874 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); xlog_sync() 1875 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); xlog_sync() 1878 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); xlog_sync() 1879 XFS_BUF_WRITE(bp); xlog_sync() 1880 error = xlog_bdstrat(bp); xlog_sync() 1882 xfs_buf_ioerror_alert(bp, "xlog_sync (split)"); xlog_sync() 1686 xlog_bdstrat( struct xfs_buf *bp) xlog_bdstrat() argument
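xlog_sync() above handles the circular log: when XFS_BUF_ADDR(bp) + BTOBB(count) would run past l_logBBsize, the write is split and the remainder goes out as a second I/O from block 0 (through l_xbuf). A small sketch of that split arithmetic, kept entirely in 512-byte basic blocks rather than the byte/BB mix the driver uses; all the sizes below are invented:

        #include <stdio.h>

        int main(void)
        {
                int logBBsize = 1024;   /* hypothetical log size, in basic blocks */
                int addr      = 1000;   /* XFS_BUF_ADDR(bp): start block of this write */
                int count_bb  = 100;    /* BTOBB(count): blocks to write */

                if (addr + count_bb > logBBsize) {
                        int tail  = logBBsize - addr;   /* fits before the end */
                        int split = count_bb - tail;    /* wraps around to block 0 */

                        printf("I/O 1: blocks %d..%d\n", addr, logBBsize - 1);
                        printf("I/O 2: blocks 0..%d (the split write)\n", split - 1);
                } else {
                        printf("single I/O: blocks %d..%d\n", addr, addr + count_bb - 1);
                }
                return 0;
        }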
|
H A D | xfs_rtalloc.c | 116 xfs_buf_t *bp; /* summary buffer */ xfs_rtcopy_summary() local 122 bp = NULL; xfs_rtcopy_summary() 127 error = xfs_rtget_summary(omp, tp, log, bbno, &bp, xfs_rtcopy_summary() 134 &bp, &sumbno); xfs_rtcopy_summary() 138 &bp, &sumbno); xfs_rtcopy_summary() 768 xfs_buf_t *bp; /* temporary buffer for zeroing */ xfs_growfs_rt_alloc() local 849 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, xfs_growfs_rt_alloc() 851 if (bp == NULL) { xfs_growfs_rt_alloc() 857 memset(bp->b_addr, 0, mp->m_sb.sb_blocksize); xfs_growfs_rt_alloc() 858 xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); xfs_growfs_rt_alloc() 890 xfs_buf_t *bp; /* temporary buffer */ xfs_growfs_rt() local 923 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_growfs_rt() 926 xfs_buf_relse(bp); xfs_growfs_rt() 1060 bp = NULL; xfs_growfs_rt() 1062 nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); xfs_growfs_rt() 1181 struct xfs_buf *bp; /* buffer for last block of subvolume */ xfs_rtmount_init() local 1212 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_rtmount_init() 1217 xfs_buf_relse(bp); xfs_rtmount_init()
|
H A D | xfs_inode_item.c | 413 struct xfs_buf *bp = NULL; xfs_inode_item_push() local 455 error = xfs_iflush(ip, &bp); xfs_inode_item_push() 457 if (!xfs_buf_delwri_queue(bp, buffer_list)) xfs_inode_item_push() 459 xfs_buf_relse(bp); xfs_inode_item_push() 598 struct xfs_buf *bp, xfs_iflush_done() 612 blip = bp->b_fspriv; xfs_iflush_done() 624 bp->b_fspriv = next; xfs_iflush_done() 738 struct xfs_buf *bp, xfs_istale_done() 597 xfs_iflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_iflush_done() argument 737 xfs_istale_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_istale_done() argument
|
H A D | xfs_dquot_item.c | 147 struct xfs_buf *bp = NULL; variable in typeref:struct:xfs_buf 178 error = xfs_qm_dqflush(dqp, &bp); 183 if (!xfs_buf_delwri_queue(bp, buffer_list)) 185 xfs_buf_relse(bp); variable
|
/linux-4.1.27/drivers/sbus/char/ |
H A D | bbc_i2c.c | 53 static void set_device_claimage(struct bbc_i2c_bus *bp, struct platform_device *op, int val) set_device_claimage() argument 58 if (bp->devs[i].device == op) { set_device_claimage() 59 bp->devs[i].client_claimed = val; set_device_claimage() 68 struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index) bbc_i2c_getdev() argument 74 if (!(op = bp->devs[i].device)) bbc_i2c_getdev() 88 struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *op) bbc_i2c_attach() argument 96 client->bp = bp; bbc_i2c_attach() 108 claim_device(bp, op); bbc_i2c_attach() 115 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_detach() local 118 release_device(bp, op); bbc_i2c_detach() 122 static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status) wait_for_pin() argument 128 bp->waiting = 1; wait_for_pin() 129 add_wait_queue(&bp->wq, &wait); wait_for_pin() 134 bp->wq, wait_for_pin() 135 (((*status = readb(bp->i2c_control_regs + 0)) wait_for_pin() 143 remove_wait_queue(&bp->wq, &wait); wait_for_pin() 144 bp->waiting = 0; wait_for_pin() 151 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_writeb() local 156 if (bp->i2c_bussel_reg != NULL) bbc_i2c_writeb() 157 writeb(client->bus, bp->i2c_bussel_reg); bbc_i2c_writeb() 159 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_writeb() 160 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_writeb() 161 if (wait_for_pin(bp, &status)) bbc_i2c_writeb() 164 writeb(off, bp->i2c_control_regs + 0x1); bbc_i2c_writeb() 165 if (wait_for_pin(bp, &status) || bbc_i2c_writeb() 169 writeb(val, bp->i2c_control_regs + 0x1); bbc_i2c_writeb() 170 if (wait_for_pin(bp, &status)) bbc_i2c_writeb() 176 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_writeb() 182 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_readb() local 186 if (bp->i2c_bussel_reg != NULL) bbc_i2c_readb() 187 writeb(client->bus, bp->i2c_bussel_reg); bbc_i2c_readb() 189 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_readb() 190 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_readb() 191 if (wait_for_pin(bp, &status)) bbc_i2c_readb() 194 writeb(off, bp->i2c_control_regs + 0x1); bbc_i2c_readb() 195 if (wait_for_pin(bp, &status) || bbc_i2c_readb() 199 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_readb() 203 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_readb() 204 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_readb() 205 if (wait_for_pin(bp, &status)) bbc_i2c_readb() 211 (void) readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb() 212 if (wait_for_pin(bp, &status)) bbc_i2c_readb() 215 writeb(I2C_PCF_ESO | I2C_PCF_ENI, bp->i2c_control_regs + 0x0); bbc_i2c_readb() 216 *byte = readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb() 217 if (wait_for_pin(bp, &status)) bbc_i2c_readb() 223 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_readb() 224 (void) readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb() 272 struct bbc_i2c_bus *bp = dev_id; bbc_i2c_interrupt() local 277 if (bp->waiting && bbc_i2c_interrupt() 278 !(readb(bp->i2c_control_regs + 0x0) & I2C_PCF_PIN)) bbc_i2c_interrupt() 279 wake_up_interruptible(&bp->wq); bbc_i2c_interrupt() 284 static void reset_one_i2c(struct bbc_i2c_bus *bp) reset_one_i2c() argument 286 writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); reset_one_i2c() 287 writeb(bp->own, bp->i2c_control_regs + 0x1); reset_one_i2c() 288 writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); reset_one_i2c() 289 writeb(bp->clock, bp->i2c_control_regs + 0x1); reset_one_i2c() 290 writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); reset_one_i2c()
295 struct bbc_i2c_bus *bp; attach_one_i2c() local 299 bp = kzalloc(sizeof(*bp), GFP_KERNEL); attach_one_i2c() 300 if (!bp) attach_one_i2c() 303 INIT_LIST_HEAD(&bp->temps); attach_one_i2c() 304 INIT_LIST_HEAD(&bp->fans); attach_one_i2c() 306 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); attach_one_i2c() 307 if (!bp->i2c_control_regs) attach_one_i2c() 311 bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); attach_one_i2c() 312 if (!bp->i2c_bussel_reg) attach_one_i2c() 316 bp->waiting = 0; attach_one_i2c() 317 init_waitqueue_head(&bp->wq); attach_one_i2c() 319 IRQF_SHARED, "bbc_i2c", bp)) attach_one_i2c() 322 bp->index = index; attach_one_i2c() 323 bp->op = op; attach_one_i2c() 325 spin_lock_init(&bp->lock); attach_one_i2c() 334 bp->devs[entry].device = child_op; attach_one_i2c() 335 bp->devs[entry].client_claimed = 0; attach_one_i2c() 338 writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); attach_one_i2c() 339 bp->own = readb(bp->i2c_control_regs + 0x01); attach_one_i2c() 340 writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); attach_one_i2c() 341 bp->clock = readb(bp->i2c_control_regs + 0x01); attach_one_i2c() 344 bp->index, bp->i2c_control_regs, entry, bp->own, bp->clock); attach_one_i2c() 346 reset_one_i2c(bp); attach_one_i2c() 348 return bp; attach_one_i2c() 351 if (bp->i2c_bussel_reg) attach_one_i2c() 352 of_iounmap(&op->resource[1], bp->i2c_bussel_reg, 1); attach_one_i2c() 353 if (bp->i2c_control_regs) attach_one_i2c() 354 of_iounmap(&op->resource[0], bp->i2c_control_regs, 2); attach_one_i2c() 355 kfree(bp); attach_one_i2c() 359 extern int bbc_envctrl_init(struct bbc_i2c_bus *bp); 360 extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp); 364 struct bbc_i2c_bus *bp; bbc_i2c_probe() local 367 bp = attach_one_i2c(op, index); bbc_i2c_probe() 368 if (!bp) bbc_i2c_probe() 371 err = bbc_envctrl_init(bp); bbc_i2c_probe() 373 free_irq(op->archdata.irqs[0], bp); bbc_i2c_probe() 374 if (bp->i2c_bussel_reg) bbc_i2c_probe() 375 of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); bbc_i2c_probe() 376 if (bp->i2c_control_regs) bbc_i2c_probe() 377 of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); bbc_i2c_probe() 378 kfree(bp); bbc_i2c_probe() 380 dev_set_drvdata(&op->dev, bp); bbc_i2c_probe() 388 struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev); bbc_i2c_remove() local 390 bbc_envctrl_cleanup(bp); bbc_i2c_remove() 392 free_irq(op->archdata.irqs[0], bp); bbc_i2c_remove() 394 if (bp->i2c_bussel_reg) bbc_i2c_remove() 395 of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); bbc_i2c_remove() 396 if (bp->i2c_control_regs) bbc_i2c_remove() 397 of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); bbc_i2c_remove() 399 kfree(bp); bbc_i2c_remove()
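wait_for_pin() above parks the caller on bp->wq until bbc_i2c_interrupt() sees the PCF8584's PIN bit drop and calls wake_up_interruptible(); bp->waiting is the handshake flag between sleeper and IRQ. The two halves of that handshake sketched with a pthread condition variable (register polling reduced to a plain pin_cleared flag; both names are invented):

        #include <pthread.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER; /* models bp->wq */
        static int waiting;             /* models bp->waiting */
        static int pin_cleared;         /* models the PIN bit dropping */

        static void wait_for_pin(void)
        {
                pthread_mutex_lock(&lock);
                waiting = 1;                            /* cf. bp->waiting = 1 */
                while (!pin_cleared)
                        pthread_cond_wait(&wq, &lock);  /* cf. sleeping on bp->wq */
                waiting = 0;                            /* cf. bp->waiting = 0 */
                pthread_mutex_unlock(&lock);
        }

        static void irq_handler(void)                   /* cf. bbc_i2c_interrupt() */
        {
                pthread_mutex_lock(&lock);
                pin_cleared = 1;
                if (waiting)                            /* cf. the bp->waiting test */
                        pthread_cond_broadcast(&wq);    /* cf. wake_up_interruptible(&bp->wq) */
                pthread_mutex_unlock(&lock);
        }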
|
H A D | bbc_envctrl.c | 445 static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op, attach_one_temp() argument 457 tp->client = bbc_i2c_attach(bp, op); attach_one_temp() 467 list_add(&tp->bp_list, &bp->temps); attach_one_temp() 493 static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op, attach_one_fan() argument 505 fp->client = bbc_i2c_attach(bp, op); attach_one_fan() 514 list_add(&fp->bp_list, &bp->fans); attach_one_fan() 537 static void destroy_all_temps(struct bbc_i2c_bus *bp) destroy_all_temps() argument 541 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { destroy_all_temps() 554 static void destroy_all_fans(struct bbc_i2c_bus *bp) destroy_all_fans() argument 558 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) { destroy_all_fans() 565 int bbc_envctrl_init(struct bbc_i2c_bus *bp) bbc_envctrl_init() argument 572 while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) { bbc_envctrl_init() 574 attach_one_temp(bp, op, temp_index++); bbc_envctrl_init() 576 attach_one_fan(bp, op, fan_index++); bbc_envctrl_init() 584 destroy_all_temps(bp); bbc_envctrl_init() 585 destroy_all_fans(bp); bbc_envctrl_init() 593 void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) bbc_envctrl_cleanup() argument 598 destroy_all_temps(bp); bbc_envctrl_cleanup() 599 destroy_all_fans(bp); bbc_envctrl_cleanup()
|
H A D | bbc_i2c.h | 9 struct bbc_i2c_bus *bp; member in struct:bbc_i2c_client 76 extern struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *);
|
/linux-4.1.27/kernel/debug/kdb/ |
H A D | kdb_bp.c | 40 static char *kdb_bptype(kdb_bp_t *bp) kdb_bptype() argument 42 if (bp->bp_type < 0 || bp->bp_type > 4) kdb_bptype() 45 return kdb_rwtypes[bp->bp_type]; kdb_bptype() 48 static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp) kdb_parsebp() argument 53 bp->bph_length = 1; kdb_parsebp() 56 bp->bp_type = BP_ACCESS_WATCHPOINT; kdb_parsebp() 58 bp->bp_type = BP_WRITE_WATCHPOINT; kdb_parsebp() 60 bp->bp_type = BP_HARDWARE_BREAKPOINT; kdb_parsebp() 64 bp->bph_length = 1; kdb_parsebp() 80 bp->bph_length = len; kdb_parsebp() 92 static int _kdb_bp_remove(kdb_bp_t *bp) _kdb_bp_remove() argument 95 if (!bp->bp_installed) _kdb_bp_remove() 97 if (!bp->bp_type) _kdb_bp_remove() 98 ret = dbg_remove_sw_break(bp->bp_addr); _kdb_bp_remove() 100 ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr, _kdb_bp_remove() 101 bp->bph_length, _kdb_bp_remove() 102 bp->bp_type); _kdb_bp_remove() 104 bp->bp_installed = 0; _kdb_bp_remove() 108 static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) kdb_handle_bp() argument 121 bp->bp_delay = 0; kdb_handle_bp() 122 bp->bp_delayed = 1; kdb_handle_bp() 125 static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp) _kdb_bp_install() argument 134 __func__, bp->bp_installed); _kdb_bp_install() 136 bp->bp_delay = 0; _kdb_bp_install() 137 if (bp->bp_installed) _kdb_bp_install() 139 if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) { _kdb_bp_install() 141 kdb_printf("%s: delayed bp\n", __func__); _kdb_bp_install() 142 kdb_handle_bp(regs, bp); _kdb_bp_install() 145 if (!bp->bp_type) _kdb_bp_install() 146 ret = dbg_set_sw_break(bp->bp_addr); _kdb_bp_install() 148 ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr, _kdb_bp_install() 149 bp->bph_length, _kdb_bp_install() 150 bp->bp_type); _kdb_bp_install() 152 bp->bp_installed = 1; _kdb_bp_install() 155 __func__, bp->bp_addr); _kdb_bp_install() 157 if (!bp->bp_type) { _kdb_bp_install() 181 kdb_bp_t *bp = &kdb_breakpoints[i]; kdb_bp_install() local 184 kdb_printf("%s: bp %d bp_enabled %d\n", kdb_bp_install() 185 __func__, i, bp->bp_enabled); kdb_bp_install() 187 if (bp->bp_enabled) kdb_bp_install() 188 _kdb_bp_install(regs, bp); kdb_bp_install() 212 kdb_bp_t *bp = &kdb_breakpoints[i]; kdb_bp_remove() local 215 kdb_printf("%s: bp %d bp_enabled %d\n", kdb_bp_remove() 216 __func__, i, bp->bp_enabled); kdb_bp_remove() 218 if (bp->bp_enabled) kdb_bp_remove() 219 _kdb_bp_remove(bp); kdb_bp_remove() 240 static void kdb_printbp(kdb_bp_t *bp, int i) kdb_printbp() argument 242 kdb_printf("%s ", kdb_bptype(bp)); kdb_printbp() 244 kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT); kdb_printbp() 246 if (bp->bp_enabled) kdb_printbp() 252 bp->bp_addr, bp->bp_type, bp->bp_installed); kdb_printbp() 260 * Handle the bp commands. 262 * [bp|bph] <addr-expression> [DATAR|DATAW] 275 * bp Set breakpoint on all cpus. Only use hardware assist if need. 
282 kdb_bp_t *bp, *bp_check; kdb_bp() local 293 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; kdb_bp() 294 bpno++, bp++) { kdb_bp() 295 if (bp->bp_free) kdb_bp() 297 kdb_printbp(bp, bpno); kdb_bp() 312 * Find an empty bp structure to allocate kdb_bp() 314 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { kdb_bp() 315 if (bp->bp_free) kdb_bp() 352 *bp = template; kdb_bp() 353 bp->bp_free = 0; kdb_bp() 355 kdb_printbp(bp, bpno); kdb_bp() 382 kdb_bp_t *bp = NULL; kdb_bc() local 417 bp = &kdb_breakpoints[addr]; kdb_bc() 421 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; kdb_bc() 422 i++, bp++) { kdb_bc() 423 if (bp->bp_addr == addr) { kdb_bc() 436 for (bp = &kdb_breakpoints[lowbp], i = lowbp; kdb_bc() 438 i++, bp++) { kdb_bc() 439 if (bp->bp_free) kdb_bc() 446 bp->bp_enabled = 0; kdb_bc() 450 i, bp->bp_addr); kdb_bc() 452 bp->bp_addr = 0; kdb_bc() 453 bp->bp_free = 1; kdb_bc() 457 bp->bp_enabled = 1; kdb_bc() 461 i, bp->bp_addr); kdb_bc() 466 if (!bp->bp_enabled) kdb_bc() 469 bp->bp_enabled = 0; kdb_bc() 473 i, bp->bp_addr); kdb_bc() 477 if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) { kdb_bc() 478 bp->bp_delay = 0; kdb_bc() 524 kdb_bp_t *bp; kdb_initbptab() local 531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) kdb_initbptab() 532 bp->bp_free = 1; kdb_initbptab() 534 kdb_register_flags("bp", kdb_bp, "[<vaddr>]", kdb_initbptab()
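_kdb_bp_install()/_kdb_bp_remove() above dispatch on bp->bp_type: type 0 means a software breakpoint (dbg_set_sw_break/dbg_remove_sw_break), anything else goes through the arch_kgdb_ops hardware hooks, and bp_installed records whether either path succeeded. A compact model of that dispatch; the set_sw_break/set_hw_break stubs below stand in for the real kgdb calls:

        #include <stdio.h>

        struct kbp {                            /* trimmed-down kdb_bp_t */
                unsigned long   bp_addr;
                int             bp_type;        /* 0 == software breakpoint */
                int             bp_installed;
        };

        static int set_sw_break(unsigned long a) { printf("sw bp @%#lx\n", a); return 0; }
        static int set_hw_break(unsigned long a) { printf("hw bp @%#lx\n", a); return 0; }

        static int bp_install(struct kbp *bp)
        {
                int ret;

                if (bp->bp_installed)           /* cf. the bp_installed guard above */
                        return 0;
                if (!bp->bp_type)               /* cf. "if (!bp->bp_type)" */
                        ret = set_sw_break(bp->bp_addr);
                else
                        ret = set_hw_break(bp->bp_addr);
                if (!ret)
                        bp->bp_installed = 1;
                return ret;
        }

        int main(void)
        {
                struct kbp sw = { 0xc0de, 0, 0 }, hw = { 0xbeef, 2, 0 };
                bp_install(&sw);
                bp_install(&hw);
                return 0;
        }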
|
H A D | kdb_debugger.c | 56 kdb_bp_t *bp; kdb_stub() local 78 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { kdb_stub() 79 if ((bp->bp_enabled) && (bp->bp_addr == addr)) { kdb_stub() 88 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { kdb_stub() 89 if (bp->bp_free) kdb_stub() 91 if (bp->bp_addr == addr) { kdb_stub() 92 bp->bp_delay = 1; kdb_stub() 93 bp->bp_delayed = 1; kdb_stub()
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
H A D | sunbmac.c | 97 static void qec_init(struct bigmac *bp) qec_init() argument 99 struct platform_device *qec_op = bp->qec_op; qec_init() 100 void __iomem *gregs = bp->gregs; qec_init() 101 u8 bsizes = bp->bigmac_bursts; qec_init() 164 static void bigmac_stop(struct bigmac *bp) bigmac_stop() argument 166 bigmac_tx_reset(bp->bregs); bigmac_stop() 167 bigmac_rx_reset(bp->bregs); bigmac_stop() 170 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) bigmac_get_counters() argument 172 struct net_device_stats *stats = &bp->enet_stats; bigmac_get_counters() 192 static void bigmac_clean_rings(struct bigmac *bp) bigmac_clean_rings() argument 197 if (bp->rx_skbs[i] != NULL) { bigmac_clean_rings() 198 dev_kfree_skb_any(bp->rx_skbs[i]); bigmac_clean_rings() 199 bp->rx_skbs[i] = NULL; bigmac_clean_rings() 204 if (bp->tx_skbs[i] != NULL) { bigmac_clean_rings() 205 dev_kfree_skb_any(bp->tx_skbs[i]); bigmac_clean_rings() 206 bp->tx_skbs[i] = NULL; bigmac_clean_rings() 211 static void bigmac_init_rings(struct bigmac *bp, int from_irq) bigmac_init_rings() argument 213 struct bmac_init_block *bb = bp->bmac_block; bigmac_init_rings() 220 bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; bigmac_init_rings() 223 bigmac_clean_rings(bp); bigmac_init_rings() 233 bp->rx_skbs[i] = skb; bigmac_init_rings() 240 dma_map_single(&bp->bigmac_op->dev, bigmac_init_rings() 267 static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) write_tcvr_bit() argument 269 if (bp->tcvr_type == internal) { write_tcvr_bit() 277 } else if (bp->tcvr_type == external) { write_tcvr_bit() 290 static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) read_tcvr_bit() argument 294 if (bp->tcvr_type == internal) { read_tcvr_bit() 301 } else if (bp->tcvr_type == external) { read_tcvr_bit() 313 static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) read_tcvr_bit2() argument 317 if (bp->tcvr_type == internal) { read_tcvr_bit2() 323 } else if (bp->tcvr_type == external) { read_tcvr_bit2() 335 static void put_tcvr_byte(struct bigmac *bp, put_tcvr_byte() argument 342 write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); put_tcvr_byte() 347 static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, bigmac_tcvr_write() argument 354 switch(bp->tcvr_type) { bigmac_tcvr_write() 365 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write() 366 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write() 367 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write() 368 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write() 370 put_tcvr_byte(bp, tregs, bigmac_tcvr_write() 371 ((bp->tcvr_type == internal) ? bigmac_tcvr_write() 374 put_tcvr_byte(bp, tregs, reg); bigmac_tcvr_write() 376 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write() 377 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write() 381 write_tcvr_bit(bp, tregs, (val >> shift) & 1); bigmac_tcvr_write() 386 static unsigned short bigmac_tcvr_read(struct bigmac *bp, bigmac_tcvr_read() argument 393 switch(bp->tcvr_type) { bigmac_tcvr_read() 404 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_read() 405 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_read() 406 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_read() 407 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_read() 409 put_tcvr_byte(bp, tregs, bigmac_tcvr_read() 410 ((bp->tcvr_type == internal) ? bigmac_tcvr_read()
413 put_tcvr_byte(bp, tregs, reg); bigmac_tcvr_read() 415 if (bp->tcvr_type == external) { bigmac_tcvr_read() 418 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 419 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 424 tmp = read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 429 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 430 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 431 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read() 435 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 436 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 441 tmp = read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 446 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 447 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 448 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read() 453 static void bigmac_tcvr_init(struct bigmac *bp) bigmac_tcvr_init() argument 455 void __iomem *tregs = bp->tregs; bigmac_tcvr_init() 472 bp->tcvr_type = external; bigmac_tcvr_init() 477 bp->tcvr_type = internal; bigmac_tcvr_init() 493 static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) try_next_permutation() argument 495 if (bp->sw_bmcr & BMCR_SPEED100) { try_next_permutation() 499 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); try_next_permutation() 500 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation() 501 bp->sw_bmcr = (BMCR_RESET); try_next_permutation() 502 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation() 506 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); try_next_permutation() 507 if ((bp->sw_bmcr & BMCR_RESET) == 0) try_next_permutation() 512 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); try_next_permutation() 514 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); try_next_permutation() 517 bp->sw_bmcr &= ~(BMCR_SPEED100); try_next_permutation() 518 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation() 528 struct bigmac *bp = (struct bigmac *) data; bigmac_timer() local 529 void __iomem *tregs = bp->tregs; bigmac_timer() 532 bp->timer_ticks++; bigmac_timer() 533 if (bp->timer_state == ltrywait) { bigmac_timer() 534 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bigmac_timer() 535 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_timer() 536 if (bp->sw_bmsr & BMSR_LSTATUS) { bigmac_timer() 538 bp->dev->name, bigmac_timer() 539 (bp->sw_bmcr & BMCR_SPEED100) ? bigmac_timer() 541 bp->timer_state = asleep; bigmac_timer() 544 if (bp->timer_ticks >= 4) { bigmac_timer() 547 ret = try_next_permutation(bp, tregs); bigmac_timer() 550 bp->dev->name); bigmac_timer() 551 ret = bigmac_init_hw(bp, 0); bigmac_timer() 554 "BigMAC.\n", bp->dev->name); bigmac_timer() 558 bp->timer_ticks = 0; bigmac_timer() 567 bp->dev->name); bigmac_timer() 569 bp->timer_ticks = 0; bigmac_timer() 570 bp->timer_state = asleep; /* foo on you */ bigmac_timer() 574 bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ bigmac_timer()
575 add_timer(&bp->bigmac_timer); bigmac_timer() 582 static void bigmac_begin_auto_negotiation(struct bigmac *bp) bigmac_begin_auto_negotiation() argument 584 void __iomem *tregs = bp->tregs; bigmac_begin_auto_negotiation() 588 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bigmac_begin_auto_negotiation() 589 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation() 592 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_begin_auto_negotiation() 593 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation() 594 bp->sw_bmcr = (BMCR_RESET); bigmac_begin_auto_negotiation() 595 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation() 599 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation() 600 if ((bp->sw_bmcr & BMCR_RESET) == 0) bigmac_begin_auto_negotiation() 605 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bigmac_begin_auto_negotiation() 607 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation() 610 bp->sw_bmcr |= BMCR_SPEED100; bigmac_begin_auto_negotiation() 611 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation() 613 bp->timer_state = ltrywait; bigmac_begin_auto_negotiation() 614 bp->timer_ticks = 0; bigmac_begin_auto_negotiation() 615 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; bigmac_begin_auto_negotiation() 616 bp->bigmac_timer.data = (unsigned long) bp; bigmac_begin_auto_negotiation() 617 bp->bigmac_timer.function = bigmac_timer; bigmac_begin_auto_negotiation() 618 add_timer(&bp->bigmac_timer); bigmac_begin_auto_negotiation() 621 static int bigmac_init_hw(struct bigmac *bp, int from_irq) bigmac_init_hw() argument 623 void __iomem *gregs = bp->gregs; bigmac_init_hw() 624 void __iomem *cregs = bp->creg; bigmac_init_hw() 625 void __iomem *bregs = bp->bregs; bigmac_init_hw() 626 unsigned char *e = &bp->dev->dev_addr[0]; bigmac_init_hw() 629 bigmac_get_counters(bp, bregs); bigmac_init_hw() 635 qec_init(bp); bigmac_init_hw() 638 bigmac_init_rings(bp, from_irq); bigmac_init_hw() 641 bigmac_tcvr_init(bp); bigmac_init_hw() 644 bigmac_stop(bp); bigmac_init_hw() 674 sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), bigmac_init_hw() 676 sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), bigmac_init_hw() 710 bigmac_begin_auto_negotiation(bp); bigmac_init_hw() 717 static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) bigmac_is_medium_rare() argument 752 bigmac_init_hw(bp, 1); bigmac_is_medium_rare() 756 static void bigmac_tx(struct bigmac *bp) bigmac_tx() argument 758 struct be_txd *txbase = &bp->bmac_block->be_txd[0]; bigmac_tx() 759 struct net_device *dev = bp->dev; bigmac_tx() 762 spin_lock(&bp->lock); bigmac_tx() 764 elem = bp->tx_old; bigmac_tx() 766 while (elem != bp->tx_new) { bigmac_tx() 775 skb = bp->tx_skbs[elem]; bigmac_tx() 776 bp->enet_stats.tx_packets++; bigmac_tx() 777 bp->enet_stats.tx_bytes += skb->len; bigmac_tx() 778 dma_unmap_single(&bp->bigmac_op->dev, bigmac_tx() 783 bp->tx_skbs[elem] = NULL; bigmac_tx() 789 bp->tx_old = elem; bigmac_tx() 792 TX_BUFFS_AVAIL(bp) > 0) bigmac_tx() 793 netif_wake_queue(bp->dev); bigmac_tx() 795 spin_unlock(&bp->lock); bigmac_tx() 799 static void bigmac_rx(struct bigmac *bp) bigmac_rx() argument 801 struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; bigmac_rx() 803 int elem = bp->rx_new, drops = 0; bigmac_rx() 813 bp->enet_stats.rx_errors++; bigmac_rx() 814 bp->enet_stats.rx_length_errors++; bigmac_rx()
818 bp->enet_stats.rx_dropped++; bigmac_rx() 823 skb = bp->rx_skbs[elem]; bigmac_rx() 833 dma_unmap_single(&bp->bigmac_op->dev, bigmac_rx() 837 bp->rx_skbs[elem] = new_skb; bigmac_rx() 841 dma_map_single(&bp->bigmac_op->dev, bigmac_rx() 851 struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2); bigmac_rx() 859 dma_sync_single_for_cpu(&bp->bigmac_op->dev, bigmac_rx() 863 dma_sync_single_for_device(&bp->bigmac_op->dev, bigmac_rx() 875 skb->protocol = eth_type_trans(skb, bp->dev); bigmac_rx() 877 bp->enet_stats.rx_packets++; bigmac_rx() 878 bp->enet_stats.rx_bytes += len; bigmac_rx() 883 bp->rx_new = elem; bigmac_rx() 885 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); bigmac_rx() 890 struct bigmac *bp = (struct bigmac *) dev_id; bigmac_interrupt() local 896 bmac_status = sbus_readl(bp->creg + CREG_STAT); bigmac_interrupt() 897 qec_status = sbus_readl(bp->gregs + GLOB_STAT); bigmac_interrupt() 902 bigmac_is_medium_rare(bp, qec_status, bmac_status); bigmac_interrupt() 905 bigmac_tx(bp); bigmac_interrupt() 908 bigmac_rx(bp); bigmac_interrupt() 915 struct bigmac *bp = netdev_priv(dev); bigmac_open() local 918 ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); bigmac_open() 923 init_timer(&bp->bigmac_timer); bigmac_open() 924 ret = bigmac_init_hw(bp, 0); bigmac_open() 926 free_irq(dev->irq, bp); bigmac_open() 932 struct bigmac *bp = netdev_priv(dev); bigmac_close() local 934 del_timer(&bp->bigmac_timer); bigmac_close() 935 bp->timer_state = asleep; bigmac_close() 936 bp->timer_ticks = 0; bigmac_close() 938 bigmac_stop(bp); bigmac_close() 939 bigmac_clean_rings(bp); bigmac_close() 940 free_irq(dev->irq, bp); bigmac_close() 946 struct bigmac *bp = netdev_priv(dev); bigmac_tx_timeout() local 948 bigmac_init_hw(bp, 0); bigmac_tx_timeout() 955 struct bigmac *bp = netdev_priv(dev); bigmac_start_xmit() local 960 mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, bigmac_start_xmit() 964 spin_lock_irq(&bp->lock); bigmac_start_xmit() 965 entry = bp->tx_new; bigmac_start_xmit() 967 bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; bigmac_start_xmit() 968 bp->tx_skbs[entry] = skb; bigmac_start_xmit() 969 bp->bmac_block->be_txd[entry].tx_addr = mapping; bigmac_start_xmit() 970 bp->bmac_block->be_txd[entry].tx_flags = bigmac_start_xmit() 972 bp->tx_new = NEXT_TX(entry); bigmac_start_xmit() 973 if (TX_BUFFS_AVAIL(bp) <= 0) bigmac_start_xmit() 975 spin_unlock_irq(&bp->lock); bigmac_start_xmit() 978 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); bigmac_start_xmit() 986 struct bigmac *bp = netdev_priv(dev); bigmac_get_stats() local 988 bigmac_get_counters(bp, bp->bregs); bigmac_get_stats() 989 return &bp->enet_stats; bigmac_get_stats() 994 struct bigmac *bp = netdev_priv(dev); bigmac_set_multicast() local 995 void __iomem *bregs = bp->bregs; bigmac_set_multicast() 1046 struct bigmac *bp = netdev_priv(dev); bigmac_get_link() local 1048 spin_lock_irq(&bp->lock); bigmac_get_link() 1049 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); bigmac_get_link() 1050 spin_unlock_irq(&bp->lock); bigmac_get_link() 1052 return (bp->sw_bmsr & BMSR_LSTATUS); bigmac_get_link() 1078 struct bigmac *bp; bigmac_ether_init() local 1093 bp = netdev_priv(dev); bigmac_ether_init() 1094 bp->qec_op = qec_op; bigmac_ether_init() 1095 bp->bigmac_op = op; bigmac_ether_init() 1099 spin_lock_init(&bp->lock); bigmac_ether_init() 1102 bp->gregs = of_ioremap(&qec_op->resource[0], 0, bigmac_ether_init() 1104 if (!bp->gregs) { bigmac_ether_init()
1110 if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { bigmac_ether_init() 1116 if (qec_global_reset(bp->gregs)) bigmac_ether_init() 1129 bp->bigmac_bursts = bsizes; bigmac_ether_init() 1132 qec_init(bp); bigmac_ether_init() 1135 bp->creg = of_ioremap(&op->resource[0], 0, bigmac_ether_init() 1137 if (!bp->creg) { bigmac_ether_init() 1143 bp->bregs = of_ioremap(&op->resource[1], 0, bigmac_ether_init() 1145 if (!bp->bregs) { bigmac_ether_init() 1153 bp->tregs = of_ioremap(&op->resource[2], 0, bigmac_ether_init() 1155 if (!bp->tregs) { bigmac_ether_init() 1161 bigmac_stop(bp); bigmac_ether_init() 1164 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, bigmac_ether_init() 1166 &bp->bblock_dvma, GFP_ATOMIC); bigmac_ether_init() 1167 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) bigmac_ether_init() 1171 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, bigmac_ether_init() 1175 init_timer(&bp->bigmac_timer); bigmac_ether_init() 1176 bp->timer_state = asleep; bigmac_ether_init() 1177 bp->timer_ticks = 0; bigmac_ether_init() 1180 bp->dev = dev; bigmac_ether_init() 1188 dev->irq = bp->bigmac_op->archdata.irqs[0]; bigmac_ether_init() 1196 dev_set_drvdata(&bp->bigmac_op->dev, bp); bigmac_ether_init() 1206 if (bp->gregs) bigmac_ether_init() 1207 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); bigmac_ether_init() 1208 if (bp->creg) bigmac_ether_init() 1209 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); bigmac_ether_init() 1210 if (bp->bregs) bigmac_ether_init() 1211 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); bigmac_ether_init() 1212 if (bp->tregs) bigmac_ether_init() 1213 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); bigmac_ether_init() 1215 if (bp->bmac_block) bigmac_ether_init() 1216 dma_free_coherent(&bp->bigmac_op->dev, bigmac_ether_init() 1218 bp->bmac_block, bigmac_ether_init() 1219 bp->bblock_dvma); bigmac_ether_init() 1241 struct bigmac *bp = platform_get_drvdata(op); bigmac_sbus_remove() local 1243 struct net_device *net_dev = bp->dev; bigmac_sbus_remove() 1250 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); bigmac_sbus_remove() 1251 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); bigmac_sbus_remove() 1252 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); bigmac_sbus_remove() 1253 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); bigmac_sbus_remove() 1256 bp->bmac_block, bigmac_sbus_remove() 1257 bp->bblock_dvma); bigmac_sbus_remove()
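The sunbmac transceiver access above is fully bit-banged MII: bigmac_tcvr_write() clocks out framing bits, the (internal or external) PHY address, the register number and then the 16 data bits MSB-first through write_tcvr_bit(), exactly as put_tcvr_byte() does per byte. The MSB-first shifting in isolation, with write_bit() as a printing stand-in for write_tcvr_bit(bp, tregs, bit):

        #include <stdio.h>

        static void write_bit(int bit)          /* stand-in for write_tcvr_bit() */
        {
                putchar(bit ? '1' : '0');
        }

        static void put_byte(unsigned char byte)        /* cf. put_tcvr_byte() */
        {
                for (int shift = 7; shift >= 0; shift--)        /* MSB first */
                        write_bit((byte >> shift) & 1);
        }

        static void put_word(unsigned short val)        /* cf. the 16-bit data loop */
        {
                for (int shift = 15; shift >= 0; shift--)
                        write_bit((val >> shift) & 1);
        }

        int main(void)
        {
                put_byte(0x12);         /* e.g. a PHY/register address field */
                putchar(' ');
                put_word(0xBEEF);       /* e.g. an MII_BMCR value being written */
                putchar('\n');
                return 0;
        }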
|
H A D | sunbmac.h | 258 #define TX_BUFFS_AVAIL(bp) \ 259 (((bp)->tx_old <= (bp)->tx_new) ? \ 260 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \ 261 (bp)->tx_old - (bp)->tx_new - 1)
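TX_BUFFS_AVAIL() above is the classic free-space computation for a circular descriptor ring that keeps one slot empty so that full and empty remain distinguishable. Worked through once, assuming TX_RING_SIZE is 256 (the header's actual value is not shown here):

        #include <stdio.h>

        #define TX_RING_SIZE 256        /* assumed example value */

        static int tx_buffs_avail(int tx_old, int tx_new)
        {
                /* identical logic to the TX_BUFFS_AVAIL(bp) macro above */
                return tx_old <= tx_new
                        ? tx_old + (TX_RING_SIZE - 1) - tx_new
                        : tx_old - tx_new - 1;
        }

        int main(void)
        {
                printf("%d\n", tx_buffs_avail(0, 0));   /* empty ring: 255 free */
                printf("%d\n", tx_buffs_avail(10, 9));  /* producer one behind consumer: 0 free */
                printf("%d\n", tx_buffs_avail(5, 250)); /* wrapped producer: 10 free */
                return 0;
        }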
|
/linux-4.1.27/drivers/net/ethernet/ |
H A D | dnet.c | 30 static u16 dnet_readw_mac(struct dnet *bp, u16 reg) dnet_readw_mac() argument 35 dnet_writel(bp, reg, MACREG_ADDR); dnet_readw_mac() 42 data_read = dnet_readl(bp, MACREG_DATA); dnet_readw_mac() 49 static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) dnet_writew_mac() argument 52 dnet_writel(bp, val, MACREG_DATA); dnet_writew_mac() 55 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); dnet_writew_mac() 62 static void __dnet_set_hwaddr(struct dnet *bp) __dnet_set_hwaddr() argument 66 tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); __dnet_set_hwaddr() 67 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); __dnet_set_hwaddr() 68 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); __dnet_set_hwaddr() 69 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); __dnet_set_hwaddr() 70 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); __dnet_set_hwaddr() 71 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); __dnet_set_hwaddr() 74 static void dnet_get_hwaddr(struct dnet *bp) dnet_get_hwaddr() argument 91 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); dnet_get_hwaddr() 93 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); dnet_get_hwaddr() 95 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); dnet_get_hwaddr() 99 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); dnet_get_hwaddr() 104 struct dnet *bp = bus->priv; dnet_mdio_read() local 107 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_read() 120 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); dnet_mdio_read() 123 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_read() 127 value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); dnet_mdio_read() 137 struct dnet *bp = bus->priv; dnet_mdio_write() local 142 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_write() 161 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); dnet_mdio_write() 164 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); dnet_mdio_write() 166 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_write() 175 struct dnet *bp = netdev_priv(dev); dnet_handle_link_change() local 176 struct phy_device *phydev = bp->phy_dev; dnet_handle_link_change() 182 spin_lock_irqsave(&bp->lock, flags); dnet_handle_link_change() 184 mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); dnet_handle_link_change() 185 ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); dnet_handle_link_change() 188 if (bp->duplex != phydev->duplex) { dnet_handle_link_change() 196 bp->duplex = phydev->duplex; dnet_handle_link_change() 200 if (bp->speed != phydev->speed) { dnet_handle_link_change() 217 bp->speed = phydev->speed; dnet_handle_link_change() 221 if (phydev->link != bp->link) { dnet_handle_link_change() 229 bp->speed = 0; dnet_handle_link_change() 230 bp->duplex = -1; dnet_handle_link_change() 232 bp->link = phydev->link; dnet_handle_link_change() 238 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); dnet_handle_link_change() 239 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); dnet_handle_link_change() 242 spin_unlock_irqrestore(&bp->lock, flags); dnet_handle_link_change() 256 struct dnet *bp = netdev_priv(dev); dnet_mii_probe() local 262 if (bp->mii_bus->phy_map[phy_addr]) { dnet_mii_probe() 263 phydev = bp->mii_bus->phy_map[phy_addr]; dnet_mii_probe() 276 if (bp->capabilities & DNET_HAS_RMII) { dnet_mii_probe() 292 if (bp->capabilities & DNET_HAS_GIGABIT) dnet_mii_probe() 301 bp->link = 0; dnet_mii_probe() 
302 bp->speed = 0; dnet_mii_probe() 303 bp->duplex = -1; dnet_mii_probe() 304 bp->phy_dev = phydev; dnet_mii_probe() 309 static int dnet_mii_init(struct dnet *bp) dnet_mii_init() argument 313 bp->mii_bus = mdiobus_alloc(); dnet_mii_init() 314 if (bp->mii_bus == NULL) dnet_mii_init() 317 bp->mii_bus->name = "dnet_mii_bus"; dnet_mii_init() 318 bp->mii_bus->read = &dnet_mdio_read; dnet_mii_init() 319 bp->mii_bus->write = &dnet_mdio_write; dnet_mii_init() 321 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dnet_mii_init() 322 bp->pdev->name, bp->pdev->id); dnet_mii_init() 324 bp->mii_bus->priv = bp; dnet_mii_init() 326 bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev, dnet_mii_init() 328 if (!bp->mii_bus->irq) { dnet_mii_init() 334 bp->mii_bus->irq[i] = PHY_POLL; dnet_mii_init() 336 if (mdiobus_register(bp->mii_bus)) { dnet_mii_init() 341 if (dnet_mii_probe(bp->dev) != 0) { dnet_mii_init() 349 mdiobus_unregister(bp->mii_bus); dnet_mii_init() 351 mdiobus_free(bp->mii_bus); dnet_mii_init() 361 static void dnet_update_stats(struct dnet *bp) dnet_update_stats() argument 363 u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; dnet_update_stats() 364 u32 *p = &bp->hw_stats.rx_pkt_ignr; dnet_update_stats() 365 u32 *end = &bp->hw_stats.rx_byte + 1; dnet_update_stats() 373 reg = bp->regs + DNET_TX_UNICAST_CNT; dnet_update_stats() 374 p = &bp->hw_stats.tx_unicast; dnet_update_stats() 375 end = &bp->hw_stats.tx_byte + 1; dnet_update_stats() 386 struct dnet *bp = container_of(napi, struct dnet, napi); dnet_poll() local 387 struct net_device *dev = bp->dev; dnet_poll() 401 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) dnet_poll() 404 cmd_word = dnet_readl(bp, RX_LEN_FIFO); dnet_poll() 421 *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); dnet_poll() 436 int_enable = dnet_readl(bp, INTR_ENB); dnet_poll() 438 dnet_writel(bp, int_enable, INTR_ENB); dnet_poll() 447 struct dnet *bp = netdev_priv(dev); dnet_interrupt() local 452 spin_lock_irqsave(&bp->lock, flags); dnet_interrupt() 455 int_src = dnet_readl(bp, INTR_SRC); dnet_interrupt() 456 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt() 461 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt() 463 dnet_writel(bp, int_enable, INTR_ENB); dnet_interrupt() 472 dnet_readl(bp, RX_STATUS), int_current); dnet_interrupt() 474 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); dnet_interrupt() 476 dnet_writel(bp, 0, SYS_CTL); dnet_interrupt() 484 dnet_readl(bp, TX_STATUS), int_current); dnet_interrupt() 486 dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); dnet_interrupt() 488 dnet_writel(bp, 0, SYS_CTL); dnet_interrupt() 493 if (napi_schedule_prep(&bp->napi)) { dnet_interrupt() 499 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt() 501 dnet_writel(bp, int_enable, INTR_ENB); dnet_interrupt() 502 __napi_schedule(&bp->napi); dnet_interrupt() 510 spin_unlock_irqrestore(&bp->lock, flags); dnet_interrupt() 531 struct dnet *bp = netdev_priv(dev); dnet_start_xmit() local 537 tx_status = dnet_readl(bp, TX_STATUS); dnet_start_xmit() 546 spin_lock_irqsave(&bp->lock, flags); dnet_start_xmit() 548 tx_status = dnet_readl(bp, TX_STATUS); dnet_start_xmit() 557 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { dnet_start_xmit() 559 dnet_writel(bp, *bufp++, TX_DATA_FIFO); dnet_start_xmit() 565 dnet_writel(bp, tx_cmd, TX_LEN_FIFO); dnet_start_xmit() 568 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { dnet_start_xmit() 570 tx_status = dnet_readl(bp, INTR_SRC); dnet_start_xmit() 571 irq_enable = dnet_readl(bp, INTR_ENB); dnet_start_xmit() 573 dnet_writel(bp, irq_enable, INTR_ENB); dnet_start_xmit()
581 spin_unlock_irqrestore(&bp->lock, flags); dnet_start_xmit() 586 static void dnet_reset_hw(struct dnet *bp) dnet_reset_hw() argument 589 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); dnet_reset_hw() 595 dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); dnet_reset_hw() 600 dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); dnet_reset_hw() 603 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, dnet_reset_hw() 606 dnet_writel(bp, 0, SYS_CTL); dnet_reset_hw() 609 static void dnet_init_hw(struct dnet *bp) dnet_init_hw() argument 613 dnet_reset_hw(bp); dnet_init_hw() 614 __dnet_set_hwaddr(bp); dnet_init_hw() 616 config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); dnet_init_hw() 618 if (bp->dev->flags & IFF_PROMISC) dnet_init_hw() 621 if (!(bp->dev->flags & IFF_BROADCAST)) dnet_init_hw() 630 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); dnet_init_hw() 633 config = dnet_readl(bp, INTR_SRC); dnet_init_hw() 636 dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | dnet_init_hw() 645 struct dnet *bp = netdev_priv(dev); dnet_open() local 648 if (!bp->phy_dev) dnet_open() 651 napi_enable(&bp->napi); dnet_open() 652 dnet_init_hw(bp); dnet_open() 654 phy_start_aneg(bp->phy_dev); dnet_open() 657 phy_start(bp->phy_dev); dnet_open() 666 struct dnet *bp = netdev_priv(dev); dnet_close() local 669 napi_disable(&bp->napi); dnet_close() 671 if (bp->phy_dev) dnet_close() 672 phy_stop(bp->phy_dev); dnet_close() 674 dnet_reset_hw(bp); dnet_close() 716 struct dnet *bp = netdev_priv(dev); dnet_get_stats() local 718 struct dnet_stats *hwstat = &bp->hw_stats; dnet_get_stats() 721 dnet_update_stats(bp); dnet_get_stats() 752 struct dnet *bp = netdev_priv(dev); dnet_get_settings() local 753 struct phy_device *phydev = bp->phy_dev; dnet_get_settings() 763 struct dnet *bp = netdev_priv(dev); dnet_set_settings() local 764 struct phy_device *phydev = bp->phy_dev; dnet_set_settings() 774 struct dnet *bp = netdev_priv(dev); dnet_ioctl() local 775 struct phy_device *phydev = bp->phy_dev; dnet_ioctl() 817 struct dnet *bp; dnet_probe() local 824 dev = alloc_etherdev(sizeof(*bp)); dnet_probe() 831 bp = netdev_priv(dev); dnet_probe() 832 bp->dev = dev; dnet_probe() 837 spin_lock_init(&bp->lock); dnet_probe() 840 bp->regs = devm_ioremap_resource(&pdev->dev, res); dnet_probe() 841 if (IS_ERR(bp->regs)) { dnet_probe() 842 err = PTR_ERR(bp->regs); dnet_probe() 855 netif_napi_add(dev, &bp->napi, dnet_poll, 64); dnet_probe() 858 dev->base_addr = (unsigned long)bp->regs; dnet_probe() 860 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; dnet_probe() 862 dnet_get_hwaddr(bp); dnet_probe() 867 __dnet_set_hwaddr(bp); dnet_probe() 883 err = dnet_mii_init(bp); dnet_probe() 888 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); dnet_probe() 890 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", dnet_probe() 891 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", dnet_probe() 892 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", dnet_probe() 893 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); dnet_probe() 894 phydev = bp->phy_dev; dnet_probe() 914 struct dnet *bp; dnet_remove() local 919 bp = netdev_priv(dev); dnet_remove() 920 if (bp->phy_dev) dnet_remove() 921 phy_disconnect(bp->phy_dev); dnet_remove() 922 mdiobus_unregister(bp->mii_bus); dnet_remove() 923 mdiobus_free(bp->mii_bus); dnet_remove()
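dnet_readw_mac()/dnet_writew_mac() above reach the MAC's internal registers indirectly: the register number goes into an address window (MACREG_ADDR) and the payload moves through a data window (MACREG_DATA); the real functions also wait on a status bit between the two steps, omitted below. The address/data-window idiom modelled over two fake registers:

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t macreg_addr, macreg_data;       /* stand-ins for the two windows */
        static uint16_t internal_regs[64];              /* the registers behind them */

        static void writew_mac(uint16_t reg, uint16_t val)
        {
                macreg_data = val;                      /* data first, cf. the excerpt */
                macreg_addr = reg;                      /* real code also sets a WRITE flag */
                internal_regs[reg & 63] = (uint16_t)macreg_data; /* "hardware" latches it */
        }

        static uint16_t readw_mac(uint16_t reg)
        {
                macreg_addr = reg;                      /* select the register ... */
                macreg_data = internal_regs[reg & 63];  /* ... then read the data window */
                return (uint16_t)macreg_data;
        }

        int main(void)
        {
                writew_mac(7, 0x1234);
                printf("%#x\n", (unsigned)readw_mac(7));
                return 0;
        }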
|
/linux-4.1.27/fs/afs/ |
H A D | fsclient.c | 24 const __be32 *bp = *_bp; xdr_decode_AFSFid() local 26 fid->vid = ntohl(*bp++); xdr_decode_AFSFid() 27 fid->vnode = ntohl(*bp++); xdr_decode_AFSFid() 28 fid->unique = ntohl(*bp++); xdr_decode_AFSFid() 29 *_bp = bp; xdr_decode_AFSFid() 41 const __be32 *bp = *_bp; xdr_decode_AFSFetchStatus() local 50 u32 x = ntohl(*bp++); \ xdr_decode_AFSFetchStatus() 55 status->if_version = ntohl(*bp++); xdr_decode_AFSFetchStatus() 58 size = ntohl(*bp++); xdr_decode_AFSFetchStatus() 59 data_version = ntohl(*bp++); xdr_decode_AFSFetchStatus() 61 owner = make_kuid(&init_user_ns, ntohl(*bp++)); xdr_decode_AFSFetchStatus() 69 bp++; /* seg size */ xdr_decode_AFSFetchStatus() 70 status->mtime_client = ntohl(*bp++); xdr_decode_AFSFetchStatus() 71 status->mtime_server = ntohl(*bp++); xdr_decode_AFSFetchStatus() 72 group = make_kgid(&init_user_ns, ntohl(*bp++)); xdr_decode_AFSFetchStatus() 75 bp++; /* sync counter */ xdr_decode_AFSFetchStatus() 76 data_version |= (u64) ntohl(*bp++) << 32; xdr_decode_AFSFetchStatus() 78 size |= (u64) ntohl(*bp++) << 32; xdr_decode_AFSFetchStatus() 79 bp++; /* spare 4 */ xdr_decode_AFSFetchStatus() 80 *_bp = bp; xdr_decode_AFSFetchStatus() 137 const __be32 *bp = *_bp; xdr_decode_AFSCallBack() local 139 vnode->cb_version = ntohl(*bp++); xdr_decode_AFSCallBack() 140 vnode->cb_expiry = ntohl(*bp++); xdr_decode_AFSCallBack() 141 vnode->cb_type = ntohl(*bp++); xdr_decode_AFSCallBack() 143 *_bp = bp; xdr_decode_AFSCallBack() 149 const __be32 *bp = *_bp; xdr_decode_AFSCallBack_raw() local 151 cb->version = ntohl(*bp++); xdr_decode_AFSCallBack_raw() 152 cb->expiry = ntohl(*bp++); xdr_decode_AFSCallBack_raw() 153 cb->type = ntohl(*bp++); xdr_decode_AFSCallBack_raw() 154 *_bp = bp; xdr_decode_AFSCallBack_raw() 163 const __be32 *bp = *_bp; xdr_decode_AFSVolSync() local 165 volsync->creation = ntohl(*bp++); xdr_decode_AFSVolSync() 166 bp++; /* spare2 */ xdr_decode_AFSVolSync() 167 bp++; /* spare3 */ xdr_decode_AFSVolSync() 168 bp++; /* spare4 */ xdr_decode_AFSVolSync() 169 bp++; /* spare5 */ xdr_decode_AFSVolSync() 170 bp++; /* spare6 */ xdr_decode_AFSVolSync() 171 *_bp = bp; xdr_decode_AFSVolSync() 179 __be32 *bp = *_bp; xdr_encode_AFS_StoreStatus() local 203 *bp++ = htonl(mask); xdr_encode_AFS_StoreStatus() 204 *bp++ = htonl(mtime); xdr_encode_AFS_StoreStatus() 205 *bp++ = htonl(owner); xdr_encode_AFS_StoreStatus() 206 *bp++ = htonl(group); xdr_encode_AFS_StoreStatus() 207 *bp++ = htonl(mode); xdr_encode_AFS_StoreStatus() 208 *bp++ = 0; /* segment size */ xdr_encode_AFS_StoreStatus() 209 *_bp = bp; xdr_encode_AFS_StoreStatus() 218 const __be32 *bp = *_bp; xdr_decode_AFSFetchVolumeStatus() local 220 vs->vid = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 221 vs->parent_id = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 222 vs->online = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 223 vs->in_service = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 224 vs->blessed = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 225 vs->needs_salvage = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 226 vs->type = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 227 vs->min_quota = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 228 vs->max_quota = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 229 vs->blocks_in_use = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 230 vs->part_blocks_avail = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 231 vs->part_max_blocks = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus() 232 *_bp = bp; xdr_decode_AFSFetchVolumeStatus() 242 const __be32 *bp; afs_deliver_fs_fetch_status() local
254 bp = call->buffer; afs_deliver_fs_fetch_status() 255 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_fetch_status() 256 xdr_decode_AFSCallBack(&bp, vnode); afs_deliver_fs_fetch_status() 258 xdr_decode_AFSVolSync(&bp, call->reply2); afs_deliver_fs_fetch_status() 284 __be32 *bp; afs_fs_fetch_file_status() local 300 bp = call->request; afs_fs_fetch_file_status() 301 bp[0] = htonl(FSFETCHSTATUS); afs_fs_fetch_file_status() 302 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_file_status() 303 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_file_status() 304 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_file_status() 316 const __be32 *bp; afs_deliver_fs_fetch_data() local 398 bp = call->buffer; afs_deliver_fs_fetch_data() 399 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_fetch_data() 400 xdr_decode_AFSCallBack(&bp, vnode); afs_deliver_fs_fetch_data() 402 xdr_decode_AFSVolSync(&bp, call->reply2); afs_deliver_fs_fetch_data() 457 __be32 *bp; afs_fs_fetch_data64() local 476 bp = call->request; afs_fs_fetch_data64() 477 bp[0] = htonl(FSFETCHDATA64); afs_fs_fetch_data64() 478 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_data64() 479 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_data64() 480 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_data64() 481 bp[4] = htonl(upper_32_bits(offset)); afs_fs_fetch_data64() 482 bp[5] = htonl((u32) offset); afs_fs_fetch_data64() 483 bp[6] = 0; afs_fs_fetch_data64() 484 bp[7] = htonl((u32) length); afs_fs_fetch_data64() 500 __be32 *bp; afs_fs_fetch_data() local 521 bp = call->request; afs_fs_fetch_data() 522 bp[0] = htonl(FSFETCHDATA); afs_fs_fetch_data() 523 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_data() 524 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_data() 525 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_data() 526 bp[4] = htonl(offset); afs_fs_fetch_data() 527 bp[5] = htonl(length); afs_fs_fetch_data() 564 __be32 *bp, *tp; afs_fs_give_up_callbacks() local 588 bp = call->request; afs_fs_give_up_callbacks() 589 tp = bp + 2 + ncallbacks * 3; afs_fs_give_up_callbacks() 590 *bp++ = htonl(FSGIVEUPCALLBACKS); afs_fs_give_up_callbacks() 591 *bp++ = htonl(ncallbacks); afs_fs_give_up_callbacks() 599 *bp++ = htonl(cb->fid.vid); afs_fs_give_up_callbacks() 600 *bp++ = htonl(cb->fid.vnode); afs_fs_give_up_callbacks() 601 *bp++ = htonl(cb->fid.unique); afs_fs_give_up_callbacks() 624 const __be32 *bp; afs_deliver_fs_create_vnode() local 636 bp = call->buffer; afs_deliver_fs_create_vnode() 637 xdr_decode_AFSFid(&bp, call->reply2); afs_deliver_fs_create_vnode() 638 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL); afs_deliver_fs_create_vnode() 639 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_create_vnode() 640 xdr_decode_AFSCallBack_raw(&bp, call->reply4); afs_deliver_fs_create_vnode() 641 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_create_vnode() 672 __be32 *bp; afs_fs_create() local 694 bp = call->request; afs_fs_create() 695 *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE); afs_fs_create()
696 *bp++ = htonl(vnode->fid.vid); afs_fs_create() 697 *bp++ = htonl(vnode->fid.vnode); afs_fs_create() 698 *bp++ = htonl(vnode->fid.unique); afs_fs_create() 699 *bp++ = htonl(namesz); afs_fs_create() 700 memcpy(bp, name, namesz); afs_fs_create() 701 bp = (void *) bp + namesz; afs_fs_create() 703 memset(bp, 0, padsz); afs_fs_create() 704 bp = (void *) bp + padsz; afs_fs_create() 706 *bp++ = htonl(AFS_SET_MODE); afs_fs_create() 707 *bp++ = 0; /* mtime */ afs_fs_create() 708 *bp++ = 0; /* owner */ afs_fs_create() 709 *bp++ = 0; /* group */ afs_fs_create() 710 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ afs_fs_create() 711 *bp++ = 0; /* segment size */ afs_fs_create() 723 const __be32 *bp; afs_deliver_fs_remove() local 735 bp = call->buffer; afs_deliver_fs_remove() 736 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_remove() 737 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_remove() 765 __be32 *bp; afs_fs_remove() local 783 bp = call->request; afs_fs_remove() 784 *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE); afs_fs_remove() 785 *bp++ = htonl(vnode->fid.vid); afs_fs_remove() 786 *bp++ = htonl(vnode->fid.vnode); afs_fs_remove() 787 *bp++ = htonl(vnode->fid.unique); afs_fs_remove() 788 *bp++ = htonl(namesz); afs_fs_remove() 789 memcpy(bp, name, namesz); afs_fs_remove() 790 bp = (void *) bp + namesz; afs_fs_remove() 792 memset(bp, 0, padsz); afs_fs_remove() 793 bp = (void *) bp + padsz; afs_fs_remove() 806 const __be32 *bp; afs_deliver_fs_link() local 818 bp = call->buffer; afs_deliver_fs_link() 819 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_link() 820 xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode, NULL); afs_deliver_fs_link() 821 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_link() 849 __be32 *bp; afs_fs_link() local 868 bp = call->request; afs_fs_link() 869 *bp++ = htonl(FSLINK); afs_fs_link() 870 *bp++ = htonl(dvnode->fid.vid); afs_fs_link() 871 *bp++ = htonl(dvnode->fid.vnode); afs_fs_link() 872 *bp++ = htonl(dvnode->fid.unique); afs_fs_link() 873 *bp++ = htonl(namesz); afs_fs_link() 874 memcpy(bp, name, namesz); afs_fs_link() 875 bp = (void *) bp + namesz; afs_fs_link() 877 memset(bp, 0, padsz); afs_fs_link() 878 bp = (void *) bp + padsz; afs_fs_link() 880 *bp++ = htonl(vnode->fid.vid); afs_fs_link() 881 *bp++ = htonl(vnode->fid.vnode); afs_fs_link() 882 *bp++ = htonl(vnode->fid.unique); afs_fs_link() 894 const __be32 *bp; afs_deliver_fs_symlink() local 906 bp = call->buffer; afs_deliver_fs_symlink() 907 xdr_decode_AFSFid(&bp, call->reply2); afs_deliver_fs_symlink() 908 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL); afs_deliver_fs_symlink() 909 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_symlink() 910 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_symlink() 940 __be32 *bp; afs_fs_symlink() local 965 bp = call->request; afs_fs_symlink() 966 *bp++ = htonl(FSSYMLINK); afs_fs_symlink() 967 *bp++ = htonl(vnode->fid.vid); afs_fs_symlink() 968 *bp++ = htonl(vnode->fid.vnode); afs_fs_symlink() 969 *bp++ = htonl(vnode->fid.unique); afs_fs_symlink() 970 *bp++ = htonl(namesz); afs_fs_symlink() 971 memcpy(bp, name, namesz); afs_fs_symlink() 972 bp = (void *) bp + namesz; afs_fs_symlink() 974 memset(bp, 0, padsz); afs_fs_symlink() 975 bp = (void *) bp + padsz; afs_fs_symlink() 977 *bp++ = htonl(c_namesz); afs_fs_symlink() 978 memcpy(bp, contents, c_namesz); afs_fs_symlink() 979 bp = (void *) bp + c_namesz; afs_fs_symlink()
981 memset(bp, 0, c_padsz); afs_fs_symlink() 982 bp = (void *) bp + c_padsz; afs_fs_symlink() 984 *bp++ = htonl(AFS_SET_MODE); afs_fs_symlink() 985 *bp++ = 0; /* mtime */ afs_fs_symlink() 986 *bp++ = 0; /* owner */ afs_fs_symlink() 987 *bp++ = 0; /* group */ afs_fs_symlink() 988 *bp++ = htonl(S_IRWXUGO); /* unix mode */ afs_fs_symlink() 989 *bp++ = 0; /* segment size */ afs_fs_symlink() 1001 const __be32 *bp; afs_deliver_fs_rename() local 1013 bp = call->buffer; afs_deliver_fs_rename() 1014 xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode, NULL); afs_deliver_fs_rename() 1016 xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode, afs_deliver_fs_rename() 1018 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_rename() 1047 __be32 *bp; afs_fs_rename() local 1073 bp = call->request; afs_fs_rename() 1074 *bp++ = htonl(FSRENAME); afs_fs_rename() 1075 *bp++ = htonl(orig_dvnode->fid.vid); afs_fs_rename() 1076 *bp++ = htonl(orig_dvnode->fid.vnode); afs_fs_rename() 1077 *bp++ = htonl(orig_dvnode->fid.unique); afs_fs_rename() 1078 *bp++ = htonl(o_namesz); afs_fs_rename() 1079 memcpy(bp, orig_name, o_namesz); afs_fs_rename() 1080 bp = (void *) bp + o_namesz; afs_fs_rename() 1082 memset(bp, 0, o_padsz); afs_fs_rename() 1083 bp = (void *) bp + o_padsz; afs_fs_rename() 1086 *bp++ = htonl(new_dvnode->fid.vid); afs_fs_rename() 1087 *bp++ = htonl(new_dvnode->fid.vnode); afs_fs_rename() 1088 *bp++ = htonl(new_dvnode->fid.unique); afs_fs_rename() 1089 *bp++ = htonl(n_namesz); afs_fs_rename() 1090 memcpy(bp, new_name, n_namesz); afs_fs_rename() 1091 bp = (void *) bp + n_namesz; afs_fs_rename() 1093 memset(bp, 0, n_padsz); afs_fs_rename() 1094 bp = (void *) bp + n_padsz; afs_fs_rename() 1107 const __be32 *bp; afs_deliver_fs_store_data() local 1124 bp = call->buffer; afs_deliver_fs_store_data() 1125 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, afs_deliver_fs_store_data() 1127 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_store_data() 1164 __be32 *bp; afs_fs_store_data64() local 1189 bp = call->request; afs_fs_store_data64() 1190 *bp++ = htonl(FSSTOREDATA64); afs_fs_store_data64() 1191 *bp++ = htonl(vnode->fid.vid); afs_fs_store_data64() 1192 *bp++ = htonl(vnode->fid.vnode); afs_fs_store_data64() 1193 *bp++ = htonl(vnode->fid.unique); afs_fs_store_data64() 1195 *bp++ = 0; /* mask */ afs_fs_store_data64() 1196 *bp++ = 0; /* mtime */ afs_fs_store_data64() 1197 *bp++ = 0; /* owner */ afs_fs_store_data64() 1198 *bp++ = 0; /* group */ afs_fs_store_data64() 1199 *bp++ = 0; /* unix mode */ afs_fs_store_data64() 1200 *bp++ = 0; /* segment size */ afs_fs_store_data64() 1202 *bp++ = htonl(pos >> 32); afs_fs_store_data64() 1203 *bp++ = htonl((u32) pos); afs_fs_store_data64() 1204 *bp++ = htonl(size >> 32); afs_fs_store_data64() 1205 *bp++ = htonl((u32) size); afs_fs_store_data64() 1206 *bp++ = htonl(i_size >> 32); afs_fs_store_data64() 1207 *bp++ = htonl((u32) i_size); afs_fs_store_data64() 1223 __be32 *bp; afs_fs_store_data() local 1266 bp = call->request; afs_fs_store_data() 1267 *bp++ = htonl(FSSTOREDATA); afs_fs_store_data() 1268 *bp++ = htonl(vnode->fid.vid); afs_fs_store_data() 1269 *bp++ = htonl(vnode->fid.vnode); afs_fs_store_data() 1270 *bp++ = htonl(vnode->fid.unique); afs_fs_store_data() 1272 *bp++ = 0; /* mask */ afs_fs_store_data() 1273 *bp++ = 0; /* mtime */ afs_fs_store_data() 1274 *bp++ = 0; /* owner */ afs_fs_store_data() 1275 *bp++ = 0; /* group */ afs_fs_store_data() 1276 *bp++ = 0; /* unix mode */ afs_fs_store_data()
1277 *bp++ = 0; /* segment size */ afs_fs_store_data() 1279 *bp++ = htonl(pos); afs_fs_store_data() 1280 *bp++ = htonl(size); afs_fs_store_data() 1281 *bp++ = htonl(i_size); afs_fs_store_data() 1294 const __be32 *bp; afs_deliver_fs_store_status() local 1315 bp = call->buffer; afs_deliver_fs_store_status() 1316 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, store_version); afs_deliver_fs_store_status() 1317 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_store_status() 1356 __be32 *bp; afs_fs_setattr_size64() local 1377 bp = call->request; afs_fs_setattr_size64() 1378 *bp++ = htonl(FSSTOREDATA64); afs_fs_setattr_size64() 1379 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr_size64() 1380 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr_size64() 1381 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr_size64() 1383 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr_size64() 1385 *bp++ = 0; /* position of start of write */ afs_fs_setattr_size64() 1386 *bp++ = 0; afs_fs_setattr_size64() 1387 *bp++ = 0; /* size of write */ afs_fs_setattr_size64() 1388 *bp++ = 0; afs_fs_setattr_size64() 1389 *bp++ = htonl(attr->ia_size >> 32); /* new file length */ afs_fs_setattr_size64() 1390 *bp++ = htonl((u32) attr->ia_size); afs_fs_setattr_size64() 1404 __be32 *bp; afs_fs_setattr_size() local 1428 bp = call->request; afs_fs_setattr_size() 1429 *bp++ = htonl(FSSTOREDATA); afs_fs_setattr_size() 1430 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr_size() 1431 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr_size() 1432 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr_size() 1434 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr_size() 1436 *bp++ = 0; /* position of start of write */ afs_fs_setattr_size() 1437 *bp++ = 0; /* size of write */ afs_fs_setattr_size() 1438 *bp++ = htonl(attr->ia_size); /* new file length */ afs_fs_setattr_size() 1452 __be32 *bp; afs_fs_setattr() local 1474 bp = call->request; afs_fs_setattr() 1475 *bp++ = htonl(FSSTORESTATUS); afs_fs_setattr() 1476 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr() 1477 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr() 1478 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr() 1480 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr() 1491 const __be32 *bp; afs_deliver_fs_get_volume_status() local 1513 bp = call->buffer; afs_deliver_fs_get_volume_status() 1514 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); afs_deliver_fs_get_volume_status() 1730 __be32 *bp; afs_fs_get_volume_status() local 1753 bp = call->request; afs_fs_get_volume_status() 1754 bp[0] = htonl(FSGETVOLUMESTATUS); afs_fs_get_volume_status() 1755 bp[1] = htonl(vnode->fid.vid); afs_fs_get_volume_status() 1766 const __be32 *bp; afs_deliver_fs_xxxx_lock() local 1778 bp = call->buffer; afs_deliver_fs_xxxx_lock() 1779 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_xxxx_lock() 1825 __be32 *bp; afs_fs_set_lock() local 1839 bp = call->request; afs_fs_set_lock() 1840 *bp++ = htonl(FSSETLOCK); afs_fs_set_lock() 1841 *bp++ = htonl(vnode->fid.vid); afs_fs_set_lock() 1842 *bp++ = htonl(vnode->fid.vnode); afs_fs_set_lock() 1843 *bp++ = htonl(vnode->fid.unique); afs_fs_set_lock() 1844 *bp++ = htonl(type); afs_fs_set_lock() 1858 __be32 *bp; afs_fs_extend_lock() local 1872 bp = call->request; afs_fs_extend_lock() 1873 *bp++ = htonl(FSEXTENDLOCK); afs_fs_extend_lock() 1874 *bp++ = htonl(vnode->fid.vid); afs_fs_extend_lock() 1875 *bp++ = htonl(vnode->fid.vnode); afs_fs_extend_lock() 1876 *bp++ = htonl(vnode->fid.unique); afs_fs_extend_lock() 1890
__be32 *bp; afs_fs_release_lock() local 1904 bp = call->request; afs_fs_release_lock() 1905 *bp++ = htonl(FSRELEASELOCK); afs_fs_release_lock() 1906 *bp++ = htonl(vnode->fid.vid); afs_fs_release_lock() 1907 *bp++ = htonl(vnode->fid.vnode); afs_fs_release_lock() 1908 *bp++ = htonl(vnode->fid.unique); afs_fs_release_lock()
|
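All of the afs_fs_* senders above marshal their requests the same way: a __be32 cursor emits big-endian words via htonl(), and each name is copied raw and then zero-padded out to a 4-byte boundary. A minimal userspace sketch of that pattern — the xdr_put_* helpers are illustrative names, not AFS API:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() */

/* Emit one 32-bit word in network byte order, advancing the cursor. */
static uint32_t *xdr_put_u32(uint32_t *bp, uint32_t val)
{
	*bp++ = htonl(val);
	return bp;
}

/* Emit a counted string, zero-padding the tail to a 4-byte boundary,
 * as the memcpy()/memset() pairs in afs_fs_create() et al. do. */
static uint32_t *xdr_put_str(uint32_t *bp, const char *name, size_t namesz)
{
	size_t padsz = (4 - (namesz & 3)) & 3;	/* 0..3 pad bytes */

	bp = xdr_put_u32(bp, (uint32_t)namesz);
	memcpy(bp, name, namesz);
	memset((char *)bp + namesz, 0, padsz);
	return (uint32_t *)((char *)bp + namesz + padsz);
}

With those two helpers, the FSREMOVEFILE request above is four xdr_put_u32() calls (the opcode plus the three FID words) followed by one xdr_put_str().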
H A D | vlclient.c | 65 __be32 *bp; afs_deliver_vl_get_entry_by_xxx() local 80 bp = call->buffer; afs_deliver_vl_get_entry_by_xxx() 83 entry->name[loop] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 85 bp++; /* final NUL */ afs_deliver_vl_get_entry_by_xxx() 87 bp++; /* type */ afs_deliver_vl_get_entry_by_xxx() 88 entry->nservers = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 91 entry->servers[loop].s_addr = *bp++; afs_deliver_vl_get_entry_by_xxx() 93 bp += 8; /* partition IDs */ afs_deliver_vl_get_entry_by_xxx() 96 tmp = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 106 entry->vid[0] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 107 entry->vid[1] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 108 entry->vid[2] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx() 110 bp++; /* clone ID */ afs_deliver_vl_get_entry_by_xxx() 112 tmp = ntohl(*bp++); /* flags */ afs_deliver_vl_get_entry_by_xxx() 158 __be32 *bp; afs_vl_get_entry_by_name() local 176 bp = call->request; afs_vl_get_entry_by_name() 177 *bp++ = htonl(VLGETENTRYBYNAME); afs_vl_get_entry_by_name() 178 *bp++ = htonl(volnamesz); afs_vl_get_entry_by_name() 179 memcpy(bp, volname, volnamesz); afs_vl_get_entry_by_name() 181 memset((void *) bp + volnamesz, 0, padsz); afs_vl_get_entry_by_name() 198 __be32 *bp; afs_vl_get_entry_by_id() local 212 bp = call->request; afs_vl_get_entry_by_id() 213 *bp++ = htonl(VLGETENTRYBYID); afs_vl_get_entry_by_id() 214 *bp++ = htonl(volid); afs_vl_get_entry_by_id() 215 *bp = htonl(voltype); afs_vl_get_entry_by_id()
|
H A D | cmservice.c | 177 __be32 *bp; afs_deliver_cb_callback() local 227 bp = call->buffer; afs_deliver_cb_callback() 229 cb->fid.vid = ntohl(*bp++); afs_deliver_cb_callback() 230 cb->fid.vnode = ntohl(*bp++); afs_deliver_cb_callback() 231 cb->fid.unique = ntohl(*bp++); afs_deliver_cb_callback() 269 bp = call->buffer; afs_deliver_cb_callback() 271 cb->version = ntohl(*bp++); afs_deliver_cb_callback() 272 cb->expiry = ntohl(*bp++); afs_deliver_cb_callback() 273 cb->type = ntohl(*bp++); afs_deliver_cb_callback()
|
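The deliver routines are the mirror image: a const cursor walks the reply with ntohl(), and unwanted fields are skipped by bumping the pointer (bp++ past the clone ID, bp += 8 past the partition IDs in afs_deliver_vl_get_entry_by_xxx()). A sketch of the decode side, using the three-word FID layout visible in afs_deliver_cb_callback():

#include <stdint.h>
#include <arpa/inet.h>		/* ntohl() */

struct toy_fid { uint32_t vid, vnode, unique; };

/* Decode one FID, advancing the cursor past the three words consumed. */
static const uint32_t *xdr_get_fid(const uint32_t *bp, struct toy_fid *fid)
{
	fid->vid    = ntohl(*bp++);
	fid->vnode  = ntohl(*bp++);
	fid->unique = ntohl(*bp++);
	return bp;	/* caller continues here, or skips: bp += n */
}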
/linux-4.1.27/arch/x86/include/asm/ |
H A D | frame.h | 11 __ASM_SIZE(push,_cfi) %__ASM_REG(bp) 12 CFI_REL_OFFSET __ASM_REG(bp), 0 13 __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) 16 __ASM_SIZE(pop,_cfi) %__ASM_REG(bp) 17 CFI_RESTORE __ASM_REG(bp)
|
H A D | stacktrace.h | 19 unsigned long bp, 27 unsigned long *stack, unsigned long bp, 33 unsigned long *stack, unsigned long bp, 47 unsigned long *stack, unsigned long bp, 52 #define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :) 55 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) 62 unsigned long bp; stack_frame() local 65 return regs->bp; stack_frame() 68 /* Grab bp right from our regs */ stack_frame() 69 get_bp(bp); stack_frame() 70 return bp; stack_frame() 73 /* bp is the last reg pushed by switch_to */ stack_frame() 86 unsigned long *stack, unsigned long bp, char *log_lvl); 90 unsigned long *sp, unsigned long bp, char *log_lvl);
|
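frame.h and stacktrace.h together describe the frame-pointer convention the unwinder relies on: the prologue pushes the old bp and copies sp into it, so each frame begins with the caller's saved bp followed by the return address, and get_bp() seeds the walk from the live register. A userspace sketch of that walk, assuming x86-64 built with -fno-omit-frame-pointer (the layout is compiler convention, not an ABI guarantee):

#include <stdio.h>

struct frame {
	struct frame *next;	/* caller's saved bp */
	unsigned long ret;	/* return address pushed by call */
};

static void walk_frames(void)
{
	struct frame *fp;

	asm("movq %%rbp, %0" : "=r" (fp));	/* cf. get_bp() */
	/* A real unwinder also range-checks each fp against the stack
	 * bounds; this sketch simply trusts the chain and stops before
	 * the outermost frame, whose saved bp is zero. */
	while (fp && fp->next) {
		printf("return address %#lx\n", fp->ret);
		fp = fp->next;
	}
}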
H A D | hw_breakpoint.h | 54 extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 55 extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 60 int arch_install_hw_breakpoint(struct perf_event *bp); 61 void arch_uninstall_hw_breakpoint(struct perf_event *bp); 62 void hw_breakpoint_pmu_read(struct perf_event *bp); 63 void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); 66 arch_fill_perf_breakpoint(struct perf_event *bp);
|
H A D | kdebug.h | 28 unsigned long *sp, unsigned long bp);
|
H A D | a.out-core.h | 47 dump->regs.bp = regs->bp; aout_dump_thread()
|
H A D | sigcontext.h | 14 unsigned long bp; member in struct:sigcontext 51 unsigned long bp; member in struct:sigcontext
|
H A D | asm.h | 41 #define _ASM_BP __ASM_REG(bp)
|
H A D | syscall.h | 126 *args++ = regs->bp; syscall_get_arguments() 187 regs->bp = *args++; syscall_set_arguments()
|
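syscall.h copies regs->bp in and out because on 32-bit x86 the sixth system-call argument travels in ebp, after ebx, ecx, edx, esi and edi. Reduced to a toy with a pared-down register struct of our own:

/* Hypothetical subset of the i386 pt_regs fields used for arguments. */
struct toy_regs {
	unsigned long bx, cx, dx, si, di, bp;
};

/* i386 argument order: ebx, ecx, edx, esi, edi, ebp. */
static void toy_get_arguments(const struct toy_regs *regs,
			      unsigned long args[6])
{
	args[0] = regs->bx;
	args[1] = regs->cx;
	args[2] = regs->dx;
	args[3] = regs->si;
	args[4] = regs->di;
	args[5] = regs->bp;	/* the line quoted above */
}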
H A D | elf.h | 111 _r->si = 0; _r->di = 0; _r->bp = 0; \ 127 pr_reg[5] = regs->bp; \ 176 regs->si = regs->di /*= regs->bp*/ = 0; elf_common_init() 212 (pr_reg)[4] = (regs)->bp; \
|
H A D | ptrace.h | 17 unsigned long bp; member in struct:pt_regs 42 unsigned long bp; member in struct:pt_regs 154 #define GET_FP(regs) ((regs)->bp)
|
/linux-4.1.27/drivers/media/usb/pvrusb2/ |
H A D | pvrusb2-io.c | 36 #define BUFFER_CHECK(bp) do { \ 37 if ((bp)->signature != BUFFER_SIG) { \ 40 (bp),__FILE__,__LINE__); \ 41 pvr2_buffer_describe(bp,"BadSig"); \ 46 #define BUFFER_CHECK(bp) do {} while(0) 113 static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg) pvr2_buffer_describe() argument 120 bp, pvr2_buffer_describe() 121 (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"), pvr2_buffer_describe() 122 (bp ? bp->id : 0), pvr2_buffer_describe() 123 (bp ? bp->status : 0), pvr2_buffer_describe() 124 (bp ? bp->stream : NULL), pvr2_buffer_describe() 125 (bp ? bp->purb : NULL), pvr2_buffer_describe() 126 (bp ? bp->signature : 0)); pvr2_buffer_describe() 130 static void pvr2_buffer_remove(struct pvr2_buffer *bp) pvr2_buffer_remove() argument 135 struct pvr2_stream *sp = bp->stream; pvr2_buffer_remove() 136 switch (bp->state) { pvr2_buffer_remove() 140 ccnt = bp->max_count; pvr2_buffer_remove() 145 ccnt = bp->max_count; pvr2_buffer_remove() 150 ccnt = bp->used_count; pvr2_buffer_remove() 155 list_del_init(&bp->list_overhead); pvr2_buffer_remove() 161 pvr2_buffer_state_decode(bp->state),*bcnt,*cnt); pvr2_buffer_remove() 162 bp->state = pvr2_buffer_state_none; pvr2_buffer_remove() 165 static void pvr2_buffer_set_none(struct pvr2_buffer *bp) pvr2_buffer_set_none() argument 169 BUFFER_CHECK(bp); pvr2_buffer_set_none() 170 sp = bp->stream; pvr2_buffer_set_none() 173 bp, pvr2_buffer_set_none() 174 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_none() 177 pvr2_buffer_remove(bp); pvr2_buffer_set_none() 181 static int pvr2_buffer_set_ready(struct pvr2_buffer *bp) pvr2_buffer_set_ready() argument 186 BUFFER_CHECK(bp); pvr2_buffer_set_ready() 187 sp = bp->stream; pvr2_buffer_set_ready() 190 bp, pvr2_buffer_set_ready() 191 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_ready() 195 pvr2_buffer_remove(bp); pvr2_buffer_set_ready() 196 list_add_tail(&bp->list_overhead,&sp->ready_list); pvr2_buffer_set_ready() 197 bp->state = pvr2_buffer_state_ready; pvr2_buffer_set_ready() 199 sp->r_bcount += bp->used_count; pvr2_buffer_set_ready() 203 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_ready() 209 static void pvr2_buffer_set_idle(struct pvr2_buffer *bp) pvr2_buffer_set_idle() argument 213 BUFFER_CHECK(bp); pvr2_buffer_set_idle() 214 sp = bp->stream; pvr2_buffer_set_idle() 217 bp, pvr2_buffer_set_idle() 218 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_idle() 221 pvr2_buffer_remove(bp); pvr2_buffer_set_idle() 222 list_add_tail(&bp->list_overhead,&sp->idle_list); pvr2_buffer_set_idle() 223 bp->state = pvr2_buffer_state_idle; pvr2_buffer_set_idle() 225 sp->i_bcount += bp->max_count; pvr2_buffer_set_idle() 229 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_idle() 234 static void pvr2_buffer_set_queued(struct pvr2_buffer *bp) pvr2_buffer_set_queued() argument 238 BUFFER_CHECK(bp); pvr2_buffer_set_queued() 239 sp = bp->stream; pvr2_buffer_set_queued() 242 bp, pvr2_buffer_set_queued() 243 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_queued() 246 pvr2_buffer_remove(bp); pvr2_buffer_set_queued() 247 list_add_tail(&bp->list_overhead,&sp->queued_list); pvr2_buffer_set_queued() 248 bp->state = pvr2_buffer_state_queued; pvr2_buffer_set_queued() 250 sp->q_bcount += bp->max_count; pvr2_buffer_set_queued() 254 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_queued() 259 static void pvr2_buffer_wipe(struct pvr2_buffer *bp) pvr2_buffer_wipe() argument 261 if (bp->state == pvr2_buffer_state_queued) { pvr2_buffer_wipe() 262 usb_kill_urb(bp->purb); 
pvr2_buffer_wipe() 266 static int pvr2_buffer_init(struct pvr2_buffer *bp, pvr2_buffer_init() argument 270 memset(bp,0,sizeof(*bp)); pvr2_buffer_init() 271 bp->signature = BUFFER_SIG; pvr2_buffer_init() 272 bp->id = id; pvr2_buffer_init() 274 "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp); pvr2_buffer_init() 275 bp->stream = sp; pvr2_buffer_init() 276 bp->state = pvr2_buffer_state_none; pvr2_buffer_init() 277 INIT_LIST_HEAD(&bp->list_overhead); pvr2_buffer_init() 278 bp->purb = usb_alloc_urb(0,GFP_KERNEL); pvr2_buffer_init() 279 if (! bp->purb) return -ENOMEM; pvr2_buffer_init() 281 pvr2_buffer_describe(bp,"create"); pvr2_buffer_init() 286 static void pvr2_buffer_done(struct pvr2_buffer *bp) pvr2_buffer_done() argument 289 pvr2_buffer_describe(bp,"delete"); pvr2_buffer_done() 291 pvr2_buffer_wipe(bp); pvr2_buffer_done() 292 pvr2_buffer_set_none(bp); pvr2_buffer_done() 293 bp->signature = 0; pvr2_buffer_done() 294 bp->stream = NULL; pvr2_buffer_done() 295 usb_free_urb(bp->purb); pvr2_buffer_done() 297 " bufferDone %p",bp); pvr2_buffer_done() 332 struct pvr2_buffer *bp; pvr2_stream_buffer_count() local 333 bp = kmalloc(sizeof(*bp),GFP_KERNEL); pvr2_stream_buffer_count() 334 if (!bp) return -ENOMEM; pvr2_stream_buffer_count() 335 ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count); pvr2_stream_buffer_count() 337 kfree(bp); pvr2_stream_buffer_count() 340 sp->buffers[sp->buffer_total_count] = bp; pvr2_stream_buffer_count() 342 pvr2_buffer_set_idle(bp); pvr2_stream_buffer_count() 346 struct pvr2_buffer *bp; pvr2_stream_buffer_count() local 347 bp = sp->buffers[sp->buffer_total_count - 1]; pvr2_stream_buffer_count() 351 pvr2_buffer_done(bp); pvr2_stream_buffer_count() 352 kfree(bp); pvr2_stream_buffer_count() 371 struct pvr2_buffer *bp; pvr2_stream_achieve_buffer_count() local 387 bp = sp->buffers[sp->buffer_total_count - (cnt + 1)]; pvr2_stream_achieve_buffer_count() 388 if (bp->state != pvr2_buffer_state_idle) break; pvr2_stream_achieve_buffer_count() 437 struct pvr2_buffer *bp = urb->context; buffer_complete() local 440 BUFFER_CHECK(bp); buffer_complete() 441 sp = bp->stream; buffer_complete() 442 bp->used_count = 0; buffer_complete() 443 bp->status = 0; buffer_complete() 446 bp,urb->status,urb->actual_length); buffer_complete() 454 bp->used_count = urb->actual_length; buffer_complete() 472 bp->status = urb->status; buffer_complete() 475 pvr2_buffer_set_ready(bp); buffer_complete() 592 struct pvr2_buffer *bp; pvr2_stream_kill() local 595 while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) { pvr2_stream_kill() 596 pvr2_buffer_set_idle(bp); pvr2_stream_kill() 604 int pvr2_buffer_queue(struct pvr2_buffer *bp) pvr2_buffer_queue() argument 613 if (!bp) return -EINVAL; pvr2_buffer_queue() 614 sp = bp->stream; pvr2_buffer_queue() 616 pvr2_buffer_wipe(bp); pvr2_buffer_queue() 621 pvr2_buffer_set_queued(bp); pvr2_buffer_queue() 623 for (idx = 0; idx < (bp->max_count) / 4; idx++) { pvr2_buffer_queue() 624 val = bp->id << 24; pvr2_buffer_queue() 626 ((unsigned int *)(bp->ptr))[idx] = val; pvr2_buffer_queue() 629 bp->status = -EINPROGRESS; pvr2_buffer_queue() 630 usb_fill_bulk_urb(bp->purb, // struct urb *urb pvr2_buffer_queue() 634 bp->ptr, // void *transfer_buffer pvr2_buffer_queue() 635 bp->max_count, // int buffer_length pvr2_buffer_queue() 637 bp); pvr2_buffer_queue() 638 usb_submit_urb(bp->purb,GFP_KERNEL); pvr2_buffer_queue() 643 int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) pvr2_buffer_set_buffer() argument 648 if (!bp) return -EINVAL; 
pvr2_buffer_set_buffer() 649 sp = bp->stream; pvr2_buffer_set_buffer() 652 if (bp->state != pvr2_buffer_state_idle) { pvr2_buffer_set_buffer() 655 bp->ptr = ptr; pvr2_buffer_set_buffer() 656 bp->stream->i_bcount -= bp->max_count; pvr2_buffer_set_buffer() 657 bp->max_count = cnt; pvr2_buffer_set_buffer() 658 bp->stream->i_bcount += bp->max_count; pvr2_buffer_set_buffer() 664 bp->stream->i_bcount,bp->stream->i_count); pvr2_buffer_set_buffer() 671 unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp) pvr2_buffer_get_count() argument 673 return bp->used_count; pvr2_buffer_get_count() 676 int pvr2_buffer_get_status(struct pvr2_buffer *bp) pvr2_buffer_get_status() argument 678 return bp->status; pvr2_buffer_get_status() 681 int pvr2_buffer_get_id(struct pvr2_buffer *bp) pvr2_buffer_get_id() argument 683 return bp->id; pvr2_buffer_get_id()
|
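Every buffer in pvrusb2-io.c lives on exactly one of three stream lists — idle (empty, available), queued (URB in flight) and ready (completed, holding data) — and each pvr2_buffer_set_*() helper above first detaches the buffer via pvr2_buffer_remove(), adjusting the old list's count and byte totals, before appending it to the new list. The state machine, modelled with counters instead of the driver's list_heads:

#include <stddef.h>

enum buf_state { BUF_NONE, BUF_IDLE, BUF_QUEUED, BUF_READY };

struct toy_stream { int r_count, q_count, i_count; };

static int *count_for(struct toy_stream *sp, enum buf_state st)
{
	switch (st) {
	case BUF_IDLE:   return &sp->i_count;
	case BUF_QUEUED: return &sp->q_count;
	case BUF_READY:  return &sp->r_count;
	default:         return NULL;
	}
}

/* cf. pvr2_buffer_remove() followed by list_add_tail() in the driver:
 * leave the old list's accounting, enter the new list's. */
static enum buf_state buf_move(struct toy_stream *sp,
			       enum buf_state from, enum buf_state to)
{
	int *c;

	if ((c = count_for(sp, from)))
		--*c;
	if ((c = count_for(sp, to)))
		++*c;
	return to;
}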
H A D | pvrusb2-dvb.c | 38 struct pvr2_buffer *bp; pvr2_dvb_feed_func() local 52 bp = pvr2_stream_get_ready_buffer(stream); pvr2_dvb_feed_func() 53 if (bp != NULL) { pvr2_dvb_feed_func() 54 count = pvr2_buffer_get_count(bp); pvr2_dvb_feed_func() 59 pvr2_buffer_get_id(bp)], pvr2_dvb_feed_func() 62 ret = pvr2_buffer_get_status(bp); pvr2_dvb_feed_func() 65 ret = pvr2_buffer_queue(bp); pvr2_dvb_feed_func() 147 struct pvr2_buffer *bp; pvr2_dvb_stream_do_start() local 171 bp = pvr2_stream_get_buffer(stream, idx); pvr2_dvb_stream_do_start() 172 pvr2_buffer_set_buffer(bp, pvr2_dvb_stream_do_start() 180 while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) { pvr2_dvb_stream_do_start() 181 ret = pvr2_buffer_queue(bp); pvr2_dvb_stream_do_start()
|
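pvr2_dvb_feed_func() is the consumer of that ready list: take a completed buffer, push its payload to the DVB demux, then requeue the very same buffer so the USB producer never runs dry. One iteration of the loop, outlined with stand-in helpers (the stream_*/buffer_* names here are ours, not the driver's exact API):

#include <stddef.h>

struct toy_buffer;
/* Stand-ins for the pvr2_stream / pvr2_buffer interface. */
struct toy_buffer *stream_get_ready_buffer(void *stream);
size_t buffer_get_count(struct toy_buffer *bp);
const void *buffer_data(struct toy_buffer *bp);
int buffer_get_status(struct toy_buffer *bp);
int buffer_queue(struct toy_buffer *bp);
void demux_feed(const void *data, size_t count);

static int feed_once(void *stream)
{
	struct toy_buffer *bp = stream_get_ready_buffer(stream);

	if (!bp)
		return 0;			/* nothing ready yet */
	if (buffer_get_count(bp))
		demux_feed(buffer_data(bp), buffer_get_count(bp));
	else if (buffer_get_status(bp))
		return buffer_get_status(bp);	/* transfer error */
	return buffer_queue(bp);		/* recycle the buffer */
}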
H A D | pvrusb2-ioread.c | 163 struct pvr2_buffer *bp; pvr2_ioread_start() local 168 while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != NULL) { pvr2_ioread_start() 169 stat = pvr2_buffer_queue(bp); pvr2_ioread_start() 206 struct pvr2_buffer *bp; pvr2_ioread_setup() local 231 bp = pvr2_stream_get_buffer(sp,idx); pvr2_ioread_setup() 232 pvr2_buffer_set_buffer(bp, pvr2_ioread_setup()
|
/linux-4.1.27/kernel/events/ |
H A D | hw_breakpoint.c | 74 /* Gather the number of total pinned and un-pinned bp in a cpuset */ 83 __weak int hw_breakpoint_weight(struct perf_event *bp) hw_breakpoint_weight() argument 88 static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) find_slot_idx() argument 90 if (bp->attr.bp_type & HW_BREAKPOINT_RW) find_slot_idx() 117 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) task_bp_pinned() argument 119 struct task_struct *tsk = bp->hw.target; task_bp_pinned() 133 static const struct cpumask *cpumask_of_bp(struct perf_event *bp) cpumask_of_bp() argument 135 if (bp->cpu >= 0) cpumask_of_bp() 136 return cpumask_of(bp->cpu); cpumask_of_bp() 145 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, fetch_bp_busy_slots() argument 148 const struct cpumask *cpumask = cpumask_of_bp(bp); fetch_bp_busy_slots() 156 if (!bp->hw.target) for_each_cpu() 159 nr += task_bp_pinned(cpu, bp, type); for_each_cpu() 184 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, toggle_bp_task_slot() argument 190 old_idx = task_bp_pinned(cpu, bp, type) - 1; toggle_bp_task_slot() 203 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, toggle_bp_slot() argument 206 const struct cpumask *cpumask = cpumask_of_bp(bp); toggle_bp_slot() 213 if (!bp->hw.target) { toggle_bp_slot() 214 get_bp_info(bp->cpu, type)->cpu_pinned += weight; toggle_bp_slot() 220 toggle_bp_task_slot(bp, cpu, type, weight); toggle_bp_slot() 223 list_add_tail(&bp->hw.bp_list, &bp_task_head); toggle_bp_slot() 225 list_del(&bp->hw.bp_list); toggle_bp_slot() 231 __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) arch_unregister_hw_breakpoint() argument 261 * bp for every cpu and we keep the max one. Same for the per tasks 280 static int __reserve_bp_slot(struct perf_event *bp) __reserve_bp_slot() argument 291 if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || __reserve_bp_slot() 292 bp->attr.bp_type == HW_BREAKPOINT_INVALID) __reserve_bp_slot() 295 type = find_slot_idx(bp); __reserve_bp_slot() 296 weight = hw_breakpoint_weight(bp); __reserve_bp_slot() 298 fetch_bp_busy_slots(&slots, bp, type); __reserve_bp_slot() 309 toggle_bp_slot(bp, true, type, weight); __reserve_bp_slot() 314 int reserve_bp_slot(struct perf_event *bp) reserve_bp_slot() argument 320 ret = __reserve_bp_slot(bp); reserve_bp_slot() 327 static void __release_bp_slot(struct perf_event *bp) __release_bp_slot() argument 332 type = find_slot_idx(bp); __release_bp_slot() 333 weight = hw_breakpoint_weight(bp); __release_bp_slot() 334 toggle_bp_slot(bp, false, type, weight); __release_bp_slot() 337 void release_bp_slot(struct perf_event *bp) release_bp_slot() argument 341 arch_unregister_hw_breakpoint(bp); release_bp_slot() 342 __release_bp_slot(bp); release_bp_slot() 352 int dbg_reserve_bp_slot(struct perf_event *bp) dbg_reserve_bp_slot() argument 357 return __reserve_bp_slot(bp); dbg_reserve_bp_slot() 360 int dbg_release_bp_slot(struct perf_event *bp) dbg_release_bp_slot() argument 365 __release_bp_slot(bp); dbg_release_bp_slot() 370 static int validate_hw_breakpoint(struct perf_event *bp) validate_hw_breakpoint() argument 374 ret = arch_validate_hwbkpt_settings(bp); validate_hw_breakpoint() 378 if (arch_check_bp_in_kernelspace(bp)) { validate_hw_breakpoint() 379 if (bp->attr.exclude_kernel) validate_hw_breakpoint() 392 int register_perf_hw_breakpoint(struct perf_event *bp) register_perf_hw_breakpoint() argument 396 ret = reserve_bp_slot(bp); register_perf_hw_breakpoint() 400 ret = 
validate_hw_breakpoint(bp); register_perf_hw_breakpoint() 402 /* if arch_validate_hwbkpt_settings() fails then release bp slot */ register_perf_hw_breakpoint() 404 release_bp_slot(bp); register_perf_hw_breakpoint() 428 * @bp: the breakpoint structure to modify 433 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) modify_user_hw_breakpoint() argument 435 u64 old_addr = bp->attr.bp_addr; modify_user_hw_breakpoint() 436 u64 old_len = bp->attr.bp_len; modify_user_hw_breakpoint() 437 int old_type = bp->attr.bp_type; modify_user_hw_breakpoint() 446 if (irqs_disabled() && bp->ctx && bp->ctx->task == current) modify_user_hw_breakpoint() 447 __perf_event_disable(bp); modify_user_hw_breakpoint() 449 perf_event_disable(bp); modify_user_hw_breakpoint() 451 bp->attr.bp_addr = attr->bp_addr; modify_user_hw_breakpoint() 452 bp->attr.bp_type = attr->bp_type; modify_user_hw_breakpoint() 453 bp->attr.bp_len = attr->bp_len; modify_user_hw_breakpoint() 458 err = validate_hw_breakpoint(bp); modify_user_hw_breakpoint() 460 perf_event_enable(bp); modify_user_hw_breakpoint() 463 bp->attr.bp_addr = old_addr; modify_user_hw_breakpoint() 464 bp->attr.bp_type = old_type; modify_user_hw_breakpoint() 465 bp->attr.bp_len = old_len; modify_user_hw_breakpoint() 466 if (!bp->attr.disabled) modify_user_hw_breakpoint() 467 perf_event_enable(bp); modify_user_hw_breakpoint() 473 bp->attr.disabled = attr->disabled; modify_user_hw_breakpoint() 481 * @bp: the breakpoint structure to unregister 483 void unregister_hw_breakpoint(struct perf_event *bp) unregister_hw_breakpoint() argument 485 if (!bp) unregister_hw_breakpoint() 487 perf_event_release_kernel(bp); unregister_hw_breakpoint() 503 struct perf_event * __percpu *cpu_events, *bp; register_wide_hw_breakpoint() local 513 bp = perf_event_create_kernel_counter(attr, cpu, NULL, for_each_online_cpu() 515 if (IS_ERR(bp)) { for_each_online_cpu() 516 err = PTR_ERR(bp); for_each_online_cpu() 520 per_cpu(*cpu_events, cpu) = bp; for_each_online_cpu() 558 static int hw_breakpoint_event_init(struct perf_event *bp) hw_breakpoint_event_init() argument 562 if (bp->attr.type != PERF_TYPE_BREAKPOINT) hw_breakpoint_event_init() 568 if (has_branch_stack(bp)) hw_breakpoint_event_init() 571 err = register_perf_hw_breakpoint(bp); hw_breakpoint_event_init() 575 bp->destroy = bp_perf_event_destroy; hw_breakpoint_event_init() 580 static int hw_breakpoint_add(struct perf_event *bp, int flags) hw_breakpoint_add() argument 583 bp->hw.state = PERF_HES_STOPPED; hw_breakpoint_add() 585 if (is_sampling_event(bp)) { hw_breakpoint_add() 586 bp->hw.last_period = bp->hw.sample_period; hw_breakpoint_add() 587 perf_swevent_set_period(bp); hw_breakpoint_add() 590 return arch_install_hw_breakpoint(bp); hw_breakpoint_add() 593 static void hw_breakpoint_del(struct perf_event *bp, int flags) hw_breakpoint_del() argument 595 arch_uninstall_hw_breakpoint(bp); hw_breakpoint_del() 598 static void hw_breakpoint_start(struct perf_event *bp, int flags) hw_breakpoint_start() argument 600 bp->hw.state = 0; hw_breakpoint_start() 603 static void hw_breakpoint_stop(struct perf_event *bp, int flags) hw_breakpoint_stop() argument 605 bp->hw.state = PERF_HES_STOPPED; hw_breakpoint_stop()
|
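All the accounting above enforces one constraint per CPU: the CPU-pinned breakpoints plus the heaviest per-task pinned count on that CPU must fit in the hardware slots (four debug registers on x86). fetch_bp_busy_slots() computes the worst case across the relevant CPUs and __reserve_bp_slot() refuses any request whose weight would overflow it. Reduced to the bare check:

#define TOY_NR_SLOTS 4	/* e.g. x86: DR0-DR3 */

/* Worst case over all CPUs, as struct bp_busy_slots carries it. */
struct toy_slots { int pinned; };

/* cf. __reserve_bp_slot(): -ENOSPC when the busiest CPU would overflow. */
static int toy_reserve(struct toy_slots *slots, int weight)
{
	if (slots->pinned + weight > TOY_NR_SLOTS)
		return -1;
	slots->pinned += weight;	/* cf. toggle_bp_slot(..., true, ...) */
	return 0;
}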
/linux-4.1.27/drivers/power/ |
H A D | apm_power.c | 47 struct find_bat_param *bp = (struct find_bat_param *)data; __find_main_battery() local 49 bp->bat = dev_get_drvdata(dev); __find_main_battery() 51 if (bp->bat->desc->use_for_apm) { __find_main_battery() 53 bp->main = bp->bat; __find_main_battery() 57 if (!PSY_PROP(bp->bat, CHARGE_FULL_DESIGN, &bp->full) || __find_main_battery() 58 !PSY_PROP(bp->bat, CHARGE_FULL, &bp->full)) { __find_main_battery() 59 if (bp->full.intval > bp->max_charge) { __find_main_battery() 60 bp->max_charge_bat = bp->bat; __find_main_battery() 61 bp->max_charge = bp->full.intval; __find_main_battery() 63 } else if (!PSY_PROP(bp->bat, ENERGY_FULL_DESIGN, &bp->full) || __find_main_battery() 64 !PSY_PROP(bp->bat, ENERGY_FULL, &bp->full)) { __find_main_battery() 65 if (bp->full.intval > bp->max_energy) { __find_main_battery() 66 bp->max_energy_bat = bp->bat; __find_main_battery() 67 bp->max_energy = bp->full.intval; __find_main_battery() 75 struct find_bat_param bp; find_main_battery() local 78 memset(&bp, 0, sizeof(struct find_bat_param)); find_main_battery() 80 bp.main = main_battery; find_main_battery() 82 error = class_for_each_device(power_supply_class, NULL, &bp, find_main_battery() 85 main_battery = bp.main; find_main_battery() 89 if ((bp.max_energy_bat && bp.max_charge_bat) && find_main_battery() 90 (bp.max_energy_bat != bp.max_charge_bat)) { find_main_battery() 92 if (!PSY_PROP(bp.max_charge_bat, VOLTAGE_MAX_DESIGN, find_main_battery() 93 &bp.full)) { find_main_battery() 94 if (bp.max_energy > bp.max_charge * bp.full.intval) find_main_battery() 95 main_battery = bp.max_energy_bat; find_main_battery() 97 main_battery = bp.max_charge_bat; find_main_battery() 98 } else if (!PSY_PROP(bp.max_energy_bat, VOLTAGE_MAX_DESIGN, find_main_battery() 99 &bp.full)) { find_main_battery() 100 if (bp.max_charge > bp.max_energy / bp.full.intval) find_main_battery() 101 main_battery = bp.max_charge_bat; find_main_battery() 103 main_battery = bp.max_energy_bat; find_main_battery() 106 main_battery = bp.max_energy_bat; find_main_battery() 108 } else if (bp.max_charge_bat) { find_main_battery() 109 main_battery = bp.max_charge_bat; find_main_battery() 110 } else if (bp.max_energy_bat) { find_main_battery() 111 main_battery = bp.max_energy_bat; find_main_battery() 114 main_battery = bp.bat; find_main_battery()
|
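__find_main_battery() remembers both the pack with the biggest design charge and the one with the biggest design energy; when the two differ, find_main_battery() uses VOLTAGE_MAX_DESIGN to compare them in one unit (energy ≈ charge × voltage) and keeps the larger. The tie-break arithmetic, written as the raw multiply the driver performs:

/* Compare an energy-reporting battery against a charge-reporting one,
 * converting with the design voltage — the same bare multiplication
 * the driver uses, with no extra unit scaling. */
static int energy_battery_wins(long long max_energy,	/* ENERGY_FULL_DESIGN */
			       long long max_charge,	/* CHARGE_FULL_DESIGN */
			       long long voltage)	/* VOLTAGE_MAX_DESIGN */
{
	return max_energy > max_charge * voltage;
}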
/linux-4.1.27/drivers/net/ethernet/cadence/ |
H A D | macb.c | 95 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) macb_rx_desc() argument 97 return &bp->rx_ring[macb_rx_ring_wrap(index)]; macb_rx_desc() 100 static void *macb_rx_buffer(struct macb *bp, unsigned int index) macb_rx_buffer() argument 102 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); macb_rx_buffer() 105 static void macb_set_hwaddr(struct macb *bp) macb_set_hwaddr() argument 110 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); macb_set_hwaddr() 111 macb_or_gem_writel(bp, SA1B, bottom); macb_set_hwaddr() 112 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); macb_set_hwaddr() 113 macb_or_gem_writel(bp, SA1T, top); macb_set_hwaddr() 116 macb_or_gem_writel(bp, SA2B, 0); macb_set_hwaddr() 117 macb_or_gem_writel(bp, SA2T, 0); macb_set_hwaddr() 118 macb_or_gem_writel(bp, SA3B, 0); macb_set_hwaddr() 119 macb_or_gem_writel(bp, SA3T, 0); macb_set_hwaddr() 120 macb_or_gem_writel(bp, SA4B, 0); macb_set_hwaddr() 121 macb_or_gem_writel(bp, SA4T, 0); macb_set_hwaddr() 124 static void macb_get_hwaddr(struct macb *bp) macb_get_hwaddr() argument 132 pdata = dev_get_platdata(&bp->pdev->dev); macb_get_hwaddr() 136 bottom = macb_or_gem_readl(bp, SA1B + i * 8); macb_get_hwaddr() 137 top = macb_or_gem_readl(bp, SA1T + i * 8); macb_get_hwaddr() 156 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); macb_get_hwaddr() 161 netdev_info(bp->dev, "invalid hw address, using random\n"); macb_get_hwaddr() 162 eth_hw_addr_random(bp->dev); macb_get_hwaddr() 167 struct macb *bp = bus->priv; macb_mdio_read() local 170 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) macb_mdio_read() 177 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) macb_mdio_read() 180 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); macb_mdio_read() 188 struct macb *bp = bus->priv; macb_mdio_write() local 190 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) macb_mdio_write() 198 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) macb_mdio_write() 250 struct macb *bp = netdev_priv(dev); macb_handle_link_change() local 251 struct phy_device *phydev = bp->phy_dev; macb_handle_link_change() 256 spin_lock_irqsave(&bp->lock, flags); macb_handle_link_change() 259 if ((bp->speed != phydev->speed) || macb_handle_link_change() 260 (bp->duplex != phydev->duplex)) { macb_handle_link_change() 263 reg = macb_readl(bp, NCFGR); macb_handle_link_change() 265 if (macb_is_gem(bp)) macb_handle_link_change() 273 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) macb_handle_link_change() 276 macb_or_gem_writel(bp, NCFGR, reg); macb_handle_link_change() 278 bp->speed = phydev->speed; macb_handle_link_change() 279 bp->duplex = phydev->duplex; macb_handle_link_change() 284 if (phydev->link != bp->link) { macb_handle_link_change() 286 bp->speed = 0; macb_handle_link_change() 287 bp->duplex = -1; macb_handle_link_change() 289 bp->link = phydev->link; macb_handle_link_change() 294 spin_unlock_irqrestore(&bp->lock, flags); macb_handle_link_change() 301 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); macb_handle_link_change() 318 struct macb *bp = netdev_priv(dev); macb_mii_probe() local 324 phydev = phy_find_first(bp->mii_bus); macb_mii_probe() 330 pdata = dev_get_platdata(&bp->pdev->dev); macb_mii_probe() 332 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); macb_mii_probe() 341 bp->phy_interface); macb_mii_probe() 348 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) macb_mii_probe() 353 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) macb_mii_probe() 358 bp->link = 0; macb_mii_probe() 
359 bp->speed = 0; macb_mii_probe() 360 bp->duplex = -1; macb_mii_probe() 361 bp->phy_dev = phydev; macb_mii_probe() 366 static int macb_mii_init(struct macb *bp) macb_mii_init() argument 373 macb_writel(bp, NCR, MACB_BIT(MPE)); macb_mii_init() 375 bp->mii_bus = mdiobus_alloc(); macb_mii_init() 376 if (bp->mii_bus == NULL) { macb_mii_init() 381 bp->mii_bus->name = "MACB_mii_bus"; macb_mii_init() 382 bp->mii_bus->read = &macb_mdio_read; macb_mii_init() 383 bp->mii_bus->write = &macb_mdio_write; macb_mii_init() 384 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", macb_mii_init() 385 bp->pdev->name, bp->pdev->id); macb_mii_init() 386 bp->mii_bus->priv = bp; macb_mii_init() 387 bp->mii_bus->parent = &bp->dev->dev; macb_mii_init() 388 pdata = dev_get_platdata(&bp->pdev->dev); macb_mii_init() 390 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); macb_mii_init() 391 if (!bp->mii_bus->irq) { macb_mii_init() 396 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); macb_mii_init() 398 np = bp->pdev->dev.of_node; macb_mii_init() 401 err = of_mdiobus_register(bp->mii_bus, np); macb_mii_init() 405 if (!err && !phy_find_first(bp->mii_bus)) { macb_mii_init() 409 phydev = mdiobus_scan(bp->mii_bus, i); macb_mii_init() 421 bp->mii_bus->irq[i] = PHY_POLL; macb_mii_init() 424 bp->mii_bus->phy_mask = pdata->phy_mask; macb_mii_init() 426 err = mdiobus_register(bp->mii_bus); macb_mii_init() 432 err = macb_mii_probe(bp->dev); macb_mii_init() 439 mdiobus_unregister(bp->mii_bus); macb_mii_init() 441 kfree(bp->mii_bus->irq); macb_mii_init() 443 mdiobus_free(bp->mii_bus); macb_mii_init() 448 static void macb_update_stats(struct macb *bp) macb_update_stats() argument 450 u32 __iomem *reg = bp->regs + MACB_PFR; macb_update_stats() 451 u32 *p = &bp->hw_stats.macb.rx_pause_frames; macb_update_stats() 452 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; macb_update_stats() 460 static int macb_halt_tx(struct macb *bp) macb_halt_tx() argument 465 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); macb_halt_tx() 470 status = macb_readl(bp, TSR); macb_halt_tx() 480 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) macb_tx_unmap() argument 484 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, macb_tx_unmap() 487 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, macb_tx_unmap() 502 struct macb *bp = queue->bp; macb_tx_error_task() local 509 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", macb_tx_error_task() 510 (unsigned int)(queue - bp->queues), macb_tx_error_task() 519 spin_lock_irqsave(&bp->lock, flags); macb_tx_error_task() 522 netif_tx_stop_all_queues(bp->dev); macb_tx_error_task() 529 if (macb_halt_tx(bp)) macb_tx_error_task() 531 netdev_err(bp->dev, "BUG: halt tx timed out\n"); macb_tx_error_task() 548 macb_tx_unmap(bp, tx_skb); macb_tx_error_task() 558 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", macb_tx_error_task() 560 bp->stats.tx_packets++; macb_tx_error_task() 561 bp->stats.tx_bytes += skb->len; macb_tx_error_task() 570 netdev_err(bp->dev, macb_tx_error_task() 576 macb_tx_unmap(bp, tx_skb); macb_tx_error_task() 594 macb_writel(bp, TSR, macb_readl(bp, TSR)); macb_tx_error_task() 598 netif_tx_start_all_queues(bp->dev); macb_tx_error_task() 599 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_tx_error_task() 601 spin_unlock_irqrestore(&bp->lock, flags); macb_tx_error_task() 609 struct macb *bp = queue->bp; macb_tx_interrupt() local 610 u16 queue_index = queue - bp->queues; macb_tx_interrupt() 612 status = macb_readl(bp, 
TSR); macb_tx_interrupt() 613 macb_writel(bp, TSR, status); macb_tx_interrupt() 615 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_tx_interrupt() 618 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", macb_tx_interrupt() 648 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", macb_tx_interrupt() 650 bp->stats.tx_packets++; macb_tx_interrupt() 651 bp->stats.tx_bytes += skb->len; macb_tx_interrupt() 655 macb_tx_unmap(bp, tx_skb); macb_tx_interrupt() 667 if (__netif_subqueue_stopped(bp->dev, queue_index) && macb_tx_interrupt() 670 netif_wake_subqueue(bp->dev, queue_index); macb_tx_interrupt() 673 static void gem_rx_refill(struct macb *bp) gem_rx_refill() argument 679 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { gem_rx_refill() 680 entry = macb_rx_ring_wrap(bp->rx_prepared_head); gem_rx_refill() 685 bp->rx_prepared_head++; gem_rx_refill() 687 if (bp->rx_skbuff[entry] == NULL) { gem_rx_refill() 689 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); gem_rx_refill() 691 netdev_err(bp->dev, gem_rx_refill() 697 paddr = dma_map_single(&bp->pdev->dev, skb->data, gem_rx_refill() 698 bp->rx_buffer_size, DMA_FROM_DEVICE); gem_rx_refill() 699 if (dma_mapping_error(&bp->pdev->dev, paddr)) { gem_rx_refill() 704 bp->rx_skbuff[entry] = skb; gem_rx_refill() 708 bp->rx_ring[entry].addr = paddr; gem_rx_refill() 709 bp->rx_ring[entry].ctrl = 0; gem_rx_refill() 714 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); gem_rx_refill() 715 bp->rx_ring[entry].ctrl = 0; gem_rx_refill() 722 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", gem_rx_refill() 723 bp->rx_prepared_head, bp->rx_tail); gem_rx_refill() 727 static void discard_partial_frame(struct macb *bp, unsigned int begin, discard_partial_frame() argument 733 struct macb_dma_desc *desc = macb_rx_desc(bp, frag); discard_partial_frame() 747 static int gem_rx(struct macb *bp, int budget) gem_rx() argument 758 entry = macb_rx_ring_wrap(bp->rx_tail); gem_rx() 759 desc = &bp->rx_ring[entry]; gem_rx() 770 bp->rx_tail++; gem_rx() 774 netdev_err(bp->dev, gem_rx() 776 bp->stats.rx_dropped++; gem_rx() 779 skb = bp->rx_skbuff[entry]; gem_rx() 781 netdev_err(bp->dev, gem_rx() 783 bp->stats.rx_dropped++; gem_rx() 787 bp->rx_skbuff[entry] = NULL; gem_rx() 790 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); gem_rx() 794 dma_unmap_single(&bp->pdev->dev, addr, gem_rx() 795 bp->rx_buffer_size, DMA_FROM_DEVICE); gem_rx() 797 skb->protocol = eth_type_trans(skb, bp->dev); gem_rx() 799 if (bp->dev->features & NETIF_F_RXCSUM && gem_rx() 800 !(bp->dev->flags & IFF_PROMISC) && gem_rx() 804 bp->stats.rx_packets++; gem_rx() 805 bp->stats.rx_bytes += skb->len; gem_rx() 808 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", gem_rx() 819 gem_rx_refill(bp); gem_rx() 824 static int macb_rx_frame(struct macb *bp, unsigned int first_frag, macb_rx_frame() argument 833 desc = macb_rx_desc(bp, last_frag); macb_rx_frame() 836 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", macb_rx_frame() 849 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); macb_rx_frame() 851 bp->stats.rx_dropped++; macb_rx_frame() 853 desc = macb_rx_desc(bp, frag); macb_rx_frame() 871 unsigned int frag_len = bp->rx_buffer_size; macb_rx_frame() 878 macb_rx_buffer(bp, frag), frag_len); macb_rx_frame() 879 offset += bp->rx_buffer_size; macb_rx_frame() 880 desc = macb_rx_desc(bp, frag); macb_rx_frame() 891 skb->protocol = eth_type_trans(skb, bp->dev); macb_rx_frame() 893 bp->stats.rx_packets++; macb_rx_frame() 894 bp->stats.rx_bytes += 
skb->len; macb_rx_frame() 895 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", macb_rx_frame() 902 static int macb_rx(struct macb *bp, int budget) macb_rx() argument 908 for (tail = bp->rx_tail; budget > 0; tail++) { macb_rx() 909 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); macb_rx() 923 discard_partial_frame(bp, first_frag, tail); macb_rx() 931 dropped = macb_rx_frame(bp, first_frag, tail); macb_rx() 941 bp->rx_tail = first_frag; macb_rx() 943 bp->rx_tail = tail; macb_rx() 950 struct macb *bp = container_of(napi, struct macb, napi); macb_poll() local 954 status = macb_readl(bp, RSR); macb_poll() 955 macb_writel(bp, RSR, status); macb_poll() 959 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", macb_poll() 962 work_done = bp->macbgem_ops.mog_rx(bp, budget); macb_poll() 967 status = macb_readl(bp, RSR); macb_poll() 969 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_poll() 970 macb_writel(bp, ISR, MACB_BIT(RCOMP)); macb_poll() 973 macb_writel(bp, IER, MACB_RX_INT_FLAGS); macb_poll() 985 struct macb *bp = queue->bp; macb_interrupt() local 986 struct net_device *dev = bp->dev; macb_interrupt() 994 spin_lock(&bp->lock); macb_interrupt() 1003 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", macb_interrupt() 1004 (unsigned int)(queue - bp->queues), macb_interrupt() 1016 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt() 1019 if (napi_schedule_prep(&bp->napi)) { macb_interrupt() 1020 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); macb_interrupt() 1021 __napi_schedule(&bp->napi); macb_interrupt() 1029 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt() 1050 ctrl = macb_readl(bp, NCR); macb_interrupt() 1051 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); macb_interrupt() 1052 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); macb_interrupt() 1054 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt() 1055 macb_writel(bp, ISR, MACB_BIT(RXUBR)); macb_interrupt() 1060 if (macb_is_gem(bp)) macb_interrupt() 1061 bp->hw_stats.gem.rx_overruns++; macb_interrupt() 1063 bp->hw_stats.macb.rx_overruns++; macb_interrupt() 1065 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt() 1077 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt() 1084 spin_unlock(&bp->lock); macb_interrupt() 1096 struct macb *bp = netdev_priv(dev); macb_poll_controller() local 1102 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) macb_poll_controller() 1108 static inline unsigned int macb_count_tx_descriptors(struct macb *bp, macb_count_tx_descriptors() argument 1111 return (len + bp->max_tx_length - 1) / bp->max_tx_length; macb_count_tx_descriptors() 1114 static unsigned int macb_tx_map(struct macb *bp, macb_tx_map() argument 1131 size = min(len, bp->max_tx_length); macb_tx_map() 1135 mapping = dma_map_single(&bp->pdev->dev, macb_tx_map() 1138 if (dma_mapping_error(&bp->pdev->dev, mapping)) macb_tx_map() 1160 size = min(len, bp->max_tx_length); macb_tx_map() 1164 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, macb_tx_map() 1166 if (dma_mapping_error(&bp->pdev->dev, mapping)) macb_tx_map() 1184 netdev_err(bp->dev, "BUG! 
empty skb!\n"); macb_tx_map() 1232 netdev_err(bp->dev, "TX DMA map failed\n"); macb_tx_map() 1237 macb_tx_unmap(bp, tx_skb); macb_tx_map() 1246 struct macb *bp = netdev_priv(dev); macb_start_xmit() local 1247 struct macb_queue *queue = &bp->queues[queue_index]; macb_start_xmit() 1252 netdev_vdbg(bp->dev, macb_start_xmit() 1264 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); macb_start_xmit() 1268 count += macb_count_tx_descriptors(bp, frag_size); macb_start_xmit() 1271 spin_lock_irqsave(&bp->lock, flags); macb_start_xmit() 1276 spin_unlock_irqrestore(&bp->lock, flags); macb_start_xmit() 1277 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", macb_start_xmit() 1283 if (!macb_tx_map(bp, queue, skb)) { macb_start_xmit() 1293 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_start_xmit() 1299 spin_unlock_irqrestore(&bp->lock, flags); macb_start_xmit() 1304 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) macb_init_rx_buffer_size() argument 1306 if (!macb_is_gem(bp)) { macb_init_rx_buffer_size() 1307 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; macb_init_rx_buffer_size() 1309 bp->rx_buffer_size = size; macb_init_rx_buffer_size() 1311 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { macb_init_rx_buffer_size() 1312 netdev_dbg(bp->dev, macb_init_rx_buffer_size() 1315 bp->rx_buffer_size = macb_init_rx_buffer_size() 1316 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); macb_init_rx_buffer_size() 1320 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", macb_init_rx_buffer_size() 1321 bp->dev->mtu, bp->rx_buffer_size); macb_init_rx_buffer_size() 1324 static void gem_free_rx_buffers(struct macb *bp) gem_free_rx_buffers() argument 1331 if (!bp->rx_skbuff) gem_free_rx_buffers() 1335 skb = bp->rx_skbuff[i]; gem_free_rx_buffers() 1340 desc = &bp->rx_ring[i]; gem_free_rx_buffers() 1342 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, gem_free_rx_buffers() 1348 kfree(bp->rx_skbuff); gem_free_rx_buffers() 1349 bp->rx_skbuff = NULL; gem_free_rx_buffers() 1352 static void macb_free_rx_buffers(struct macb *bp) macb_free_rx_buffers() argument 1354 if (bp->rx_buffers) { macb_free_rx_buffers() 1355 dma_free_coherent(&bp->pdev->dev, macb_free_rx_buffers() 1356 RX_RING_SIZE * bp->rx_buffer_size, macb_free_rx_buffers() 1357 bp->rx_buffers, bp->rx_buffers_dma); macb_free_rx_buffers() 1358 bp->rx_buffers = NULL; macb_free_rx_buffers() 1362 static void macb_free_consistent(struct macb *bp) macb_free_consistent() argument 1367 bp->macbgem_ops.mog_free_rx_buffers(bp); macb_free_consistent() 1368 if (bp->rx_ring) { macb_free_consistent() 1369 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, macb_free_consistent() 1370 bp->rx_ring, bp->rx_ring_dma); macb_free_consistent() 1371 bp->rx_ring = NULL; macb_free_consistent() 1374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_free_consistent() 1378 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, macb_free_consistent() 1385 static int gem_alloc_rx_buffers(struct macb *bp) gem_alloc_rx_buffers() argument 1390 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); gem_alloc_rx_buffers() 1391 if (!bp->rx_skbuff) gem_alloc_rx_buffers() 1394 netdev_dbg(bp->dev, gem_alloc_rx_buffers() 1396 RX_RING_SIZE, bp->rx_skbuff); gem_alloc_rx_buffers() 1400 static int macb_alloc_rx_buffers(struct macb *bp) macb_alloc_rx_buffers() argument 1404 size = RX_RING_SIZE * bp->rx_buffer_size; macb_alloc_rx_buffers() 1405 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_rx_buffers() 1406 &bp->rx_buffers_dma, GFP_KERNEL); 
macb_alloc_rx_buffers() 1407 if (!bp->rx_buffers) macb_alloc_rx_buffers() 1410 netdev_dbg(bp->dev, macb_alloc_rx_buffers() 1412 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); macb_alloc_rx_buffers() 1416 static int macb_alloc_consistent(struct macb *bp) macb_alloc_consistent() argument 1422 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_alloc_consistent() 1424 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_consistent() 1429 netdev_dbg(bp->dev, macb_alloc_consistent() 1441 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_consistent() 1442 &bp->rx_ring_dma, GFP_KERNEL); macb_alloc_consistent() 1443 if (!bp->rx_ring) macb_alloc_consistent() 1445 netdev_dbg(bp->dev, macb_alloc_consistent() 1447 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); macb_alloc_consistent() 1449 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) macb_alloc_consistent() 1455 macb_free_consistent(bp); macb_alloc_consistent() 1459 static void gem_init_rings(struct macb *bp) gem_init_rings() argument 1465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { gem_init_rings() 1475 bp->rx_tail = 0; gem_init_rings() 1476 bp->rx_prepared_head = 0; gem_init_rings() 1478 gem_rx_refill(bp); gem_init_rings() 1481 static void macb_init_rings(struct macb *bp) macb_init_rings() argument 1486 addr = bp->rx_buffers_dma; macb_init_rings() 1488 bp->rx_ring[i].addr = addr; macb_init_rings() 1489 bp->rx_ring[i].ctrl = 0; macb_init_rings() 1490 addr += bp->rx_buffer_size; macb_init_rings() 1492 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); macb_init_rings() 1495 bp->queues[0].tx_ring[i].addr = 0; macb_init_rings() 1496 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); macb_init_rings() 1498 bp->queues[0].tx_head = 0; macb_init_rings() 1499 bp->queues[0].tx_tail = 0; macb_init_rings() 1500 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); macb_init_rings() 1502 bp->rx_tail = 0; macb_init_rings() 1505 static void macb_reset_hw(struct macb *bp) macb_reset_hw() argument 1514 macb_writel(bp, NCR, 0); macb_reset_hw() 1517 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); macb_reset_hw() 1520 macb_writel(bp, TSR, -1); macb_reset_hw() 1521 macb_writel(bp, RSR, -1); macb_reset_hw() 1524 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_reset_hw() 1530 static u32 gem_mdc_clk_div(struct macb *bp) gem_mdc_clk_div() argument 1533 unsigned long pclk_hz = clk_get_rate(bp->pclk); gem_mdc_clk_div() 1551 static u32 macb_mdc_clk_div(struct macb *bp) macb_mdc_clk_div() argument 1556 if (macb_is_gem(bp)) macb_mdc_clk_div() 1557 return gem_mdc_clk_div(bp); macb_mdc_clk_div() 1559 pclk_hz = clk_get_rate(bp->pclk); macb_mdc_clk_div() 1577 static u32 macb_dbw(struct macb *bp) macb_dbw() argument 1579 if (!macb_is_gem(bp)) macb_dbw() 1582 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { macb_dbw() 1601 static void macb_configure_dma(struct macb *bp) macb_configure_dma() argument 1606 if (macb_is_gem(bp)) { macb_configure_dma() 1607 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); macb_configure_dma() 1608 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); macb_configure_dma() 1609 if (bp->dma_burst_length) macb_configure_dma() 1610 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); macb_configure_dma() 1618 ncr = macb_readl(bp, NCR); macb_configure_dma() 1619 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); macb_configure_dma() 1620 tmp = __raw_readl(bp->regs + MACB_NCR); macb_configure_dma() 1628 
macb_writel(bp, NCR, ncr); macb_configure_dma()
1630 if (bp->dev->features & NETIF_F_HW_CSUM) macb_configure_dma()
1634 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", macb_configure_dma()
1636 gem_writel(bp, DMACFG, dmacfg); macb_configure_dma()
1640 static void macb_init_hw(struct macb *bp) macb_init_hw() argument
1647 macb_reset_hw(bp); macb_init_hw()
1648 macb_set_hwaddr(bp); macb_init_hw()
1650 config = macb_mdc_clk_div(bp); macb_init_hw()
1655 if (bp->dev->flags & IFF_PROMISC) macb_init_hw()
1657 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) macb_init_hw()
1659 if (!(bp->dev->flags & IFF_BROADCAST)) macb_init_hw()
1661 config |= macb_dbw(bp); macb_init_hw()
1662 macb_writel(bp, NCFGR, config); macb_init_hw()
1663 bp->speed = SPEED_10; macb_init_hw()
1664 bp->duplex = DUPLEX_HALF; macb_init_hw()
1666 macb_configure_dma(bp); macb_init_hw()
1669 macb_writel(bp, RBQP, bp->rx_ring_dma); macb_init_hw()
1670 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_init_hw()
1681 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); macb_init_hw()
1751 struct macb *bp = netdev_priv(dev); macb_sethashtable() local
1760 macb_or_gem_writel(bp, HRB, mc_filter[0]);
1761 macb_or_gem_writel(bp, HRT, mc_filter[1]);
1770 struct macb *bp = netdev_priv(dev); macb_set_rx_mode() local
1772 cfg = macb_readl(bp, NCFGR); macb_set_rx_mode()
1779 if (macb_is_gem(bp)) macb_set_rx_mode()
1786 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) macb_set_rx_mode()
1792 macb_or_gem_writel(bp, HRB, -1); macb_set_rx_mode()
1793 macb_or_gem_writel(bp, HRT, -1); macb_set_rx_mode()
1801 macb_or_gem_writel(bp, HRB, 0); macb_set_rx_mode()
1802 macb_or_gem_writel(bp, HRT, 0); macb_set_rx_mode()
1806 macb_writel(bp, NCFGR, cfg); macb_set_rx_mode()
1811 struct macb *bp = netdev_priv(dev); macb_open() local
1815 netdev_dbg(bp->dev, "open\n"); macb_open()
1821 if (!bp->phy_dev) macb_open()
1825 macb_init_rx_buffer_size(bp, bufsz); macb_open()
1827 err = macb_alloc_consistent(bp); macb_open()
1834 napi_enable(&bp->napi); macb_open()
1836 bp->macbgem_ops.mog_init_rings(bp); macb_open()
1837 macb_init_hw(bp); macb_open()
1840 phy_start(bp->phy_dev); macb_open()
1849 struct macb *bp = netdev_priv(dev); macb_close() local
1853 napi_disable(&bp->napi); macb_close()
1855 if (bp->phy_dev) macb_close()
1856 phy_stop(bp->phy_dev); macb_close()
1858 spin_lock_irqsave(&bp->lock, flags); macb_close()
1859 macb_reset_hw(bp); macb_close()
1861 spin_unlock_irqrestore(&bp->lock, flags); macb_close()
1863 macb_free_consistent(bp); macb_close()
1868 static void gem_update_stats(struct macb *bp) gem_update_stats() argument
1871 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; gem_update_stats()
1875 u64 val = readl_relaxed(bp->regs + offset); gem_update_stats()
1877 bp->ethtool_stats[i] += val; gem_update_stats()
1882 val = readl_relaxed(bp->regs + offset + 4); gem_update_stats()
1883 bp->ethtool_stats[i] += ((u64)val) << 32; gem_update_stats()
1889 static struct net_device_stats *gem_get_stats(struct macb *bp) gem_get_stats() argument
1891 struct gem_stats *hwstat = &bp->hw_stats.gem; gem_get_stats()
1892 struct net_device_stats *nstat = &bp->stats; gem_get_stats()
1894 gem_update_stats(bp); gem_get_stats()
1930 struct macb *bp; gem_get_ethtool_stats() local
1932 bp = netdev_priv(dev); gem_get_ethtool_stats()
1933 gem_update_stats(bp); gem_get_ethtool_stats()
1934 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); gem_get_ethtool_stats()
1962 struct macb *bp = netdev_priv(dev); macb_get_stats() local
1963 struct net_device_stats *nstat = &bp->stats; macb_get_stats()
1964 struct macb_stats *hwstat = &bp->hw_stats.macb; macb_get_stats()
1966 if (macb_is_gem(bp)) macb_get_stats()
1967 return gem_get_stats(bp); macb_get_stats()
1970 macb_update_stats(bp); macb_get_stats()
2009 struct macb *bp = netdev_priv(dev); macb_get_settings() local
2010 struct phy_device *phydev = bp->phy_dev; macb_get_settings()
2020 struct macb *bp = netdev_priv(dev); macb_set_settings() local
2021 struct phy_device *phydev = bp->phy_dev; macb_set_settings()
2037 struct macb *bp = netdev_priv(dev); macb_get_regs() local
2041 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) macb_get_regs()
2044 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); macb_get_regs()
2045 head = macb_tx_ring_wrap(bp->queues[0].tx_head); macb_get_regs()
2047 regs_buff[0] = macb_readl(bp, NCR); macb_get_regs()
2048 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); macb_get_regs()
2049 regs_buff[2] = macb_readl(bp, NSR); macb_get_regs()
2050 regs_buff[3] = macb_readl(bp, TSR); macb_get_regs()
2051 regs_buff[4] = macb_readl(bp, RBQP); macb_get_regs()
2052 regs_buff[5] = macb_readl(bp, TBQP); macb_get_regs()
2053 regs_buff[6] = macb_readl(bp, RSR); macb_get_regs()
2054 regs_buff[7] = macb_readl(bp, IMR); macb_get_regs()
2058 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); macb_get_regs()
2059 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); macb_get_regs()
2061 regs_buff[12] = macb_or_gem_readl(bp, USRIO); macb_get_regs()
2062 if (macb_is_gem(bp)) { macb_get_regs()
2063 regs_buff[13] = gem_readl(bp, DMACFG); macb_get_regs()
2090 struct macb *bp = netdev_priv(dev); macb_ioctl() local
2091 struct phy_device *phydev = bp->phy_dev; macb_ioctl()
2105 struct macb *bp = netdev_priv(netdev); macb_set_features() local
2109 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { macb_set_features()
2112 dmacfg = gem_readl(bp, DMACFG); macb_set_features()
2117 gem_writel(bp, DMACFG, dmacfg); macb_set_features()
2121 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { macb_set_features()
2124 netcfg = gem_readl(bp, NCFGR); macb_set_features()
2130 gem_writel(bp, NCFGR, netcfg); macb_set_features()
2156 static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) macb_configure_caps() argument
2161 bp->caps = dt_conf->caps; macb_configure_caps()
2163 if (macb_is_gem_hw(bp->regs)) { macb_configure_caps()
2164 bp->caps |= MACB_CAPS_MACB_IS_GEM; macb_configure_caps()
2166 dcfg = gem_readl(bp, DCFG1); macb_configure_caps()
2168 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; macb_configure_caps()
2169 dcfg = gem_readl(bp, DCFG2); macb_configure_caps()
2171 bp->caps |= MACB_CAPS_FIFO_MODE; macb_configure_caps()
2174 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); macb_configure_caps()
2261 struct macb *bp = netdev_priv(dev); macb_init() local
2271 if (!(bp->queue_mask & (1 << hw_q))) macb_init()
2274 queue = &bp->queues[q]; macb_init()
2275 queue->bp = bp; macb_init()
2311 netif_napi_add(dev, &bp->napi, macb_poll, 64); macb_init()
2314 if (macb_is_gem(bp)) { macb_init()
2315 bp->max_tx_length = GEM_MAX_TX_LEN; macb_init()
2316 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; macb_init()
2317 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; macb_init()
2318 bp->macbgem_ops.mog_init_rings = gem_init_rings; macb_init()
2319 bp->macbgem_ops.mog_rx = gem_rx; macb_init()
2322 bp->max_tx_length = MACB_MAX_TX_LEN; macb_init()
2323 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; macb_init()
2324 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; macb_init()
2325 bp->macbgem_ops.mog_init_rings = macb_init_rings; macb_init()
2326 bp->macbgem_ops.mog_rx = macb_rx; macb_init()
2333 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) macb_init()
2335 if (bp->caps & MACB_CAPS_SG_DISABLED) macb_init()
2340 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) macb_init()
2342 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && macb_init()
2343 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) macb_init()
2345 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) macb_init()
2348 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) macb_init()
2351 macb_or_gem_writel(bp, USRIO, val); macb_init()
2354 val = macb_mdc_clk_div(bp); macb_init()
2355 val |= macb_dbw(bp); macb_init()
2356 macb_writel(bp, NCFGR, val); macb_init()
2650 struct macb *bp = netdev_priv(dev); at91ether_init() local
2662 macb_writel(bp, NCR, 0); at91ether_init()
2665 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) at91ether_init()
2668 macb_writel(bp, NCFGR, reg); at91ether_init()
2745 struct macb *bp; macb_probe() local
2771 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); macb_probe()
2781 bp = netdev_priv(dev); macb_probe()
2782 bp->pdev = pdev; macb_probe()
2783 bp->dev = dev; macb_probe()
2784 bp->regs = mem; macb_probe()
2785 bp->num_queues = num_queues; macb_probe()
2786 bp->queue_mask = queue_mask; macb_probe()
2788 bp->dma_burst_length = macb_config->dma_burst_length; macb_probe()
2789 bp->pclk = pclk; macb_probe()
2790 bp->hclk = hclk; macb_probe()
2791 bp->tx_clk = tx_clk; macb_probe()
2792 spin_lock_init(&bp->lock); macb_probe()
2795 macb_configure_caps(bp, macb_config); macb_probe()
2807 memcpy(bp->dev->dev_addr, mac, ETH_ALEN); macb_probe()
2809 macb_get_hwaddr(bp); macb_probe()
2815 bp->phy_interface = PHY_INTERFACE_MODE_RMII; macb_probe()
2817 bp->phy_interface = PHY_INTERFACE_MODE_MII; macb_probe()
2819 bp->phy_interface = err; macb_probe()
2833 err = macb_mii_init(bp); macb_probe()
2840 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), macb_probe()
2843 phydev = bp->phy_dev; macb_probe()
2866 struct macb *bp; macb_remove() local
2871 bp = netdev_priv(dev); macb_remove()
2872 if (bp->phy_dev) macb_remove()
2873 phy_disconnect(bp->phy_dev); macb_remove()
2874 mdiobus_unregister(bp->mii_bus); macb_remove()
2875 kfree(bp->mii_bus->irq); macb_remove()
2876 mdiobus_free(bp->mii_bus); macb_remove()
2878 clk_disable_unprepare(bp->tx_clk); macb_remove()
2879 clk_disable_unprepare(bp->hclk); macb_remove()
2880 clk_disable_unprepare(bp->pclk); macb_remove()
2891 struct macb *bp = netdev_priv(netdev); macb_suspend() local
2896 clk_disable_unprepare(bp->tx_clk); macb_suspend()
2897 clk_disable_unprepare(bp->hclk); macb_suspend()
2898 clk_disable_unprepare(bp->pclk); macb_suspend()
2907 struct macb *bp = netdev_priv(netdev); macb_resume() local
2909 clk_prepare_enable(bp->pclk); macb_resume()
2910 clk_prepare_enable(bp->hclk); macb_resume()
2911 clk_prepare_enable(bp->tx_clk); macb_resume()
|
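The gem_update_stats() hits above show a common MMIO idiom: a 64-bit hardware counter is exposed as two 32-bit registers, the low word at some offset and the high word at offset + 4, and the driver accumulates both halves into a software u64 total. A minimal userspace sketch of the same accumulation; fake_regs and the counter layout are stand-ins for illustration, not the driver's actual register map:

#include <stdint.h>
#include <stdio.h>

/* Fake "registers": counter i has low word at [2*i], high at [2*i + 1]. */
static uint32_t fake_regs[4] = { 0xffffffff, 0x0, 0x12345678, 0x9a };

int main(void)
{
    uint64_t totals[2] = { 0, 0 };

    for (int i = 0; i < 2; i++) {
        /* Read the low half first, then the high half, as the driver does. */
        uint64_t val = fake_regs[2 * i];
        totals[i] += val;
        val = fake_regs[2 * i + 1];
        totals[i] += val << 32;
        printf("counter %d = 0x%llx\n", i, (unsigned long long)totals[i]);
    }
    return 0;
}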
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_symlink_remote.c | 56 struct xfs_buf *bp) xfs_symlink_hdr_set()
58 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_hdr_set()
68 dsl->sl_blkno = cpu_to_be64(bp->b_bn); xfs_symlink_hdr_set()
69 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink_hdr_set()
84 struct xfs_buf *bp) xfs_symlink_hdr_ok()
86 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_hdr_ok()
101 struct xfs_buf *bp) xfs_symlink_verify()
103 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_verify()
104 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_verify()
112 if (bp->b_bn != be64_to_cpu(dsl->sl_blkno)) xfs_symlink_verify()
125 struct xfs_buf *bp) xfs_symlink_read_verify()
127 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_read_verify()
133 if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF)) xfs_symlink_read_verify()
134 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_symlink_read_verify()
135 else if (!xfs_symlink_verify(bp)) xfs_symlink_read_verify()
136 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_symlink_read_verify()
138 if (bp->b_error) xfs_symlink_read_verify()
139 xfs_verifier_error(bp); xfs_symlink_read_verify()
144 struct xfs_buf *bp) xfs_symlink_write_verify()
146 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_write_verify()
147 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_symlink_write_verify()
153 if (!xfs_symlink_verify(bp)) { xfs_symlink_write_verify()
154 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_symlink_write_verify()
155 xfs_verifier_error(bp); xfs_symlink_write_verify()
160 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_write_verify()
163 xfs_buf_update_cksum(bp, XFS_SYMLINK_CRC_OFF); xfs_symlink_write_verify()
174 struct xfs_buf *bp, xfs_symlink_local_to_remote()
181 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF); xfs_symlink_local_to_remote()
184 bp->b_ops = NULL; xfs_symlink_local_to_remote()
185 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); xfs_symlink_local_to_remote()
193 ASSERT(BBTOB(bp->b_length) >= xfs_symlink_local_to_remote()
196 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink_local_to_remote()
198 buf = bp->b_addr; xfs_symlink_local_to_remote()
199 buf += xfs_symlink_hdr_set(mp, ip->i_ino, 0, ifp->if_bytes, bp); xfs_symlink_local_to_remote()
51 xfs_symlink_hdr_set( struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset, uint32_t size, struct xfs_buf *bp) xfs_symlink_hdr_set() argument
80 xfs_symlink_hdr_ok( xfs_ino_t ino, uint32_t offset, uint32_t size, struct xfs_buf *bp) xfs_symlink_hdr_ok() argument
100 xfs_symlink_verify( struct xfs_buf *bp) xfs_symlink_verify() argument
124 xfs_symlink_read_verify( struct xfs_buf *bp) xfs_symlink_read_verify() argument
143 xfs_symlink_write_verify( struct xfs_buf *bp) xfs_symlink_write_verify() argument
172 xfs_symlink_local_to_remote( struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *ip, struct xfs_ifork *ifp) xfs_symlink_local_to_remote() argument
|
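The xfs_symlink_read_verify() hits above trace the standard two-stage XFS buffer read verifier: check the CRC first and record -EFSBADCRC, otherwise run the structural checks and record -EFSCORRUPTED, then raise a verifier error if either stage set b_error. A simplified standalone model of just that control flow; the buf struct and error constants here are stand-ins, not the kernel types:

#include <stdio.h>

#define EFSBADCRC    1 /* stand-ins for the kernel error codes */
#define EFSCORRUPTED 2

struct buf { unsigned crc_ok : 1, magic_ok : 1; int b_error; };

static void buf_ioerror(struct buf *bp, int err) { bp->b_error = err; }

static void symlink_read_verify(struct buf *bp)
{
    if (!bp->crc_ok)                 /* stage 1: checksum */
        buf_ioerror(bp, EFSBADCRC);
    else if (!bp->magic_ok)          /* stage 2: structure */
        buf_ioerror(bp, EFSCORRUPTED);

    if (bp->b_error)
        fprintf(stderr, "verifier error %d\n", bp->b_error);
}

int main(void)
{
    struct buf bad = { .crc_ok = 0, .magic_ok = 1 };
    symlink_read_verify(&bad);       /* reports the CRC error first */
    return 0;
}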
H A D | xfs_dquot_buf.c | 177 struct xfs_buf *bp) xfs_dquot_buf_verify_crc()
179 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; xfs_dquot_buf_verify_crc()
195 XFS_BB_TO_FSB(mp, bp->b_length)); xfs_dquot_buf_verify_crc()
210 struct xfs_buf *bp, xfs_dquot_buf_verify()
213 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; xfs_dquot_buf_verify()
226 ndquots = xfs_calc_dquots_per_chunk(bp->b_length); xfs_dquot_buf_verify()
253 struct xfs_buf *bp) xfs_dquot_buf_read_verify()
255 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_read_verify()
257 if (!xfs_dquot_buf_verify_crc(mp, bp)) xfs_dquot_buf_read_verify()
258 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dquot_buf_read_verify()
259 else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) xfs_dquot_buf_read_verify()
260 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dquot_buf_read_verify()
262 if (bp->b_error) xfs_dquot_buf_read_verify()
263 xfs_verifier_error(bp); xfs_dquot_buf_read_verify()
274 struct xfs_buf *bp) xfs_dquot_buf_readahead_verify()
276 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_readahead_verify()
278 if (!xfs_dquot_buf_verify_crc(mp, bp) || xfs_dquot_buf_readahead_verify()
279 !xfs_dquot_buf_verify(mp, bp, 0)) { xfs_dquot_buf_readahead_verify()
280 xfs_buf_ioerror(bp, -EIO); xfs_dquot_buf_readahead_verify()
281 bp->b_flags &= ~XBF_DONE; xfs_dquot_buf_readahead_verify()
292 struct xfs_buf *bp) xfs_dquot_buf_write_verify()
294 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_write_verify()
296 if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) { xfs_dquot_buf_write_verify()
297 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dquot_buf_write_verify()
298 xfs_verifier_error(bp); xfs_dquot_buf_write_verify()
175 xfs_dquot_buf_verify_crc( struct xfs_mount *mp, struct xfs_buf *bp) xfs_dquot_buf_verify_crc() argument
208 xfs_dquot_buf_verify( struct xfs_mount *mp, struct xfs_buf *bp, int warn) xfs_dquot_buf_verify() argument
252 xfs_dquot_buf_read_verify( struct xfs_buf *bp) xfs_dquot_buf_read_verify() argument
273 xfs_dquot_buf_readahead_verify( struct xfs_buf *bp) xfs_dquot_buf_readahead_verify() argument
291 xfs_dquot_buf_write_verify( struct xfs_buf *bp) xfs_dquot_buf_write_verify() argument
|
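xfs_dquot_buf_verify() sizes its per-record loop from xfs_calc_dquots_per_chunk(bp->b_length), i.e. how many on-disk dquot records fit in a buffer whose length is given in 512-byte basic blocks. The arithmetic is just a division; a hedged sketch, where the 136-byte record size is only illustrative of a v5 on-disk dquot and is not confirmed by this listing:

#include <stdio.h>

#define BBSHIFT 9                 /* basic blocks are 512 bytes */
#define BBTOB(bbs) ((bbs) << BBSHIFT)

/* Illustrative size of one on-disk dquot record (assumption). */
#define DQBLK_SIZE 136

static unsigned calc_dquots_per_chunk(unsigned nbblks)
{
    return BBTOB(nbblks) / DQBLK_SIZE;
}

int main(void)
{
    /* One 4k filesystem block = 8 basic blocks. */
    printf("%u dquots per 4k chunk\n", calc_dquots_per_chunk(8));
    return 0;
}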
H A D | xfs_dir2_block.c | 41 static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, struct xfs_buf *bp,
43 static void xfs_dir2_block_log_tail(xfs_trans_t *tp, struct xfs_buf *bp);
62 struct xfs_buf *bp) xfs_dir3_block_verify()
64 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_verify()
65 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_verify()
72 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_block_verify()
78 if (__xfs_dir3_data_check(NULL, bp)) xfs_dir3_block_verify()
85 struct xfs_buf *bp) xfs_dir3_block_read_verify()
87 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_read_verify()
90 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) xfs_dir3_block_read_verify()
91 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_block_read_verify()
92 else if (!xfs_dir3_block_verify(bp)) xfs_dir3_block_read_verify()
93 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_block_read_verify()
95 if (bp->b_error) xfs_dir3_block_read_verify()
96 xfs_verifier_error(bp); xfs_dir3_block_read_verify()
101 struct xfs_buf *bp) xfs_dir3_block_write_verify()
103 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_write_verify()
104 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_block_write_verify()
105 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_write_verify()
107 if (!xfs_dir3_block_verify(bp)) { xfs_dir3_block_write_verify()
108 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_block_write_verify()
109 xfs_verifier_error(bp); xfs_dir3_block_write_verify()
119 xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF); xfs_dir3_block_write_verify()
147 struct xfs_buf *bp, xfs_dir3_block_init()
150 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_init()
152 bp->b_ops = &xfs_dir3_block_buf_ops; xfs_dir3_block_init()
153 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_BLOCK_BUF); xfs_dir3_block_init()
158 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_dir3_block_init()
282 struct xfs_buf *bp, xfs_dir2_block_compact()
314 xfs_dir2_data_make_free(args, bp, xfs_dir2_block_compact()
336 struct xfs_buf *bp; /* buffer for block */ xfs_dir2_block_addname() local
363 /* Read the (one and only) directory block into bp. */ xfs_dir2_block_addname()
364 error = xfs_dir3_block_read(tp, dp, &bp); xfs_dir2_block_addname()
373 hdr = bp->b_addr; xfs_dir2_block_addname()
388 xfs_trans_brelse(tp, bp); xfs_dir2_block_addname()
405 error = xfs_dir2_block_to_leaf(args, bp); xfs_dir2_block_addname()
417 xfs_dir2_block_compact(args, bp, hdr, btp, blp, &needlog, xfs_dir2_block_addname()
452 xfs_dir2_data_use_free(args, bp, enddup, xfs_dir2_block_addname()
535 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); xfs_dir2_block_addname()
539 xfs_dir2_data_use_free(args, bp, dup, xfs_dir2_block_addname()
557 xfs_dir2_data_log_header(args, bp); xfs_dir2_block_addname()
558 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_block_addname()
559 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_block_addname()
560 xfs_dir3_data_check(dp, bp); xfs_dir2_block_addname()
570 struct xfs_buf *bp, /* block buffer */ xfs_dir2_block_log_leaf()
574 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_block_log_leaf()
580 xfs_trans_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr), xfs_dir2_block_log_leaf()
590 struct xfs_buf *bp) /* block buffer */ xfs_dir2_block_log_tail()
592 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_block_log_tail()
596 xfs_trans_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr), xfs_dir2_block_log_tail()
610 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_lookup() local
623 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) xfs_dir2_block_lookup()
626 hdr = bp->b_addr; xfs_dir2_block_lookup()
627 xfs_dir3_data_check(dp, bp); xfs_dir2_block_lookup()
642 xfs_trans_brelse(args->trans, bp); xfs_dir2_block_lookup()
658 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_lookup_int() local
675 error = xfs_dir3_block_read(tp, dp, &bp); xfs_dir2_block_lookup_int()
679 hdr = bp->b_addr; xfs_dir2_block_lookup_int()
680 xfs_dir3_data_check(dp, bp); xfs_dir2_block_lookup_int()
698 xfs_trans_brelse(tp, bp); xfs_dir2_block_lookup_int()
728 *bpp = bp; xfs_dir2_block_lookup_int()
746 xfs_trans_brelse(tp, bp); xfs_dir2_block_lookup_int()
760 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_removename() local
778 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { xfs_dir2_block_removename()
783 hdr = bp->b_addr; xfs_dir2_block_removename()
796 xfs_dir2_data_make_free(args, bp, xfs_dir2_block_removename()
803 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_block_removename()
808 xfs_dir2_block_log_leaf(tp, bp, ent, ent); xfs_dir2_block_removename()
815 xfs_dir2_data_log_header(args, bp); xfs_dir2_block_removename()
816 xfs_dir3_data_check(dp, bp); xfs_dir2_block_removename()
827 return xfs_dir2_block_to_sf(args, bp, size, &sfh); xfs_dir2_block_removename()
840 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_replace() local
853 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { xfs_dir2_block_replace()
857 hdr = bp->b_addr; xfs_dir2_block_replace()
872 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_block_replace()
873 xfs_dir3_data_check(dp, bp); xfs_dir2_block_replace()
1049 struct xfs_buf *bp; /* block buffer */ xfs_dir2_sf_to_block() local
1115 error = xfs_dir3_data_init(args, blkno, &bp); xfs_dir2_sf_to_block()
1120 xfs_dir3_block_init(mp, tp, bp, dp); xfs_dir2_sf_to_block()
1121 hdr = bp->b_addr; xfs_dir2_sf_to_block()
1134 xfs_dir2_data_use_free(args, bp, dup, args->geo->blksize - i, xfs_dir2_sf_to_block()
1148 xfs_dir2_data_use_free(args, bp, dup, xfs_dir2_sf_to_block()
1161 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1175 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1209 xfs_dir2_data_log_unused(args, bp, dup); xfs_dir2_sf_to_block()
1226 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1250 xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); xfs_dir2_sf_to_block()
1251 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_sf_to_block()
1252 xfs_dir3_data_check(dp, bp); xfs_dir2_sf_to_block()
61 xfs_dir3_block_verify( struct xfs_buf *bp) xfs_dir3_block_verify() argument
84 xfs_dir3_block_read_verify( struct xfs_buf *bp) xfs_dir3_block_read_verify() argument
100 xfs_dir3_block_write_verify( struct xfs_buf *bp) xfs_dir3_block_write_verify() argument
144 xfs_dir3_block_init( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *dp) xfs_dir3_block_init() argument
280 xfs_dir2_block_compact( struct xfs_da_args *args, struct xfs_buf *bp, struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_block_tail *btp, struct xfs_dir2_leaf_entry *blp, int *needlog, int *lfloghigh, int *lfloglow) xfs_dir2_block_compact() argument
568 xfs_dir2_block_log_leaf( xfs_trans_t *tp, struct xfs_buf *bp, int first, int last) xfs_dir2_block_log_leaf() argument
588 xfs_dir2_block_log_tail( xfs_trans_t *tp, struct xfs_buf *bp) xfs_dir2_block_log_tail() argument
|
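Several xfs_dir2_block_addname() hits above manipulate btp (the block tail) and blp (the hash lookup array immediately before it); both are located by pointer arithmetic from the end of the block. A compact model of that layout math, using simplified stand-in types rather than the real on-disk structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct leaf_entry { uint32_t hashval, address; };
struct block_tail { uint32_t count, stale; };

int main(void)
{
    size_t blksize = 4096;
    char *block = calloc(1, blksize);

    /* The tail lives in the last bytes of the block... */
    struct block_tail *btp =
        (struct block_tail *)(block + blksize) - 1;
    btp->count = 3;

    /* ...and the leaf array ends where the tail begins. */
    struct leaf_entry *blp = (struct leaf_entry *)btp - btp->count;

    printf("tail at offset %zu, leaf array at offset %zu\n",
           (size_t)((char *)btp - block), (size_t)((char *)blp - block));
    free(block);
    return 0;
}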
H A D | xfs_da_btree.c | 113 state->altpath.blk[i].bp = NULL; xfs_da_state_kill_altpath()
132 struct xfs_buf *bp) xfs_da3_node_verify()
134 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_da3_node_verify()
135 struct xfs_da_intnode *hdr = bp->b_addr; xfs_da3_node_verify()
144 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_verify()
151 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn) xfs_da3_node_verify()
179 struct xfs_buf *bp) xfs_da3_node_write_verify()
181 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_da3_node_write_verify()
182 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_da3_node_write_verify()
183 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_write_verify()
185 if (!xfs_da3_node_verify(bp)) { xfs_da3_node_write_verify()
186 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_da3_node_write_verify()
187 xfs_verifier_error(bp); xfs_da3_node_write_verify()
197 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF); xfs_da3_node_write_verify()
208 struct xfs_buf *bp) xfs_da3_node_read_verify()
210 struct xfs_da_blkinfo *info = bp->b_addr; xfs_da3_node_read_verify()
214 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { xfs_da3_node_read_verify()
215 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_da3_node_read_verify()
220 if (!xfs_da3_node_verify(bp)) { xfs_da3_node_read_verify()
221 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_da3_node_read_verify()
227 bp->b_ops = &xfs_attr3_leaf_buf_ops; xfs_da3_node_read_verify()
228 bp->b_ops->verify_read(bp); xfs_da3_node_read_verify()
232 bp->b_ops = &xfs_dir3_leafn_buf_ops; xfs_da3_node_read_verify()
233 bp->b_ops->verify_read(bp); xfs_da3_node_read_verify()
240 xfs_verifier_error(bp); xfs_da3_node_read_verify()
307 struct xfs_buf *bp; xfs_da3_node_create() local
314 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); xfs_da3_node_create()
317 bp->b_ops = &xfs_da3_node_buf_ops; xfs_da3_node_create()
318 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); xfs_da3_node_create()
319 node = bp->b_addr; xfs_da3_node_create()
322 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_create()
325 hdr3->info.blkno = cpu_to_be64(bp->b_bn); xfs_da3_node_create()
334 xfs_trans_log_buf(tp, bp, xfs_da3_node_create()
337 *bpp = bp; xfs_da3_node_create()
353 struct xfs_buf *bp; xfs_da3_split() local
421 addblk->bp = NULL; xfs_da3_split()
449 addblk->bp = NULL; xfs_da3_split()
464 node = oldblk->bp->b_addr; xfs_da3_split()
467 bp = addblk->bp; xfs_da3_split()
470 bp = state->extrablk.bp; xfs_da3_split()
472 node = bp->b_addr; xfs_da3_split()
474 xfs_trans_log_buf(state->args->trans, bp, xfs_da3_split()
478 node = oldblk->bp->b_addr; xfs_da3_split()
481 bp = addblk->bp; xfs_da3_split()
484 bp = state->extrablk.bp; xfs_da3_split()
486 node = bp->b_addr; xfs_da3_split()
488 xfs_trans_log_buf(state->args->trans, bp, xfs_da3_split()
492 addblk->bp = NULL; xfs_da3_split()
512 struct xfs_buf *bp; xfs_da3_root_split() local
534 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); xfs_da3_root_split()
537 node = bp->b_addr; xfs_da3_root_split()
538 oldroot = blk1->bp->b_addr; xfs_da3_root_split()
549 * we are about to copy oldroot to bp, so set up the type xfs_da3_root_split()
550 * of bp while we know exactly what it will be. xfs_da3_root_split()
552 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); xfs_da3_root_split()
567 * we are about to copy oldroot to bp, so set up the type xfs_da3_root_split()
568 * of bp while we know exactly what it will be. xfs_da3_root_split()
570 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); xfs_da3_root_split()
584 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn); xfs_da3_root_split()
586 xfs_trans_log_buf(tp, bp, 0, size - 1); xfs_da3_root_split()
588 bp->b_ops = blk1->bp->b_ops; xfs_da3_root_split()
589 xfs_trans_buf_copy_type(bp, blk1->bp); xfs_da3_root_split()
590 blk1->bp = bp; xfs_da3_root_split()
598 level + 1, &bp, args->whichfork); xfs_da3_root_split()
602 node = bp->b_addr; xfs_da3_root_split()
623 xfs_trans_log_buf(tp, bp, xfs_da3_root_split()
651 node = oldblk->bp->b_addr; xfs_da3_node_split()
672 &newblk->bp, state->args->whichfork); xfs_da3_node_split()
698 node = oldblk->bp->b_addr; xfs_da3_node_split()
752 node1 = blk1->bp->b_addr; xfs_da3_node_rebalance()
753 node2 = blk2->bp->b_addr; xfs_da3_node_rebalance()
818 xfs_trans_log_buf(tp, blk1->bp, xfs_da3_node_rebalance()
836 xfs_trans_log_buf(tp, blk1->bp, xfs_da3_node_rebalance()
840 xfs_trans_log_buf(tp, blk2->bp, xfs_da3_node_rebalance()
850 node1 = blk1->bp->b_addr; xfs_da3_node_rebalance()
851 node2 = blk2->bp->b_addr; xfs_da3_node_rebalance()
886 node = oldblk->bp->b_addr; xfs_da3_node_add()
906 xfs_trans_log_buf(state->args->trans, oldblk->bp, xfs_da3_node_add()
912 xfs_trans_log_buf(state->args->trans, oldblk->bp, xfs_da3_node_add()
996 drop_blk->bp); xfs_da3_join()
997 drop_blk->bp = NULL; xfs_da3_join()
1046 struct xfs_buf *bp; xfs_da3_root_join() local
1057 oldroot = root_blk->bp->b_addr; xfs_da3_root_join()
1075 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp, xfs_da3_root_join()
1079 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level); xfs_da3_root_join()
1088 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize); xfs_da3_root_join()
1089 root_blk->bp->b_ops = bp->b_ops; xfs_da3_root_join()
1090 xfs_trans_buf_copy_type(root_blk->bp, bp); xfs_da3_root_join()
1092 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr; xfs_da3_root_join()
1093 da3->blkno = cpu_to_be64(root_blk->bp->b_bn); xfs_da3_root_join()
1095 xfs_trans_log_buf(args->trans, root_blk->bp, 0, xfs_da3_root_join()
1097 error = xfs_da_shrink_inode(args, child, bp); xfs_da3_root_join()
1119 struct xfs_buf *bp; xfs_da3_node_toosmall() local
1136 info = blk->bp->b_addr; xfs_da3_node_toosmall()
1191 blkno, -1, &bp, state->args->whichfork); xfs_da3_node_toosmall()
1195 node = bp->b_addr; xfs_da3_node_toosmall()
1197 xfs_trans_brelse(state->args->trans, bp); xfs_da3_node_toosmall()
1235 struct xfs_buf *bp, xfs_da3_node_lasthash()
1242 node = bp->b_addr; xfs_da3_node_lasthash()
1275 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); xfs_da3_fixhashpath()
1280 lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count); xfs_da3_fixhashpath()
1285 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); xfs_da3_fixhashpath()
1293 node = blk->bp->b_addr; xfs_da3_fixhashpath()
1300 xfs_trans_log_buf(state->args->trans, blk->bp, xfs_da3_fixhashpath()
1325 node = drop_blk->bp->b_addr; xfs_da3_node_remove()
1339 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1344 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1348 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1380 drop_node = drop_blk->bp->b_addr; xfs_da3_node_unbalance()
1381 save_node = save_blk->bp->b_addr; xfs_da3_node_unbalance()
1401 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1407 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1420 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1481 -1, &blk->bp, args->whichfork); xfs_da3_node_lookup_int()
1487 curr = blk->bp->b_addr; xfs_da3_node_lookup_int()
1493 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); xfs_da3_node_lookup_int()
1501 blk->bp, NULL); xfs_da3_node_lookup_int()
1511 node = blk->bp->b_addr; xfs_da3_node_lookup_int()
1570 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, xfs_da3_node_lookup_int()
1573 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); xfs_da3_node_lookup_int()
1648 struct xfs_buf *bp; xfs_da3_blk_link() local
1658 old_info = old_blk->bp->b_addr; xfs_da3_blk_link()
1659 new_info = new_blk->bp->b_addr; xfs_da3_blk_link()
1666 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1669 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1672 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1689 -1, &bp, args->whichfork); xfs_da3_blk_link()
1692 ASSERT(bp != NULL); xfs_da3_blk_link()
1693 tmp_info = bp->b_addr; xfs_da3_blk_link()
1697 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da3_blk_link()
1710 -1, &bp, args->whichfork); xfs_da3_blk_link()
1713 ASSERT(bp != NULL); xfs_da3_blk_link()
1714 tmp_info = bp->b_addr; xfs_da3_blk_link()
1718 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da3_blk_link()
1723 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); xfs_da3_blk_link()
1724 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); xfs_da3_blk_link()
1741 struct xfs_buf *bp; xfs_da3_blk_unlink() local
1749 save_info = save_blk->bp->b_addr; xfs_da3_blk_unlink()
1750 drop_info = drop_blk->bp->b_addr; xfs_da3_blk_unlink()
1769 -1, &bp, args->whichfork); xfs_da3_blk_unlink()
1772 ASSERT(bp != NULL); xfs_da3_blk_unlink()
1773 tmp_info = bp->b_addr; xfs_da3_blk_unlink()
1777 xfs_trans_log_buf(args->trans, bp, 0, xfs_da3_blk_unlink()
1786 -1, &bp, args->whichfork); xfs_da3_blk_unlink()
1789 ASSERT(bp != NULL); xfs_da3_blk_unlink()
1790 tmp_info = bp->b_addr; xfs_da3_blk_unlink()
1794 xfs_trans_log_buf(args->trans, bp, 0, xfs_da3_blk_unlink()
1799 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); xfs_da3_blk_unlink()
1843 node = blk->bp->b_addr; xfs_da3_path_shift()
1873 xfs_trans_brelse(args->trans, blk->bp); xfs_da3_path_shift()
1880 &blk->bp, args->whichfork); xfs_da3_path_shift()
1883 info = blk->bp->b_addr; xfs_da3_path_shift()
1915 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); xfs_da3_path_shift()
1923 blk->bp, NULL); xfs_da3_path_shift()
2538 struct xfs_buf *bp; xfs_da_get_buf() local
2556 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, xfs_da_get_buf()
2558 error = bp ? bp->b_error : -EIO; xfs_da_get_buf()
2560 if (bp) xfs_da_get_buf()
2561 xfs_trans_brelse(trans, bp); xfs_da_get_buf()
2565 *bpp = bp; xfs_da_get_buf()
2587 struct xfs_buf *bp; xfs_da_read_buf() local
2607 mapp, nmap, 0, &bp, ops); xfs_da_read_buf()
2612 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF); xfs_da_read_buf()
2614 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); xfs_da_read_buf()
2615 *bpp = bp; xfs_da_read_buf()
131 xfs_da3_node_verify( struct xfs_buf *bp) xfs_da3_node_verify() argument
178 xfs_da3_node_write_verify( struct xfs_buf *bp) xfs_da3_node_write_verify() argument
207 xfs_da3_node_read_verify( struct xfs_buf *bp) xfs_da3_node_read_verify() argument
1233 xfs_da3_node_lasthash( struct xfs_inode *dp, struct xfs_buf *bp, int *count) xfs_da3_node_lasthash() argument
|
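xfs_da3_node_read_verify() is notable because a da btree read can land on an attr leaf or dir leafn block rather than a node; it inspects the block's magic and, for the leaf cases, repoints bp->b_ops at the matching ops vector and delegates to that verify_read. A minimal standalone model of the dispatch; the struct and the specific magic values used here are simplified stand-ins:

#include <stdio.h>

struct buf;
struct buf_ops { void (*verify_read)(struct buf *); };

struct buf { unsigned magic; const struct buf_ops *ops; };

static void verify_node(struct buf *bp) { (void)bp; printf("node verify\n"); }
static void verify_leaf(struct buf *bp) { (void)bp; printf("leaf verify\n"); }

static const struct buf_ops node_ops = { verify_node };
static const struct buf_ops leaf_ops = { verify_leaf };

enum { NODE_MAGIC = 0xfebe, LEAF_MAGIC = 0xfbee };  /* illustrative */

static void node_read_verify(struct buf *bp)
{
    switch (bp->magic) {
    case NODE_MAGIC:
        verify_node(bp);
        break;
    case LEAF_MAGIC:
        /* Not a node after all: hand the buffer to the leaf ops. */
        bp->ops = &leaf_ops;
        bp->ops->verify_read(bp);
        break;
    default:
        fprintf(stderr, "corrupt block\n");
    }
}

int main(void)
{
    struct buf bp = { LEAF_MAGIC, &node_ops };
    node_read_verify(&bp);   /* prints "leaf verify" */
    return 0;
}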
H A D | xfs_alloc_btree.c | 106 struct xfs_buf *bp) xfs_allocbt_free_block()
113 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_allocbt_free_block()
122 xfs_trans_binval(cur->bc_tp, bp); xfs_allocbt_free_block()
274 struct xfs_buf *bp) xfs_allocbt_verify()
276 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_allocbt_verify()
277 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_allocbt_verify()
278 struct xfs_perag *pag = bp->b_pag; xfs_allocbt_verify()
300 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_allocbt_verify()
318 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_allocbt_verify()
354 struct xfs_buf *bp) xfs_allocbt_read_verify()
356 if (!xfs_btree_sblock_verify_crc(bp)) xfs_allocbt_read_verify()
357 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_allocbt_read_verify()
358 else if (!xfs_allocbt_verify(bp)) xfs_allocbt_read_verify()
359 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_allocbt_read_verify()
361 if (bp->b_error) { xfs_allocbt_read_verify()
362 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_allocbt_read_verify()
363 xfs_verifier_error(bp); xfs_allocbt_read_verify()
369 struct xfs_buf *bp) xfs_allocbt_write_verify()
371 if (!xfs_allocbt_verify(bp)) { xfs_allocbt_write_verify()
372 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_allocbt_write_verify()
373 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_allocbt_write_verify()
374 xfs_verifier_error(bp); xfs_allocbt_write_verify()
377 xfs_btree_sblock_calc_crc(bp); xfs_allocbt_write_verify()
104 xfs_allocbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_allocbt_free_block() argument
273 xfs_allocbt_verify( struct xfs_buf *bp) xfs_allocbt_verify() argument
353 xfs_allocbt_read_verify( struct xfs_buf *bp) xfs_allocbt_read_verify() argument
368 xfs_allocbt_write_verify( struct xfs_buf *bp) xfs_allocbt_write_verify() argument
|
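xfs_allocbt_write_verify() shows the write-side ordering: structural verification runs first, and only if it passes is the checksum recomputed (xfs_btree_sblock_calc_crc) just before the buffer goes to disk. A sketch of that ordering with stand-in helpers; the toy checksum and error value are assumptions, not the kernel's CRC32c machinery:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct buf { uint8_t data[64]; uint32_t crc; int b_error; };

static bool verify(const struct buf *bp)
{
    return bp->data[0] == 0xAB;   /* stand-in structural check */
}

static uint32_t calc_crc(const struct buf *bp)
{
    uint32_t c = 0;               /* toy checksum, not CRC32c */
    for (size_t i = 0; i < sizeof(bp->data); i++)
        c = c * 31 + bp->data[i];
    return c;
}

static void write_verify(struct buf *bp)
{
    if (!verify(bp)) {            /* never checksum a corrupt block */
        bp->b_error = -117;       /* stand-in for -EFSCORRUPTED */
        return;
    }
    bp->crc = calc_crc(bp);       /* stamp CRC last, once contents are final */
}

int main(void)
{
    struct buf bp = { .data = { 0xAB } };
    write_verify(&bp);
    printf("error=%d crc=0x%x\n", bp.b_error, bp.crc);
    return 0;
}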
H A D | xfs_ialloc_btree.c | 126 struct xfs_buf *bp) xfs_inobt_free_block()
131 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_inobt_free_block()
136 xfs_trans_binval(cur->bc_tp, bp); xfs_inobt_free_block()
211 struct xfs_buf *bp) xfs_inobt_verify()
213 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_inobt_verify()
214 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_inobt_verify()
215 struct xfs_perag *pag = bp->b_pag; xfs_inobt_verify()
235 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_inobt_verify()
270 struct xfs_buf *bp) xfs_inobt_read_verify()
272 if (!xfs_btree_sblock_verify_crc(bp)) xfs_inobt_read_verify()
273 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_inobt_read_verify()
274 else if (!xfs_inobt_verify(bp)) xfs_inobt_read_verify()
275 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inobt_read_verify()
277 if (bp->b_error) { xfs_inobt_read_verify()
278 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_inobt_read_verify()
279 xfs_verifier_error(bp); xfs_inobt_read_verify()
285 struct xfs_buf *bp) xfs_inobt_write_verify()
287 if (!xfs_inobt_verify(bp)) { xfs_inobt_write_verify()
288 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_inobt_write_verify()
289 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inobt_write_verify()
290 xfs_verifier_error(bp); xfs_inobt_write_verify()
293 xfs_btree_sblock_calc_crc(bp); xfs_inobt_write_verify()
124 xfs_inobt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_inobt_free_block() argument
210 xfs_inobt_verify( struct xfs_buf *bp) xfs_inobt_verify() argument
269 xfs_inobt_read_verify( struct xfs_buf *bp) xfs_inobt_read_verify() argument
284 xfs_inobt_write_verify( struct xfs_buf *bp) xfs_inobt_write_verify() argument
|
H A D | xfs_attr_remote.c | 120 struct xfs_buf *bp) xfs_attr3_rmt_read_verify()
122 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_rmt_read_verify()
132 ptr = bp->b_addr; xfs_attr3_rmt_read_verify()
133 bno = bp->b_bn; xfs_attr3_rmt_read_verify()
134 len = BBTOB(bp->b_length); xfs_attr3_rmt_read_verify()
139 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_attr3_rmt_read_verify()
143 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_read_verify()
151 if (bp->b_error) xfs_attr3_rmt_read_verify()
152 xfs_verifier_error(bp); xfs_attr3_rmt_read_verify()
159 struct xfs_buf *bp) xfs_attr3_rmt_write_verify()
161 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_rmt_write_verify()
171 ptr = bp->b_addr; xfs_attr3_rmt_write_verify()
172 bno = bp->b_bn; xfs_attr3_rmt_write_verify()
173 len = BBTOB(bp->b_length); xfs_attr3_rmt_write_verify()
180 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_write_verify()
181 xfs_verifier_error(bp); xfs_attr3_rmt_write_verify()
190 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_write_verify()
191 xfs_verifier_error(bp); xfs_attr3_rmt_write_verify()
250 struct xfs_buf *bp, xfs_attr_rmtval_copyout()
256 char *src = bp->b_addr; xfs_attr_rmtval_copyout()
257 xfs_daddr_t bno = bp->b_bn; xfs_attr_rmtval_copyout()
258 int len = BBTOB(bp->b_length); xfs_attr_rmtval_copyout()
298 struct xfs_buf *bp, xfs_attr_rmtval_copyin()
304 char *dst = bp->b_addr; xfs_attr_rmtval_copyin()
305 xfs_daddr_t bno = bp->b_bn; xfs_attr_rmtval_copyin()
306 int len = BBTOB(bp->b_length); xfs_attr_rmtval_copyin()
354 struct xfs_buf *bp; xfs_attr_rmtval_get() local
388 dblkno, dblkcnt, 0, &bp, xfs_attr_rmtval_get()
393 error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino, xfs_attr_rmtval_get()
396 xfs_buf_relse(bp); xfs_attr_rmtval_get()
511 struct xfs_buf *bp; xfs_attr_rmtval_set() local
531 bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); xfs_attr_rmtval_set()
532 if (!bp) xfs_attr_rmtval_set()
534 bp->b_ops = &xfs_attr3_rmt_buf_ops; xfs_attr_rmtval_set()
536 xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, xfs_attr_rmtval_set()
539 error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ xfs_attr_rmtval_set()
540 xfs_buf_relse(bp); xfs_attr_rmtval_set()
576 struct xfs_buf *bp; xfs_attr_rmtval_remove() local
599 bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK); xfs_attr_rmtval_remove()
600 if (bp) { xfs_attr_rmtval_remove()
601 xfs_buf_stale(bp); xfs_attr_rmtval_remove()
602 xfs_buf_relse(bp); xfs_attr_rmtval_remove()
603 bp = NULL; xfs_attr_rmtval_remove()
119 xfs_attr3_rmt_read_verify( struct xfs_buf *bp) xfs_attr3_rmt_read_verify() argument
158 xfs_attr3_rmt_write_verify( struct xfs_buf *bp) xfs_attr3_rmt_write_verify() argument
248 xfs_attr_rmtval_copyout( struct xfs_mount *mp, struct xfs_buf *bp, xfs_ino_t ino, int *offset, int *valuelen, __uint8_t **dst) xfs_attr_rmtval_copyout() argument
296 xfs_attr_rmtval_copyin( struct xfs_mount *mp, struct xfs_buf *bp, xfs_ino_t ino, int *offset, int *valuelen, __uint8_t **src) xfs_attr_rmtval_copyin() argument
|
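xfs_attr_rmtval_copyout() walks a multi-block buffer, skipping a per-block header and copying min(payload, bytes remaining) while advancing the offset and shrinking the remaining value length. A simplified copy loop in the same shape; the block geometry and 4-byte header here are illustrative only:

#include <stdio.h>
#include <string.h>

#define BLKSIZE 16
#define HDRSIZE 4   /* illustrative per-block remote-attr header */

/* Copy a remote value out of consecutive blocks into dst. */
static void rmt_copyout(const char *src, int nblocks,
                        char *dst, int valuelen)
{
    for (int b = 0; b < nblocks && valuelen > 0; b++) {
        int payload = BLKSIZE - HDRSIZE;
        int n = valuelen < payload ? valuelen : payload;

        memcpy(dst, src + b * BLKSIZE + HDRSIZE, n);
        dst += n;
        valuelen -= n;
    }
}

int main(void)
{
    /* Two blocks, each starting with a fake "hdr!" header. */
    char blocks[2 * BLKSIZE + 1] = "hdr!Hello, worldhdr!!!";
    char out[32] = { 0 };

    rmt_copyout(blocks, 2, out, 14);
    printf("%s\n", out);   /* prints "Hello, world!!" */
    return 0;
}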
H A D | xfs_dir2_data.c | 43 struct xfs_buf *bp) /* data block's buffer */ __xfs_dir3_data_check()
66 mp = bp->b_target->bt_mount; __xfs_dir3_data_check()
75 hdr = bp->b_addr; __xfs_dir3_data_check()
215 struct xfs_buf *bp) xfs_dir3_data_verify()
217 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_verify()
218 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_verify()
225 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_data_verify()
231 if (__xfs_dir3_data_check(NULL, bp)) xfs_dir3_data_verify()
243 struct xfs_buf *bp) xfs_dir3_data_reada_verify()
245 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir3_data_reada_verify()
250 bp->b_ops = &xfs_dir3_block_buf_ops; xfs_dir3_data_reada_verify()
251 bp->b_ops->verify_read(bp); xfs_dir3_data_reada_verify()
255 bp->b_ops = &xfs_dir3_data_buf_ops; xfs_dir3_data_reada_verify()
256 bp->b_ops->verify_read(bp); xfs_dir3_data_reada_verify()
259 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_reada_verify()
260 xfs_verifier_error(bp); xfs_dir3_data_reada_verify()
267 struct xfs_buf *bp) xfs_dir3_data_read_verify()
269 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_read_verify()
272 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) xfs_dir3_data_read_verify()
273 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_data_read_verify()
274 else if (!xfs_dir3_data_verify(bp)) xfs_dir3_data_read_verify()
275 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_read_verify()
277 if (bp->b_error) xfs_dir3_data_read_verify()
278 xfs_verifier_error(bp); xfs_dir3_data_read_verify()
283 struct xfs_buf *bp) xfs_dir3_data_write_verify()
285 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_write_verify()
286 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_data_write_verify()
287 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_write_verify()
289 if (!xfs_dir3_data_verify(bp)) { xfs_dir3_data_write_verify()
290 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_write_verify()
291 xfs_verifier_error(bp); xfs_dir3_data_write_verify()
301 xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF); xfs_dir3_data_write_verify()
573 struct xfs_buf *bp; /* block buffer */ xfs_dir3_data_init() local
591 -1, &bp, XFS_DATA_FORK); xfs_dir3_data_init()
594 bp->b_ops = &xfs_dir3_data_buf_ops; xfs_dir3_data_init()
595 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_DATA_BUF); xfs_dir3_data_init()
600 hdr = bp->b_addr; xfs_dir3_data_init()
602 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_init()
606 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_dir3_data_init()
633 xfs_dir2_data_log_header(args, bp); xfs_dir3_data_init()
634 xfs_dir2_data_log_unused(args, bp, dup); xfs_dir3_data_init()
635 *bpp = bp; xfs_dir3_data_init()
645 struct xfs_buf *bp, xfs_dir2_data_log_entry()
648 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir2_data_log_entry()
655 xfs_trans_log_buf(args->trans, bp, (uint)((char *)dep - (char *)hdr), xfs_dir2_data_log_entry()
666 struct xfs_buf *bp) xfs_dir2_data_log_header()
669 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir2_data_log_header()
677 xfs_trans_log_buf(args->trans, bp, 0, xfs_dir2_data_log_header()
687 struct xfs_buf *bp, xfs_dir2_data_log_unused()
690 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_data_log_unused()
700 xfs_trans_log_buf(args->trans, bp, (uint)((char *)dup - (char *)hdr), xfs_dir2_data_log_unused()
706 xfs_trans_log_buf(args->trans, bp, xfs_dir2_data_log_unused()
719 struct xfs_buf *bp, xfs_dir2_data_make_free()
734 hdr = bp->b_addr; xfs_dir2_data_make_free()
802 xfs_dir2_data_log_unused(args, bp, prevdup); xfs_dir2_data_make_free()
837 xfs_dir2_data_log_unused(args, bp, prevdup); xfs_dir2_data_make_free()
865 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_make_free()
892 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_make_free()
904 struct xfs_buf *bp, xfs_dir2_data_use_free()
921 hdr = bp->b_addr; xfs_dir2_data_use_free()
966 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
994 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
1022 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
1028 xfs_dir2_data_log_unused(args, bp, newdup2); xfs_dir2_data_use_free()
41 __xfs_dir3_data_check( struct xfs_inode *dp, struct xfs_buf *bp) __xfs_dir3_data_check() argument
214 xfs_dir3_data_verify( struct xfs_buf *bp) xfs_dir3_data_verify() argument
242 xfs_dir3_data_reada_verify( struct xfs_buf *bp) xfs_dir3_data_reada_verify() argument
266 xfs_dir3_data_read_verify( struct xfs_buf *bp) xfs_dir3_data_read_verify() argument
282 xfs_dir3_data_write_verify( struct xfs_buf *bp) xfs_dir3_data_write_verify() argument
643 xfs_dir2_data_log_entry( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_entry_t *dep) xfs_dir2_data_log_entry() argument
664 xfs_dir2_data_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir2_data_log_header() argument
685 xfs_dir2_data_log_unused( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_unused_t *dup) xfs_dir2_data_log_unused() argument
717 xfs_dir2_data_make_free( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp) xfs_dir2_data_make_free() argument
902 xfs_dir2_data_use_free( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_unused_t *dup, xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp) xfs_dir2_data_use_free() argument
|
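The xfs_dir2_data_log_* helpers above turn a pointer into the mapped block into a [first, last] byte range for xfs_trans_log_buf() by subtracting the block header address, exactly as in the (uint)((char *)dep - (char *)hdr) hits. A tiny model of that offset computation with stand-in types:

#include <stdio.h>
#include <stdint.h>

struct data_hdr { uint32_t magic; uint8_t rest[60]; };
struct data_entry { uint64_t inumber; uint8_t namelen; char name[7]; };

/* Stand-in for xfs_trans_log_buf(): just report the dirty range. */
static void log_range(unsigned first, unsigned last)
{
    printf("log bytes [%u, %u]\n", first, last);
}

int main(void)
{
    unsigned char block[128] = { 0 };
    struct data_hdr *hdr = (struct data_hdr *)block;
    struct data_entry *dep = (struct data_entry *)(block + 64);

    /* first = entry offset within the block, last = its final byte. */
    unsigned first = (unsigned)((char *)dep - (char *)hdr);
    log_range(first, first + sizeof(*dep) - 1);
    return 0;
}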
H A D | xfs_dir2_priv.h | 43 #define xfs_dir3_data_check(dp,bp) __xfs_dir3_data_check(dp, bp);
45 #define xfs_dir3_data_check(dp,bp)
48 extern int __xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
68 struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_buf *bp);
75 struct xfs_buf *bp, int first, int last);
77 struct xfs_buf *bp);
98 struct xfs_buf *bp, int *count);
99 extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
122 extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
|
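The two xfs_dir3_data_check() definitions in this header are the classic debug-only macro pattern: the macro expands to a real call in DEBUG builds and to nothing otherwise, so checks can be sprinkled freely without release-build cost. The same pattern in miniature:

#include <stdio.h>

#ifdef DEBUG
#define data_check(dp, bp) __data_check(dp, bp)
#else
#define data_check(dp, bp)
#endif

/* The real checker, only reachable in DEBUG builds. */
static void __data_check(void *dp, void *bp)
{
    fprintf(stderr, "checking %p/%p\n", dp, bp);
}

int main(void)
{
    data_check(NULL, NULL);  /* expands to nothing unless built with -DDEBUG */
    return 0;
}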
H A D | xfs_inode_buf.c | 40 xfs_buf_t *bp) xfs_inobp_check()
49 dip = (xfs_dinode_t *)xfs_buf_offset(bp, xfs_inobp_check()
54 i, (long long)bp->b_bn); xfs_inobp_check()
77 struct xfs_buf *bp, xfs_inode_buf_verify()
80 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_inode_buf_verify()
87 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock; xfs_inode_buf_verify()
92 dip = (struct xfs_dinode *)xfs_buf_offset(bp, xfs_inode_buf_verify()
100 bp->b_flags &= ~XBF_DONE; xfs_inode_buf_verify()
101 xfs_buf_ioerror(bp, -EIO); xfs_inode_buf_verify()
105 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inode_buf_verify()
106 xfs_verifier_error(bp); xfs_inode_buf_verify()
110 (unsigned long long)bp->b_bn, i, xfs_inode_buf_verify()
115 xfs_inobp_check(mp, bp); xfs_inode_buf_verify()
121 struct xfs_buf *bp) xfs_inode_buf_read_verify()
123 xfs_inode_buf_verify(bp, false); xfs_inode_buf_read_verify()
128 struct xfs_buf *bp) xfs_inode_buf_readahead_verify()
130 xfs_inode_buf_verify(bp, true); xfs_inode_buf_readahead_verify()
135 struct xfs_buf *bp) xfs_inode_buf_write_verify()
137 xfs_inode_buf_verify(bp, false); xfs_inode_buf_write_verify()
170 struct xfs_buf *bp; xfs_imap_to_bp() local
175 (int)imap->im_len, buf_flags, &bp, xfs_imap_to_bp()
192 *bpp = bp; xfs_imap_to_bp()
193 *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset); xfs_imap_to_bp()
353 xfs_buf_t *bp; xfs_iread() local
384 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags); xfs_iread()
463 xfs_buf_set_ref(bp, XFS_INO_REF); xfs_iread()
478 xfs_trans_brelse(tp, bp); xfs_iread()
38 xfs_inobp_check( xfs_mount_t *mp, xfs_buf_t *bp) xfs_inobp_check() argument
76 xfs_inode_buf_verify( struct xfs_buf *bp, bool readahead) xfs_inode_buf_verify() argument
120 xfs_inode_buf_read_verify( struct xfs_buf *bp) xfs_inode_buf_read_verify() argument
127 xfs_inode_buf_readahead_verify( struct xfs_buf *bp) xfs_inode_buf_readahead_verify() argument
134 xfs_inode_buf_write_verify( struct xfs_buf *bp) xfs_inode_buf_write_verify() argument
|
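xfs_inode_buf_verify() treats the buffer as an array of on-disk inodes: it computes how many fit (filesystem blocks in the buffer times inodes per block) and checks each one's magic at its own offset. A simplified version of that loop; the 256-byte inode size is an illustrative assumption, and the magic comparison is done in host byte order for brevity:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ISIZE 256            /* illustrative on-disk inode size */
#define IMAGIC 0x494e        /* "IN" */

struct dinode { uint16_t magic; uint8_t rest[ISIZE - 2]; };

static int inode_buf_verify(const uint8_t *buf, size_t len)
{
    size_t ni = len / ISIZE; /* inodes in this buffer */

    for (size_t i = 0; i < ni; i++) {
        const struct dinode *dip =
            (const struct dinode *)(buf + i * ISIZE);
        if (dip->magic != IMAGIC) {
            fprintf(stderr, "bad inode %zu\n", i);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    uint8_t buf[4 * ISIZE];
    for (int i = 0; i < 4; i++) {
        struct dinode d = { .magic = IMAGIC };
        memcpy(buf + i * ISIZE, &d, ISIZE);
    }
    printf("verify: %d\n", inode_buf_verify(buf, sizeof(buf)));
    return 0;
}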
H A D | xfs_rtbitmap.c | 56 xfs_buf_t *bp; /* block buffer, result */ xfs_rtbuf_get() local
71 mp->m_bsize, 0, &bp, NULL); xfs_rtbuf_get()
74 *bpp = bp; xfs_rtbuf_get()
93 xfs_buf_t *bp; /* buf for the block */ xfs_rtfind_back() local
108 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtfind_back()
112 bufp = bp->b_addr; xfs_rtfind_back()
145 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
159 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
160 error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); xfs_rtfind_back()
164 bufp = bp->b_addr; xfs_rtfind_back()
191 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
205 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
206 error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); xfs_rtfind_back()
210 bufp = bp->b_addr; xfs_rtfind_back()
238 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
248 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
268 xfs_buf_t *bp; /* buf for the block */ xfs_rtfind_forw() local
283 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtfind_forw()
287 bufp = bp->b_addr; xfs_rtfind_forw()
319 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
333 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
334 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtfind_forw()
338 b = bufp = bp->b_addr; xfs_rtfind_forw()
364 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
378 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
379 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtfind_forw()
383 b = bufp = bp->b_addr; xfs_rtfind_forw()
408 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
418 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
443 xfs_buf_t *bp; /* buffer for the summary block */ xfs_rtmodify_summary_int() local
461 bp = *rbpp; xfs_rtmodify_summary_int()
471 error = xfs_rtbuf_get(mp, tp, sb, 1, &bp); xfs_rtmodify_summary_int()
478 *rbpp = bp; xfs_rtmodify_summary_int()
484 sp = XFS_SUMPTR(mp, bp, so); xfs_rtmodify_summary_int()
486 uint first = (uint)((char *)sp - (char *)bp->b_addr); xfs_rtmodify_summary_int()
489 xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1); xfs_rtmodify_summary_int()
525 xfs_buf_t *bp; /* buf for the block */ xfs_rtmodify_range() local
541 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtmodify_range()
545 bufp = bp->b_addr; xfs_rtmodify_range()
583 xfs_trans_log_buf(tp, bp, xfs_rtmodify_range()
586 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtmodify_range()
590 first = b = bufp = bp->b_addr; xfs_rtmodify_range()
623 xfs_trans_log_buf(tp, bp, xfs_rtmodify_range()
626 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtmodify_range()
630 first = b = bufp = bp->b_addr; xfs_rtmodify_range()
662 xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp), xfs_rtmodify_range()
760 xfs_buf_t *bp; /* buf for the block */ xfs_rtcheck_range() local
776 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtcheck_range()
780 bufp = bp->b_addr; xfs_rtcheck_range()
811 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
826 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
827 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtcheck_range()
831 b = bufp = bp->b_addr; xfs_rtcheck_range()
857 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
872 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
873 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtcheck_range()
877 b = bufp = bp->b_addr; xfs_rtcheck_range()
902 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
913 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
|
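xfs_rtfind_forw() and its siblings scan the realtime bitmap word by word and, on reaching the end of one bitmap block, brelse it and xfs_rtbuf_get() the next, continuing the scan seamlessly. A userspace sketch of scanning for the first set bit across fixed-size word "blocks"; the tiny geometry is chosen only to force a boundary crossing:

#include <stdio.h>
#include <stdint.h>

#define WORDS_PER_BLOCK 2  /* tiny blocks to force a boundary crossing */

static const uint32_t bitmap[2][WORDS_PER_BLOCK] = {
    { 0x0, 0x0 },          /* block 0: empty */
    { 0x0, 0x10 },         /* block 1, word 1: bit 4 is set */
};

/* Return the absolute index of the first set bit, or -1. */
static long find_first_set(int nblocks)
{
    for (int blk = 0; blk < nblocks; blk++) {        /* "rtbuf_get" */
        for (int w = 0; w < WORDS_PER_BLOCK; w++) {
            uint32_t word = bitmap[blk][w];
            for (int b = 0; b < 32; b++)
                if (word & (1u << b))
                    return ((long)blk * WORDS_PER_BLOCK + w) * 32 + b;
        }
    }                                                /* "brelse", next block */
    return -1;
}

int main(void)
{
    printf("first set bit at %ld\n", find_first_set(2));  /* prints 100 */
    return 0;
}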
H A D | xfs_dir2_node.c | 40 static int xfs_dir2_leafn_add(struct xfs_buf *bp, xfs_da_args_t *args,
45 static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
55 #define xfs_dir3_leaf_check(dp, bp) \
57 if (!xfs_dir3_leafn_check((dp), (bp))) \
64 struct xfs_buf *bp) xfs_dir3_leafn_check()
66 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leafn_check()
72 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leafn_check()
73 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leafn_check()
81 #define xfs_dir3_leaf_check(dp, bp)
86 struct xfs_buf *bp) xfs_dir3_free_verify()
88 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_verify()
89 struct xfs_dir2_free_hdr *hdr = bp->b_addr; xfs_dir3_free_verify()
92 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_free_verify()
98 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_free_verify()
112 struct xfs_buf *bp) xfs_dir3_free_read_verify()
114 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_read_verify()
117 !xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF)) xfs_dir3_free_read_verify()
118 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_free_read_verify()
119 else if (!xfs_dir3_free_verify(bp)) xfs_dir3_free_read_verify()
120 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_free_read_verify()
122 if (bp->b_error) xfs_dir3_free_read_verify()
123 xfs_verifier_error(bp); xfs_dir3_free_read_verify()
128 struct xfs_buf *bp) xfs_dir3_free_write_verify()
130 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_write_verify()
131 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_free_write_verify()
132 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_free_write_verify()
134 if (!xfs_dir3_free_verify(bp)) { xfs_dir3_free_write_verify()
135 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_free_write_verify()
136 xfs_verifier_error(bp); xfs_dir3_free_write_verify()
146 xfs_buf_update_cksum(bp, XFS_DIR3_FREE_CRC_OFF); xfs_dir3_free_write_verify()
203 struct xfs_buf *bp; xfs_dir3_free_get_buf() local
208 -1, &bp, XFS_DATA_FORK); xfs_dir3_free_get_buf()
212 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_FREE_BUF); xfs_dir3_free_get_buf()
213 bp->b_ops = &xfs_dir3_free_buf_ops; xfs_dir3_free_get_buf()
219 memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr)); xfs_dir3_free_get_buf()
223 struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; xfs_dir3_free_get_buf()
227 hdr3->hdr.blkno = cpu_to_be64(bp->b_bn); xfs_dir3_free_get_buf()
232 dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr); xfs_dir3_free_get_buf()
233 *bpp = bp; xfs_dir3_free_get_buf()
243 struct xfs_buf *bp, xfs_dir2_free_log_bests()
250 free = bp->b_addr; xfs_dir2_free_log_bests()
254 xfs_trans_log_buf(args->trans, bp, xfs_dir2_free_log_bests()
266 struct xfs_buf *bp) xfs_dir2_free_log_header()
271 free = bp->b_addr; xfs_dir2_free_log_header()
275 xfs_trans_log_buf(args->trans, bp, 0, xfs_dir2_free_log_header()
374 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_add()
392 leaf = bp->b_addr; xfs_dir2_leafn_add()
449 xfs_dir3_leaf_log_header(args, bp); xfs_dir2_leafn_add()
450 xfs_dir3_leaf_log_ents(args, bp, lfloglow, lfloghigh); xfs_dir2_leafn_add()
451 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_add()
459 struct xfs_buf *bp, xfs_dir2_free_hdr_check()
464 dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr); xfs_dir2_free_hdr_check()
472 #define xfs_dir2_free_hdr_check(dp, bp, db)
482 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lasthash()
485 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir2_leafn_lasthash()
509 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_for_addname()
535 leaf = bp->b_addr; xfs_dir2_leafn_lookup_for_addname()
539 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_lookup_for_addname()
545 index = xfs_dir2_leaf_search_hash(args, bp); xfs_dir2_leafn_lookup_for_addname()
551 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_addname()
638 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_addname()
664 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_for_entry()
687 leaf = bp->b_addr; xfs_dir2_leafn_lookup_for_entry()
691 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_lookup_for_entry()
697 index = xfs_dir2_leaf_search_hash(args, bp); xfs_dir2_leafn_lookup_for_entry()
702 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_entry()
742 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_entry()
770 xfs_trans_brelse(tp, state->extrablk.bp); xfs_dir2_leafn_lookup_for_entry()
776 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_entry()
792 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_entry()
800 if (state->extrablk.bp != curbp) xfs_dir2_leafn_lookup_for_entry()
817 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_int()
823 return xfs_dir2_leafn_lookup_for_addname(bp, args, indexp, xfs_dir2_leafn_lookup_int()
825 return xfs_dir2_leafn_lookup_for_entry(bp, args, indexp, state); xfs_dir2_leafn_lookup_int()
970 if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) { xfs_dir2_leafn_rebalance()
977 leaf1 = blk1->bp->b_addr; xfs_dir2_leafn_rebalance()
978 leaf2 = blk2->bp->b_addr; xfs_dir2_leafn_rebalance()
1016 xfs_dir3_leafn_moveents(args, blk1->bp, &hdr1, ents1, xfs_dir2_leafn_rebalance()
1017 hdr1.count - count, blk2->bp, xfs_dir2_leafn_rebalance()
1020 xfs_dir3_leafn_moveents(args, blk2->bp, &hdr2, ents2, 0, xfs_dir2_leafn_rebalance()
1021 blk1->bp, &hdr1, ents1, xfs_dir2_leafn_rebalance()
1030 xfs_dir3_leaf_log_header(args, blk1->bp); xfs_dir2_leafn_rebalance()
1031 xfs_dir3_leaf_log_header(args, blk2->bp); xfs_dir2_leafn_rebalance()
1033 xfs_dir3_leaf_check(dp, blk1->bp); xfs_dir2_leafn_rebalance()
1034 xfs_dir3_leaf_check(dp, blk2->bp); xfs_dir2_leafn_rebalance()
1151 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_remove()
1176 leaf = bp->b_addr; xfs_dir2_leafn_remove()
1199 xfs_dir3_leaf_log_header(args, bp); xfs_dir2_leafn_remove()
1202 xfs_dir3_leaf_log_ents(args, bp, index, index); xfs_dir2_leafn_remove()
1208 dbp = dblk->bp; xfs_dir2_leafn_remove()
1272 dblk->bp = NULL; xfs_dir2_leafn_remove()
1293 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_remove()
1332 &newblk->bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir2_leafn_split()
1351 error = xfs_dir2_leafn_add(oldblk->bp, args, oldblk->index); xfs_dir2_leafn_split()
1353 error = xfs_dir2_leafn_add(newblk->bp, args, newblk->index); xfs_dir2_leafn_split()
1357 oldblk->hashval = xfs_dir2_leafn_lasthash(dp, oldblk->bp, NULL); xfs_dir2_leafn_split()
1358 newblk->hashval = xfs_dir2_leafn_lasthash(dp, newblk->bp, NULL); xfs_dir2_leafn_split()
1359 xfs_dir3_leaf_check(dp, oldblk->bp); xfs_dir2_leafn_split()
1360 xfs_dir3_leaf_check(dp, newblk->bp); xfs_dir2_leafn_split()
1380 struct xfs_buf *bp; /* leaf buffer */ xfs_dir2_leafn_toosmall() local
1398 leaf = blk->bp->b_addr; xfs_dir2_leafn_toosmall()
1401 xfs_dir3_leaf_check(dp, blk->bp); xfs_dir2_leafn_toosmall()
1440 for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { xfs_dir2_leafn_toosmall()
1450 blkno, -1, &bp); xfs_dir2_leafn_toosmall()
1461 leaf = bp->b_addr; xfs_dir2_leafn_toosmall()
1472 xfs_trans_brelse(state->args->trans, bp); xfs_dir2_leafn_toosmall()
1522 drop_leaf = drop_blk->bp->b_addr; xfs_dir2_leafn_unbalance()
1523 save_leaf = save_blk->bp->b_addr; xfs_dir2_leafn_unbalance()
1535 xfs_dir3_leaf_compact(args, &drophdr, drop_blk->bp); xfs_dir2_leafn_unbalance()
1537 xfs_dir3_leaf_compact(args, &savehdr, save_blk->bp); xfs_dir2_leafn_unbalance()
1543 if (xfs_dir2_leafn_order(dp, save_blk->bp, drop_blk->bp)) xfs_dir2_leafn_unbalance()
1544 xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0, xfs_dir2_leafn_unbalance()
1545 save_blk->bp, &savehdr, sents, 0, xfs_dir2_leafn_unbalance()
1548 xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0, xfs_dir2_leafn_unbalance()
1549 save_blk->bp, &savehdr, sents, xfs_dir2_leafn_unbalance()
1556 xfs_dir3_leaf_log_header(args, save_blk->bp); xfs_dir2_leafn_unbalance()
1557 xfs_dir3_leaf_log_header(args, drop_blk->bp); xfs_dir2_leafn_unbalance()
1559 xfs_dir3_leaf_check(dp, save_blk->bp); xfs_dir2_leafn_unbalance()
1560 xfs_dir3_leaf_check(dp, drop_blk->bp); xfs_dir2_leafn_unbalance()
1607 rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); xfs_dir2_node_addname()
1676 fbp = fblk->bp; xfs_dir2_node_addname_int()
1792 if (fblk && fblk->bp) xfs_dir2_node_addname_int()
1793 fblk->bp = NULL; xfs_dir2_node_addname_int()
1822 if (fblk && fblk->bp) xfs_dir2_node_addname_int()
1823 fblk->bp = NULL; xfs_dir2_node_addname_int()
2038 ((char *)state->extrablk.bp->b_addr + xfs_dir2_node_lookup()
2046 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_dir2_node_lookup()
2047 state->path.blk[i].bp = NULL; xfs_dir2_node_lookup()
2052 if (state->extravalid && state->extrablk.bp) { xfs_dir2_node_lookup()
2053 xfs_trans_brelse(args->trans, state->extrablk.bp); xfs_dir2_node_lookup()
2054 state->extrablk.bp = NULL; xfs_dir2_node_lookup()
2099 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, xfs_dir2_node_removename()
2175 leaf = blk->bp->b_addr; xfs_dir2_node_replace()
2182 hdr = state->extrablk.bp->b_addr; xfs_dir2_node_replace()
2195 xfs_dir2_data_log_entry(args, state->extrablk.bp, dep); xfs_dir2_node_replace()
2202 xfs_trans_brelse(args->trans, state->extrablk.bp); xfs_dir2_node_replace()
2203 state->extrablk.bp = NULL; xfs_dir2_node_replace()
2209 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_dir2_node_replace()
2210 state->path.blk[i].bp = NULL; xfs_dir2_node_replace()
2226 struct xfs_buf *bp; /* freespace buffer */ xfs_dir2_node_trim_free() local
2238 error = xfs_dir2_free_try_read(tp, dp, fo, &bp); xfs_dir2_node_trim_free()
2245 if (!bp) xfs_dir2_node_trim_free()
2247 free = bp->b_addr; xfs_dir2_node_trim_free()
2254 xfs_trans_brelse(tp, bp); xfs_dir2_node_trim_free()
2262 xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)fo), bp); xfs_dir2_node_trim_free()
2270 xfs_trans_brelse(tp, bp); xfs_dir2_node_trim_free()
62 xfs_dir3_leafn_check( struct xfs_inode *dp, struct xfs_buf *bp) xfs_dir3_leafn_check() argument
85 xfs_dir3_free_verify( struct xfs_buf *bp) xfs_dir3_free_verify() argument
111 xfs_dir3_free_read_verify( struct xfs_buf *bp) xfs_dir3_free_read_verify() argument
127 xfs_dir3_free_write_verify( struct xfs_buf *bp) xfs_dir3_free_write_verify() argument
241 xfs_dir2_free_log_bests( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir2_free_log_bests() argument
264 xfs_dir2_free_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir2_free_log_header() argument
373 xfs_dir2_leafn_add( struct xfs_buf *bp, xfs_da_args_t *args, int index) xfs_dir2_leafn_add() argument
457 xfs_dir2_free_hdr_check( struct xfs_inode *dp, struct xfs_buf *bp, xfs_dir2_db_t db) xfs_dir2_free_hdr_check() argument
480 xfs_dir2_leafn_lasthash( struct xfs_inode *dp, struct xfs_buf *bp, int *count) xfs_dir2_leafn_lasthash() argument
508 xfs_dir2_leafn_lookup_for_addname( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_for_addname() argument
663 xfs_dir2_leafn_lookup_for_entry( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_for_entry() argument
816 xfs_dir2_leafn_lookup_int( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_int() argument
1149 xfs_dir2_leafn_remove( xfs_da_args_t *args, struct xfs_buf *bp, int index, xfs_da_state_blk_t *dblk, int *rval) xfs_dir2_leafn_remove() argument
|
H A D | xfs_attr_leaf.h | 54 int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
62 int xfs_attr3_leaf_to_shortform(struct xfs_buf *bp,
76 int xfs_attr3_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
81 int xfs_attr3_leaf_list_int(struct xfs_buf *bp,
96 xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
|
H A D | xfs_attr_leaf.c | 251 struct xfs_buf *bp) xfs_attr3_leaf_verify() 253 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_verify() 254 struct xfs_attr_leafblock *leaf = bp->b_addr; xfs_attr3_leaf_verify() 260 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_attr3_leaf_verify() 267 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn) xfs_attr3_leaf_verify() 284 struct xfs_buf *bp) xfs_attr3_leaf_write_verify() 286 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_write_verify() 287 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_attr3_leaf_write_verify() 288 struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr; xfs_attr3_leaf_write_verify() 290 if (!xfs_attr3_leaf_verify(bp)) { xfs_attr3_leaf_write_verify() 291 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_leaf_write_verify() 292 xfs_verifier_error(bp); xfs_attr3_leaf_write_verify() 302 xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF); xfs_attr3_leaf_write_verify() 313 struct xfs_buf *bp) xfs_attr3_leaf_read_verify() 315 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_read_verify() 318 !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) xfs_attr3_leaf_read_verify() 319 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_attr3_leaf_read_verify() 320 else if (!xfs_attr3_leaf_verify(bp)) xfs_attr3_leaf_read_verify() 321 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_leaf_read_verify() 323 if (bp->b_error) xfs_attr3_leaf_read_verify() 324 xfs_verifier_error(bp); xfs_attr3_leaf_read_verify() 744 struct xfs_buf *bp; xfs_attr_shortform_to_leaf() local 761 bp = NULL; xfs_attr_shortform_to_leaf() 776 error = xfs_attr3_leaf_create(args, blkno, &bp); xfs_attr_shortform_to_leaf() 778 error = xfs_da_shrink_inode(args, 0, bp); xfs_attr_shortform_to_leaf() 779 bp = NULL; xfs_attr_shortform_to_leaf() 806 error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ xfs_attr_shortform_to_leaf() 808 error = xfs_attr3_leaf_add(bp, &nargs); xfs_attr_shortform_to_leaf() 827 struct xfs_buf *bp, xfs_attr_shortform_allfit() 836 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr_shortform_allfit() 838 leaf = bp->b_addr; xfs_attr_shortform_allfit() 869 struct xfs_buf *bp, xfs_attr3_leaf_to_shortform() 889 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); xfs_attr3_leaf_to_shortform() 896 memset(bp->b_addr, 0, args->geo->blksize); xfs_attr3_leaf_to_shortform() 901 error = xfs_da_shrink_inode(args, 0, bp); xfs_attr3_leaf_to_shortform() 1035 struct xfs_buf *bp; xfs_attr3_leaf_create() local 1040 error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp, xfs_attr3_leaf_create() 1044 bp->b_ops = &xfs_attr3_leaf_buf_ops; xfs_attr3_leaf_create() 1045 xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF); xfs_attr3_leaf_create() 1046 leaf = bp->b_addr; xfs_attr3_leaf_create() 1053 struct xfs_da3_blkinfo *hdr3 = bp->b_addr; xfs_attr3_leaf_create() 1057 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_attr3_leaf_create() 1069 xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_create() 1071 *bpp = bp; xfs_attr3_leaf_create() 1096 error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); xfs_attr3_leaf_split() 1120 error = xfs_attr3_leaf_add(oldblk->bp, state->args); xfs_attr3_leaf_split() 1123 error = xfs_attr3_leaf_add(newblk->bp, state->args); xfs_attr3_leaf_split() 1129 oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); xfs_attr3_leaf_split() 1130 newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); xfs_attr3_leaf_split() 1139 struct xfs_buf *bp, xfs_attr3_leaf_add() 1152 leaf = bp->b_addr; 
xfs_attr3_leaf_add() 1174 tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i); xfs_attr3_leaf_add() 1192 xfs_attr3_leaf_compact(args, &ichdr, bp); xfs_attr3_leaf_add() 1203 tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0); xfs_attr3_leaf_add() 1207 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add() 1218 struct xfs_buf *bp, xfs_attr3_leaf_add_work() 1233 leaf = bp->b_addr; xfs_attr3_leaf_add_work() 1245 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work() 1275 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work() 1308 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work() 1340 struct xfs_buf *bp) xfs_attr3_leaf_compact() 1351 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); xfs_attr3_leaf_compact() 1352 memset(bp->b_addr, 0, args->geo->blksize); xfs_attr3_leaf_compact() 1354 leaf_dst = bp->b_addr; xfs_attr3_leaf_compact() 1361 memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src)); xfs_attr3_leaf_compact() 1386 xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_compact() 1467 leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_rebalance() 1468 leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_rebalance() 1483 if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) { xfs_attr3_leaf_rebalance() 1496 leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_rebalance() 1497 leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_rebalance() 1533 xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp); xfs_attr3_leaf_rebalance() 1562 xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp); xfs_attr3_leaf_rebalance() 1573 xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_rebalance() 1574 xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_rebalance() 1650 struct xfs_attr_leafblock *leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_figure_balance() 1651 struct xfs_attr_leafblock *leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_figure_balance() 1748 struct xfs_buf *bp; xfs_attr3_leaf_toosmall() local 1764 leaf = blk->bp->b_addr; xfs_attr3_leaf_toosmall() 1817 blkno, -1, &bp); xfs_attr3_leaf_toosmall() 1821 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr); xfs_attr3_leaf_toosmall() 1830 xfs_trans_brelse(state->args->trans, bp); xfs_attr3_leaf_toosmall() 1869 struct xfs_buf *bp, xfs_attr3_leaf_remove() 1885 leaf = bp->b_addr; xfs_attr3_leaf_remove() 1969 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove() 1976 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove() 2004 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove() 2027 struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr; xfs_attr3_leaf_unbalance() 2028 struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr; xfs_attr3_leaf_unbalance() 2035 drop_leaf = drop_blk->bp->b_addr; xfs_attr3_leaf_unbalance() 2036 save_leaf = save_blk->bp->b_addr; xfs_attr3_leaf_unbalance() 2056 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, xfs_attr3_leaf_unbalance() 2057 drop_blk->bp, &drophdr)) { xfs_attr3_leaf_unbalance() 2094 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, xfs_attr3_leaf_unbalance() 2095 drop_blk->bp, &drophdr)) { xfs_attr3_leaf_unbalance() 2120 xfs_trans_log_buf(state->args->trans, save_blk->bp, 0, xfs_attr3_leaf_unbalance() 2149 struct xfs_buf *bp, xfs_attr3_leaf_lookup_int() 2164 leaf = bp->b_addr; xfs_attr3_leaf_lookup_int() 2259 struct xfs_buf *bp, xfs_attr3_leaf_getvalue() 2269 leaf = bp->b_addr; xfs_attr3_leaf_getvalue() 2466 struct xfs_buf *bp, xfs_attr_leaf_lasthash() 2471 struct xfs_mount *mp = bp->b_target->bt_mount; 
xfs_attr_leaf_lasthash() 2473 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr); xfs_attr_leaf_lasthash() 2474 entries = xfs_attr3_leaf_entryp(bp->b_addr); xfs_attr_leaf_lasthash() 2545 struct xfs_buf *bp; xfs_attr3_leaf_clearflag() local 2558 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr3_leaf_clearflag() 2562 leaf = bp->b_addr; xfs_attr3_leaf_clearflag() 2586 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_clearflag() 2594 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_clearflag() 2614 struct xfs_buf *bp; xfs_attr3_leaf_setflag() local 2625 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr3_leaf_setflag() 2629 leaf = bp->b_addr; xfs_attr3_leaf_setflag() 2639 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_setflag() 2645 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_setflag() 250 xfs_attr3_leaf_verify( struct xfs_buf *bp) xfs_attr3_leaf_verify() argument 283 xfs_attr3_leaf_write_verify( struct xfs_buf *bp) xfs_attr3_leaf_write_verify() argument 312 xfs_attr3_leaf_read_verify( struct xfs_buf *bp) xfs_attr3_leaf_read_verify() argument 826 xfs_attr_shortform_allfit( struct xfs_buf *bp, struct xfs_inode *dp) xfs_attr_shortform_allfit() argument 868 xfs_attr3_leaf_to_shortform( struct xfs_buf *bp, struct xfs_da_args *args, int forkoff) xfs_attr3_leaf_to_shortform() argument 1138 xfs_attr3_leaf_add( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_add() argument 1217 xfs_attr3_leaf_add_work( struct xfs_buf *bp, struct xfs_attr3_icleaf_hdr *ichdr, struct xfs_da_args *args, int mapindex) xfs_attr3_leaf_add_work() argument 1337 xfs_attr3_leaf_compact( struct xfs_da_args *args, struct xfs_attr3_icleaf_hdr *ichdr_dst, struct xfs_buf *bp) xfs_attr3_leaf_compact() argument 1868 xfs_attr3_leaf_remove( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_remove() argument 2148 xfs_attr3_leaf_lookup_int( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_lookup_int() argument 2258 xfs_attr3_leaf_getvalue( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_getvalue() argument 2465 xfs_attr_leaf_lasthash( struct xfs_buf *bp, int *count) xfs_attr_leaf_lasthash() argument
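The xfs_attr_leaf.c hits above trace the usual XFS buffer-verifier trio: one structural check (magic plus the block's self-describing disk address) shared by a read verifier, which tests the CRC before the structure, and a write verifier, which rejects corrupt blocks and stamps a fresh CRC. Below is a minimal compilable userspace sketch of that shape; toy_buf, toy_hdr and the additive checksum are invented stand-ins, not the kernel's xfs_buf API or crc32c.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define EFSBADCRC    74          /* illustrative errno values */
#define EFSCORRUPTED 117
#define TOY_MAGIC    0x3BEEu

/* Invented stand-ins for xfs_buf and the on-disk header. */
struct toy_hdr { uint16_t magic; uint16_t crc; uint64_t blkno; };
struct toy_buf { uint8_t b_addr[64]; uint64_t b_bn; int b_error; };

/* Checksum over the block with the crc field excluded, mirroring how
 * xfs_buf_update_cksum()/xfs_buf_verify_cksum() skip the CRC slot. */
static uint16_t toy_cksum(const struct toy_buf *bp)
{
        const struct toy_hdr *h = (const struct toy_hdr *)bp->b_addr;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < sizeof(bp->b_addr); i++)
                sum += bp->b_addr[i];
        sum -= (h->crc & 0xff) + (h->crc >> 8);
        return (uint16_t)sum;
}

/* Shared structural check, like xfs_attr3_leaf_verify(): right magic,
 * and the self-describing blkno must match the buffer's disk address. */
static int toy_verify(const struct toy_buf *bp)
{
        const struct toy_hdr *h = (const struct toy_hdr *)bp->b_addr;

        return h->magic == TOY_MAGIC && h->blkno == bp->b_bn;
}

static void toy_read_verify(struct toy_buf *bp)
{
        const struct toy_hdr *h = (const struct toy_hdr *)bp->b_addr;

        if (h->crc != toy_cksum(bp))            /* CRC first */
                bp->b_error = -EFSBADCRC;
        else if (!toy_verify(bp))               /* then structure */
                bp->b_error = -EFSCORRUPTED;
}

static void toy_write_verify(struct toy_buf *bp)
{
        struct toy_hdr *h = (struct toy_hdr *)bp->b_addr;

        if (!toy_verify(bp)) {                  /* refuse to write garbage */
                bp->b_error = -EFSCORRUPTED;
                return;
        }
        h->crc = toy_cksum(bp);                 /* stamp the CRC last */
}

int main(void)
{
        struct toy_buf bp = { .b_bn = 7 };
        struct toy_hdr *h = (struct toy_hdr *)bp.b_addr;

        h->magic = TOY_MAGIC;
        h->blkno = 7;
        toy_write_verify(&bp);
        toy_read_verify(&bp);
        printf("clean read: b_error=%d\n", bp.b_error);

        h->blkno++;                             /* simulate on-disk damage */
        toy_read_verify(&bp);
        printf("after damage: b_error=%d\n", bp.b_error); /* CRC trips first */
        return 0;
}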
|
H A D | xfs_dir2_leaf.c | 43 struct xfs_buf *bp, int first, int last); 45 struct xfs_buf *bp); 52 #define xfs_dir3_leaf_check(dp, bp) \ 54 if (!xfs_dir3_leaf1_check((dp), (bp))) \ 61 struct xfs_buf *bp) xfs_dir3_leaf1_check() 63 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf1_check() 69 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf1_check() 70 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf1_check() 78 #define xfs_dir3_leaf_check(dp, bp) 146 struct xfs_buf *bp, xfs_dir3_leaf_verify() 149 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_leaf_verify() 150 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_verify() 155 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf_verify() 165 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf_verify() 177 struct xfs_buf *bp, __read_verify() 180 struct xfs_mount *mp = bp->b_target->bt_mount; __read_verify() 183 !xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF)) __read_verify() 184 xfs_buf_ioerror(bp, -EFSBADCRC); __read_verify() 185 else if (!xfs_dir3_leaf_verify(bp, magic)) __read_verify() 186 xfs_buf_ioerror(bp, -EFSCORRUPTED); __read_verify() 188 if (bp->b_error) __read_verify() 189 xfs_verifier_error(bp); __read_verify() 194 struct xfs_buf *bp, __write_verify() 197 struct xfs_mount *mp = bp->b_target->bt_mount; __write_verify() 198 struct xfs_buf_log_item *bip = bp->b_fspriv; __write_verify() 199 struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; __write_verify() 201 if (!xfs_dir3_leaf_verify(bp, magic)) { __write_verify() 202 xfs_buf_ioerror(bp, -EFSCORRUPTED); __write_verify() 203 xfs_verifier_error(bp); __write_verify() 213 xfs_buf_update_cksum(bp, XFS_DIR3_LEAF_CRC_OFF); __write_verify() 218 struct xfs_buf *bp) xfs_dir3_leaf1_read_verify() 220 __read_verify(bp, XFS_DIR2_LEAF1_MAGIC); xfs_dir3_leaf1_read_verify() 225 struct xfs_buf *bp) xfs_dir3_leaf1_write_verify() 227 __write_verify(bp, XFS_DIR2_LEAF1_MAGIC); xfs_dir3_leaf1_write_verify() 232 struct xfs_buf *bp) xfs_dir3_leafn_read_verify() 234 __read_verify(bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir3_leafn_read_verify() 239 struct xfs_buf *bp) xfs_dir3_leafn_write_verify() 241 __write_verify(bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir3_leafn_write_verify() 295 struct xfs_buf *bp, xfs_dir3_leaf_init() 299 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_init() 304 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf_init() 311 leaf3->info.blkno = cpu_to_be64(bp->b_bn); xfs_dir3_leaf_init() 328 bp->b_ops = &xfs_dir3_leaf1_buf_ops; xfs_dir3_leaf_init() 329 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAF1_BUF); xfs_dir3_leaf_init() 331 bp->b_ops = &xfs_dir3_leafn_buf_ops; xfs_dir3_leaf_init() 332 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); xfs_dir3_leaf_init() 346 struct xfs_buf *bp; xfs_dir3_leaf_get_buf() local 354 -1, &bp, XFS_DATA_FORK); xfs_dir3_leaf_get_buf() 358 xfs_dir3_leaf_init(mp, tp, bp, dp->i_ino, magic); xfs_dir3_leaf_get_buf() 359 xfs_dir3_leaf_log_header(args, bp); xfs_dir3_leaf_get_buf() 361 xfs_dir3_leaf_log_tail(args, bp); xfs_dir3_leaf_get_buf() 362 *bpp = bp; xfs_dir3_leaf_get_buf() 911 struct xfs_buf *bp) /* leaf buffer */ xfs_dir3_leaf_compact() 920 leaf = bp->b_addr; xfs_dir3_leaf_compact() 949 xfs_dir3_leaf_log_header(args, bp); xfs_dir3_leaf_compact() 951 xfs_dir3_leaf_log_ents(args, bp, loglow, to - 1); xfs_dir3_leaf_compact() 1054 struct xfs_buf *bp, /* leaf buffer */ xfs_dir3_leaf_log_bests() 1060 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_bests() 1069 xfs_trans_log_buf(args->trans, bp, 
xfs_dir3_leaf_log_bests() 1080 struct xfs_buf *bp, xfs_dir3_leaf_log_ents() 1086 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_ents() 1097 xfs_trans_log_buf(args->trans, bp, xfs_dir3_leaf_log_ents() 1108 struct xfs_buf *bp) xfs_dir3_leaf_log_header() 1110 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_header() 1117 xfs_trans_log_buf(args->trans, bp, xfs_dir3_leaf_log_header() 1128 struct xfs_buf *bp) xfs_dir3_leaf_log_tail() 1130 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_tail() 1139 xfs_trans_log_buf(args->trans, bp, (uint)((char *)ltp - (char *)leaf), xfs_dir3_leaf_log_tail() 1737 lbp = state->path.blk[0].bp; xfs_dir2_node_to_leaf() 1817 state->path.blk[0].bp = NULL; xfs_dir2_node_to_leaf() 59 xfs_dir3_leaf1_check( struct xfs_inode *dp, struct xfs_buf *bp) xfs_dir3_leaf1_check() argument 145 xfs_dir3_leaf_verify( struct xfs_buf *bp, __uint16_t magic) xfs_dir3_leaf_verify() argument 176 __read_verify( struct xfs_buf *bp, __uint16_t magic) __read_verify() argument 193 __write_verify( struct xfs_buf *bp, __uint16_t magic) __write_verify() argument 217 xfs_dir3_leaf1_read_verify( struct xfs_buf *bp) xfs_dir3_leaf1_read_verify() argument 224 xfs_dir3_leaf1_write_verify( struct xfs_buf *bp) xfs_dir3_leaf1_write_verify() argument 231 xfs_dir3_leafn_read_verify( struct xfs_buf *bp) xfs_dir3_leafn_read_verify() argument 238 xfs_dir3_leafn_write_verify( struct xfs_buf *bp) xfs_dir3_leafn_write_verify() argument 292 xfs_dir3_leaf_init( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_buf *bp, xfs_ino_t owner, __uint16_t type) xfs_dir3_leaf_init() argument 908 xfs_dir3_leaf_compact( xfs_da_args_t *args, struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_buf *bp) xfs_dir3_leaf_compact() argument 1052 xfs_dir3_leaf_log_bests( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir3_leaf_log_bests() argument 1078 xfs_dir3_leaf_log_ents( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir3_leaf_log_ents() argument 1106 xfs_dir3_leaf_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir3_leaf_log_header() argument 1126 xfs_dir3_leaf_log_tail( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir3_leaf_log_tail() argument
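xfs_dir2_leaf.c shows the same verifier machinery parameterized by magic number: one shared __read_verify()/__write_verify() body, with thin per-block-type entry points that the separate leaf1/leafn buffer-ops tables point at. A small compilable sketch of that wrapper pattern; the toy_* names and magic values are invented.

#include <stdint.h>
#include <stdio.h>

#define EFSCORRUPTED 117        /* illustrative */
#define LEAF1_MAGIC  0x3DF1u    /* invented magic values */
#define LEAFN_MAGIC  0x3DFFu

struct toy_buf { uint16_t magic; int b_error; };

/* One shared body parameterized by the expected magic, like the
 * __read_verify(bp, magic) hits above. */
static void toy_read_verify(struct toy_buf *bp, uint16_t magic)
{
        if (bp->magic != magic)
                bp->b_error = -EFSCORRUPTED;
}

/* Thin per-block-type entry points, the moral equivalent of
 * xfs_dir3_leaf1_read_verify() and xfs_dir3_leafn_read_verify(). */
static void leaf1_read_verify(struct toy_buf *bp)
{
        toy_read_verify(bp, LEAF1_MAGIC);
}

static void leafn_read_verify(struct toy_buf *bp)
{
        toy_read_verify(bp, LEAFN_MAGIC);
}

int main(void)
{
        struct toy_buf a = { .magic = LEAF1_MAGIC };
        struct toy_buf b = { .magic = LEAF1_MAGIC };

        leaf1_read_verify(&a);  /* expected type: no error */
        leafn_read_verify(&b);  /* wrong block type: flagged */
        printf("a=%d b=%d\n", a.b_error, b.b_error);
        return 0;
}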
|
H A D | xfs_attr.c | 573 struct xfs_buf *bp; xfs_attr_leaf_addname() local
583 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_addname()
591 retval = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_addname()
593 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_addname()
597 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_addname()
625 retval = xfs_attr3_leaf_add(bp, args); xfs_attr_leaf_addname()
722 -1, &bp); xfs_attr_leaf_addname()
726 xfs_attr3_leaf_remove(bp, args); xfs_attr_leaf_addname()
731 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_leaf_addname()
733 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_leaf_addname()
734 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_leaf_addname()
780 struct xfs_buf *bp; xfs_attr_leaf_removename() local
790 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_removename()
794 error = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_removename()
796 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_removename()
800 xfs_attr3_leaf_remove(bp, args); xfs_attr_leaf_removename()
805 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_leaf_removename()
807 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_leaf_removename()
808 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_leaf_removename()
839 struct xfs_buf *bp; xfs_attr_leaf_get() local
845 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_get()
849 error = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_get()
851 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_get()
854 error = xfs_attr3_leaf_getvalue(bp, args); xfs_attr_leaf_get()
855 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_get()
932 retval = xfs_attr3_leaf_add(blk->bp, state->args); xfs_attr_node_addname()
1083 error = xfs_attr3_leaf_remove(blk->bp, args); xfs_attr_node_addname()
1151 struct xfs_buf *bp; xfs_attr_node_removename() local
1180 ASSERT(blk->bp != NULL); xfs_attr_node_removename()
1217 retval = xfs_attr3_leaf_remove(blk->bp, args); xfs_attr_node_removename()
1260 ASSERT(state->path.blk[0].bp); xfs_attr_node_removename()
1261 state->path.blk[0].bp = NULL; xfs_attr_node_removename()
1263 error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp); xfs_attr_node_removename()
1267 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_node_removename()
1269 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_node_removename()
1270 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_node_removename()
1291 xfs_trans_brelse(args->trans, bp); xfs_attr_node_removename()
1322 if (blk->bp) { xfs_attr_fillstate()
1323 blk->disk_blkno = XFS_BUF_ADDR(blk->bp); xfs_attr_fillstate()
1324 blk->bp = NULL; xfs_attr_fillstate()
1337 if (blk->bp) { xfs_attr_fillstate()
1338 blk->disk_blkno = XFS_BUF_ADDR(blk->bp); xfs_attr_fillstate()
1339 blk->bp = NULL; xfs_attr_fillstate()
1374 &blk->bp, XFS_ATTR_FORK); xfs_attr_refillstate()
1378 blk->bp = NULL; xfs_attr_refillstate()
1393 &blk->bp, XFS_ATTR_FORK); xfs_attr_refillstate()
1397 blk->bp = NULL; xfs_attr_refillstate()
1433 ASSERT(blk->bp != NULL); xfs_attr_node_get()
1439 retval = xfs_attr3_leaf_getvalue(blk->bp, args); xfs_attr_node_get()
1450 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_attr_node_get()
1451 state->path.blk[i].bp = NULL; xfs_attr_node_get()
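The xfs_attr.c hits mostly document buffer-ownership discipline: every early exit calls xfs_trans_brelse() on the leaf buffer, except after xfs_attr3_leaf_to_shortform(), where the repeated comment warns that "bp is gone due to xfs_da_shrink_inode". A userspace sketch of that convention, using malloc/free in place of buffer pins; the toy_* names are invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ENOATTR 93      /* kernel's "no such attribute", for illustration */

struct toy_buf { int pinned; };

/* "Reading" pins a buffer the caller must eventually release. */
static int toy_read(struct toy_buf **bpp)
{
        *bpp = calloc(1, sizeof(**bpp));
        return *bpp ? 0 : -ENOMEM;
}

static void toy_release(struct toy_buf *bp)
{
        free(bp);
}

/* Consumes the buffer, like xfs_attr3_leaf_to_shortform() via
 * xfs_da_shrink_inode(): afterwards bp is gone and the caller
 * must not release it again. */
static int toy_to_shortform(struct toy_buf *bp)
{
        free(bp);
        return 0;
}

/* Shape of xfs_attr_leaf_removename(): release on every early
 * exit, except where a callee took ownership. */
static int toy_removename(int attr_present)
{
        struct toy_buf *bp;
        int error = toy_read(&bp);

        if (error)
                return error;
        if (!attr_present) {
                toy_release(bp);        /* error path: drop our hold */
                return -ENOATTR;
        }
        /* ... remove the entry, then collapse if everything fits ... */
        return toy_to_shortform(bp);    /* bp is gone after this */
}

int main(void)
{
        printf("%d %d\n", toy_removename(0), toy_removename(1));
        return 0;
}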
|
H A D | xfs_btree.c | 59 struct xfs_buf *bp) /* buffer for block, if any */ xfs_btree_check_lblock() 70 bp ? bp->b_bn : XFS_BUF_DADDR_NULL); xfs_btree_check_lblock() 90 if (bp) xfs_btree_check_lblock() 91 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_btree_check_lblock() 103 struct xfs_buf *bp) /* buffer containing block */ xfs_btree_check_sblock() 120 bp ? bp->b_bn : XFS_BUF_DADDR_NULL); xfs_btree_check_sblock() 138 if (bp) xfs_btree_check_sblock() 139 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_btree_check_sblock() 154 struct xfs_buf *bp) /* buffer containing block, if any */ xfs_btree_check_block() 157 return xfs_btree_check_lblock(cur, block, level, bp); xfs_btree_check_block() 159 return xfs_btree_check_sblock(cur, block, level, bp); xfs_btree_check_block() 228 struct xfs_buf *bp) xfs_btree_lblock_calc_crc() 230 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_lblock_calc_crc() 231 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_btree_lblock_calc_crc() 233 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_lblock_calc_crc() 237 xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); xfs_btree_lblock_calc_crc() 242 struct xfs_buf *bp) xfs_btree_lblock_verify_crc() 244 if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_lblock_verify_crc() 245 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); xfs_btree_lblock_verify_crc() 260 struct xfs_buf *bp) xfs_btree_sblock_calc_crc() 262 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_sblock_calc_crc() 263 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_btree_sblock_calc_crc() 265 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_sblock_calc_crc() 269 xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); xfs_btree_sblock_calc_crc() 274 struct xfs_buf *bp) xfs_btree_sblock_verify_crc() 276 if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_sblock_verify_crc() 277 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); xfs_btree_sblock_verify_crc() 329 xfs_buf_t *bp; /* btree block's buffer pointer */ xfs_btree_dup_cursor() local 355 bp = cur->bc_bufs[i]; xfs_btree_dup_cursor() 356 if (bp) { xfs_btree_dup_cursor() 358 XFS_BUF_ADDR(bp), mp->m_bsize, xfs_btree_dup_cursor() 359 0, &bp, xfs_btree_dup_cursor() 367 new->bc_bufs[i] = bp; xfs_btree_dup_cursor() 591 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_islastblock() local 593 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_islastblock() 594 xfs_btree_check_block(cur, block, level, bp); xfs_btree_islastblock() 611 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_firstrec() local 616 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_firstrec() 617 xfs_btree_check_block(cur, block, level, bp); xfs_btree_firstrec() 640 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_lastrec() local 645 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_lastrec() 646 xfs_btree_check_block(cur, block, level, bp); xfs_btree_lastrec() 709 struct xfs_buf *bp; /* return value */ xfs_btree_read_bufl() local 716 mp->m_bsize, lock, &bp, ops); xfs_btree_read_bufl() 719 if (bp) xfs_btree_read_bufl() 720 xfs_buf_set_ref(bp, refval); xfs_btree_read_bufl() 721 *bpp = bp; xfs_btree_read_bufl() 883 * Set the buffer for level "lev" in the cursor to bp, releasing 890 xfs_buf_t *bp) /* new buffer to set */ xfs_btree_setbuf() 896 cur->bc_bufs[lev] = bp; xfs_btree_setbuf() 899 b = XFS_BUF_TO_BLOCK(bp); xfs_btree_setbuf() 1025 struct xfs_buf *bp, xfs_btree_init_block() 1032 
xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn, xfs_btree_init_block() 1039 struct xfs_buf *bp, xfs_btree_init_block_cur() 1056 xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn, xfs_btree_init_block_cur() 1088 struct xfs_buf *bp, xfs_btree_buf_to_ptr() 1093 XFS_BUF_ADDR(bp))); xfs_btree_buf_to_ptr() 1096 XFS_BUF_ADDR(bp))); xfs_btree_buf_to_ptr() 1103 struct xfs_buf *bp) xfs_btree_set_refs() 1108 xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF); xfs_btree_set_refs() 1112 xfs_buf_set_ref(bp, XFS_INO_BTREE_REF); xfs_btree_set_refs() 1115 xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF); xfs_btree_set_refs() 1284 struct xfs_buf *bp, xfs_btree_log_keys() 1289 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_keys() 1291 if (bp) { xfs_btree_log_keys() 1292 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_keys() 1293 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_keys() 1310 struct xfs_buf *bp, xfs_btree_log_recs() 1315 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_recs() 1317 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_recs() 1318 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_recs() 1331 struct xfs_buf *bp, /* buffer containing btree block */ xfs_btree_log_ptrs() 1336 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_ptrs() 1338 if (bp) { xfs_btree_log_ptrs() 1339 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_log_ptrs() 1342 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_ptrs() 1343 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_ptrs() 1360 struct xfs_buf *bp, /* buffer containing btree block */ xfs_btree_log_block() 1394 XFS_BTREE_TRACE_ARGBI(cur, bp, fields); xfs_btree_log_block() 1396 if (bp) { xfs_btree_log_block() 1417 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_block() 1418 xfs_trans_log_buf(cur->bc_tp, bp, first, last); xfs_btree_log_block() 1439 struct xfs_buf *bp; xfs_btree_increment() local 1452 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_increment() 1455 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_increment() 1476 block = xfs_btree_get_block(cur, lev, &bp); xfs_btree_increment() 1479 error = xfs_btree_check_block(cur, block, lev, bp); xfs_btree_increment() 1508 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_increment() 1513 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_increment() 1517 xfs_btree_setbuf(cur, lev, bp); xfs_btree_increment() 1546 xfs_buf_t *bp; xfs_btree_decrement() local 1564 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_decrement() 1567 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_decrement() 1607 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_decrement() 1612 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_decrement() 1615 xfs_btree_setbuf(cur, lev, bp); xfs_btree_decrement() 1640 struct xfs_buf *bp; /* buffer pointer for btree block */ xfs_btree_lookup_get_block() local 1656 bp = cur->bc_bufs[level]; xfs_btree_lookup_get_block() 1657 if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) { xfs_btree_lookup_get_block() 1658 *blkp = XFS_BUF_TO_BLOCK(bp); xfs_btree_lookup_get_block() 1662 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); xfs_btree_lookup_get_block() 1666 xfs_btree_setbuf(cur, level, bp); xfs_btree_lookup_get_block() 1862 struct xfs_buf *bp; xfs_btree_updkey() local 1881 block = 
xfs_btree_get_block(cur, level, &bp); xfs_btree_updkey() 1883 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_updkey() 1892 xfs_btree_log_keys(cur, bp, ptr, ptr); xfs_btree_updkey() 1910 struct xfs_buf *bp; xfs_btree_update() local 1919 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_update() 1922 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_update() 1932 xfs_btree_log_recs(cur, bp, ptr, ptr); xfs_btree_update() 2704 struct xfs_buf *bp; /* buffer containing block */ xfs_btree_new_root() local 2744 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); xfs_btree_new_root() 2747 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); xfs_btree_new_root() 2755 lbp = bp; xfs_btree_new_root() 2761 bp = rbp; xfs_btree_new_root() 2765 rbp = bp; xfs_btree_new_root() 2772 bp = lbp; xfs_btree_new_root() 2903 struct xfs_buf *bp; /* buffer for block */ xfs_btree_insrec() local 2950 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec() 2954 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec() 2986 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec() 2990 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec() 3031 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); xfs_btree_insrec() 3032 xfs_btree_log_keys(cur, bp, ptr, numrecs); xfs_btree_insrec() 3050 xfs_btree_log_recs(cur, bp, ptr, numrecs); xfs_btree_insrec() 3060 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_insrec() 3290 struct xfs_buf *bp, xfs_btree_kill_root() 3305 error = cur->bc_ops->free_block(cur, bp); xfs_btree_kill_root() 3355 struct xfs_buf *bp; /* buffer for block */ xfs_btree_delrec() local 3388 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_delrec() 3392 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_delrec() 3427 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); xfs_btree_delrec() 3428 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec() 3443 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec() 3461 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_delrec() 3505 error = xfs_btree_kill_root(cur, bp, level, pp); xfs_btree_delrec() 3720 rbp = bp; xfs_btree_delrec() 3737 lbp = bp; xfs_btree_delrec() 3827 if (bp != lbp) { xfs_btree_delrec() 3921 struct xfs_buf *bp; /* buffer pointer */ xfs_btree_get_rec() local 3928 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_get_rec() 3931 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_get_rec() 3984 struct xfs_buf *bp; xfs_btree_block_change_owner() local 3991 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_block_change_owner() 4004 if (bp) { xfs_btree_block_change_owner() 4006 xfs_trans_ordered_buf(cur->bc_tp, bp); xfs_btree_block_change_owner() 4007 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); xfs_btree_block_change_owner() 4009 xfs_buf_delwri_queue(bp, buffer_list); xfs_btree_block_change_owner() 55 xfs_btree_check_lblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_lblock() argument 99 xfs_btree_check_sblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_sblock() argument 150 xfs_btree_check_block( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_block() argument 227 xfs_btree_lblock_calc_crc( struct xfs_buf *bp) xfs_btree_lblock_calc_crc() argument 241 xfs_btree_lblock_verify_crc( struct xfs_buf *bp) xfs_btree_lblock_verify_crc() 
argument 259 xfs_btree_sblock_calc_crc( struct xfs_buf *bp) xfs_btree_sblock_calc_crc() argument 273 xfs_btree_sblock_verify_crc( struct xfs_buf *bp) xfs_btree_sblock_verify_crc() argument 887 xfs_btree_setbuf( xfs_btree_cur_t *cur, int lev, xfs_buf_t *bp) xfs_btree_setbuf() argument 1023 xfs_btree_init_block( struct xfs_mount *mp, struct xfs_buf *bp, __u32 magic, __u16 level, __u16 numrecs, __u64 owner, unsigned int flags) xfs_btree_init_block() argument 1037 xfs_btree_init_block_cur( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, int numrecs) xfs_btree_init_block_cur() argument 1086 xfs_btree_buf_to_ptr( struct xfs_btree_cur *cur, struct xfs_buf *bp, union xfs_btree_ptr *ptr) xfs_btree_buf_to_ptr() argument 1101 xfs_btree_set_refs( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_btree_set_refs() argument 1282 xfs_btree_log_keys( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_keys() argument 1308 xfs_btree_log_recs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_recs() argument 1329 xfs_btree_log_ptrs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_ptrs() argument 1358 xfs_btree_log_block( struct xfs_btree_cur *cur, struct xfs_buf *bp, int fields) xfs_btree_log_block() argument 3288 xfs_btree_kill_root( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, union xfs_btree_ptr *newroot) xfs_btree_kill_root() argument
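Among the xfs_btree.c hits, the lblock/sblock CRC helpers show the feature-gating idiom: on filesystems without CRCs the calc routine is a no-op and the verify routine trivially succeeds; otherwise the block records the LSN of its last modification and is re-checksummed. A compilable sketch under stated assumptions: a toy mount flag and a placeholder hash stand in for xfs_sb_version_hascrc() and crc32c.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins; the kernel keys this off xfs_sb_version_hascrc(). */
struct toy_mount { int has_crc; };
struct toy_block { uint64_t lsn; uint32_t crc; uint8_t data[32]; };
struct toy_buf {
        struct toy_mount *mp;
        struct toy_block blk;
        uint64_t li_lsn;        /* stands in for bip->bli_item.li_lsn */
};

/* Placeholder hash over everything but the crc field itself. */
static uint32_t toy_crc(const struct toy_block *b)
{
        uint32_t sum = (uint32_t)b->lsn ^ (uint32_t)(b->lsn >> 32);
        size_t i;

        for (i = 0; i < sizeof(b->data); i++)
                sum = sum * 31 + b->data[i];
        return sum;
}

/* Like xfs_btree_lblock_calc_crc(): a no-op on non-CRC filesystems;
 * otherwise record the LSN of the last modification, then re-checksum. */
static void toy_block_calc_crc(struct toy_buf *bp)
{
        if (!bp->mp->has_crc)
                return;
        bp->blk.lsn = bp->li_lsn;
        bp->blk.crc = toy_crc(&bp->blk);
}

/* Like xfs_btree_lblock_verify_crc(): trivially true when the
 * feature is absent, so pre-CRC filesystems keep working. */
static int toy_block_verify_crc(struct toy_buf *bp)
{
        if (!bp->mp->has_crc)
                return 1;
        return bp->blk.crc == toy_crc(&bp->blk);
}

int main(void)
{
        struct toy_mount mp = { .has_crc = 1 };
        struct toy_buf bp = { .mp = &mp, .li_lsn = 42 };

        toy_block_calc_crc(&bp);
        printf("verify: %d\n", toy_block_verify_crc(&bp));
        bp.blk.data[0] ^= 1;            /* corrupt one byte */
        printf("after flip: %d\n", toy_block_verify_crc(&bp));
        return 0;
}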
|
H A D | xfs_bmap_btree.c | 521 struct xfs_buf *bp) xfs_bmbt_free_block()
526 xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp)); xfs_bmbt_free_block()
533 xfs_trans_binval(tp, bp); xfs_bmbt_free_block()
640 struct xfs_buf *bp) xfs_bmbt_verify()
642 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_bmbt_verify()
643 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_bmbt_verify()
652 if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn) xfs_bmbt_verify()
695 struct xfs_buf *bp) xfs_bmbt_read_verify()
697 if (!xfs_btree_lblock_verify_crc(bp)) xfs_bmbt_read_verify()
698 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_bmbt_read_verify()
699 else if (!xfs_bmbt_verify(bp)) xfs_bmbt_read_verify()
700 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_bmbt_read_verify()
702 if (bp->b_error) { xfs_bmbt_read_verify()
703 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_bmbt_read_verify()
704 xfs_verifier_error(bp); xfs_bmbt_read_verify()
710 struct xfs_buf *bp) xfs_bmbt_write_verify()
712 if (!xfs_bmbt_verify(bp)) { xfs_bmbt_write_verify()
713 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_bmbt_write_verify()
714 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_bmbt_write_verify()
715 xfs_verifier_error(bp); xfs_bmbt_write_verify()
718 xfs_btree_lblock_calc_crc(bp); xfs_bmbt_write_verify()
519 xfs_bmbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_bmbt_free_block() argument
639 xfs_bmbt_verify( struct xfs_buf *bp) xfs_bmbt_verify() argument
694 xfs_bmbt_read_verify( struct xfs_buf *bp) xfs_bmbt_read_verify() argument
709 xfs_bmbt_write_verify( struct xfs_buf *bp) xfs_bmbt_write_verify() argument
|
H A D | xfs_sb.c | 526 struct xfs_buf *bp, xfs_sb_verify()
529 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_verify()
536 __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); xfs_sb_verify()
542 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, xfs_sb_verify()
560 struct xfs_buf *bp) xfs_sb_read_verify()
562 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_read_verify()
563 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); xfs_sb_read_verify()
575 if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) { xfs_sb_read_verify()
577 if (bp->b_bn == XFS_SB_DADDR || xfs_sb_read_verify()
584 error = xfs_sb_verify(bp, true); xfs_sb_read_verify()
588 xfs_buf_ioerror(bp, error); xfs_sb_read_verify()
590 xfs_verifier_error(bp); xfs_sb_read_verify()
602 struct xfs_buf *bp) xfs_sb_quiet_read_verify()
604 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); xfs_sb_quiet_read_verify()
608 xfs_sb_read_verify(bp); xfs_sb_quiet_read_verify()
612 xfs_buf_ioerror(bp, -EWRONGFS); xfs_sb_quiet_read_verify()
617 struct xfs_buf *bp) xfs_sb_write_verify()
619 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_write_verify()
620 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_sb_write_verify()
623 error = xfs_sb_verify(bp, false); xfs_sb_write_verify()
625 xfs_buf_ioerror(bp, error); xfs_sb_write_verify()
626 xfs_verifier_error(bp); xfs_sb_write_verify()
634 XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_sb_write_verify()
636 xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF); xfs_sb_write_verify()
762 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); xfs_log_sb() local
768 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); xfs_log_sb()
769 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); xfs_log_sb()
770 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); xfs_log_sb()
525 xfs_sb_verify( struct xfs_buf *bp, bool check_version) xfs_sb_verify() argument
559 xfs_sb_read_verify( struct xfs_buf *bp) xfs_sb_read_verify() argument
601 xfs_sb_quiet_read_verify( struct xfs_buf *bp) xfs_sb_quiet_read_verify() argument
616 xfs_sb_write_verify( struct xfs_buf *bp) xfs_sb_write_verify() argument
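xfs_sb.c adds a "quiet" read verifier for mount-time probing: it peeks at the magic and fails foreign filesystems with -EWRONGFS without logging, only escalating to the loud path for plausible superblocks. A sketch of that split; the errno value, field names and the logged flag are illustrative stand-ins for the kernel's error reporting.

#include <stdint.h>
#include <stdio.h>

#define SB_MAGIC 0x58465342u    /* "XFSB" */
#define EWRONGFS 22             /* the kernel aliases this to EINVAL */
#define SB_DADDR 0              /* sector of the primary superblock */

struct toy_buf { uint64_t b_bn; uint32_t magic; int b_error; int logged; };

/* Loud path: full verification; complaints are warranted at least
 * for the primary superblock. */
static void toy_sb_read_verify(struct toy_buf *bp)
{
        if (bp->magic != SB_MAGIC) {
                bp->b_error = -EWRONGFS;
                if (bp->b_bn == SB_DADDR)
                        bp->logged = 1; /* would call xfs_verifier_error() */
        }
}

/* Like xfs_sb_quiet_read_verify(): peek at the magic first; a foreign
 * filesystem probed at mount time fails silently with -EWRONGFS. */
static void toy_sb_quiet_read_verify(struct toy_buf *bp)
{
        if (bp->magic == SB_MAGIC) {
                toy_sb_read_verify(bp);
                return;
        }
        bp->b_error = -EWRONGFS;
}

int main(void)
{
        struct toy_buf probe = { .b_bn = SB_DADDR, .magic = 0xEF53 };

        toy_sb_quiet_read_verify(&probe);       /* an ext disk, say */
        printf("error=%d logged=%d\n", probe.b_error, probe.logged);
        return 0;
}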
|
H A D | xfs_dir2.h | 158 struct xfs_buf *bp);
163 struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
165 struct xfs_buf *bp);
167 struct xfs_buf *bp, struct xfs_dir2_data_unused *dup);
169 struct xfs_buf *bp, xfs_dir2_data_aoff_t offset,
172 struct xfs_buf *bp, struct xfs_dir2_data_unused *dup,
|
H A D | xfs_alloc.c | 447 struct xfs_buf *bp) xfs_agfl_verify()
449 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_verify()
450 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp); xfs_agfl_verify()
463 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) xfs_agfl_verify()
476 struct xfs_buf *bp) xfs_agfl_read_verify()
478 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_read_verify()
489 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) xfs_agfl_read_verify()
490 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agfl_read_verify()
491 else if (!xfs_agfl_verify(bp)) xfs_agfl_read_verify()
492 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agfl_read_verify()
494 if (bp->b_error) xfs_agfl_read_verify()
495 xfs_verifier_error(bp); xfs_agfl_read_verify()
500 struct xfs_buf *bp) xfs_agfl_write_verify()
502 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_write_verify()
503 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agfl_write_verify()
509 if (!xfs_agfl_verify(bp)) { xfs_agfl_write_verify()
510 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agfl_write_verify()
511 xfs_verifier_error(bp); xfs_agfl_write_verify()
516 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agfl_write_verify()
518 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF); xfs_agfl_write_verify()
536 xfs_buf_t *bp; /* return value */ xfs_alloc_read_agfl() local
543 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); xfs_alloc_read_agfl()
546 xfs_buf_set_ref(bp, XFS_AGFL_REF); xfs_alloc_read_agfl()
547 *bpp = bp; xfs_alloc_read_agfl()
1493 xfs_buf_t *bp; xfs_alloc_ag_vextent_small() local
1495 bp = xfs_btree_get_bufs(args->mp, args->tp, xfs_alloc_ag_vextent_small()
1497 xfs_trans_binval(args->tp, bp); xfs_alloc_ag_vextent_small()
1953 xfs_buf_t *bp; xfs_alloc_fix_freelist() local
1960 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); xfs_alloc_fix_freelist()
1961 xfs_trans_binval(tp, bp); xfs_alloc_fix_freelist()
2088 xfs_buf_t *bp, /* buffer for a.g. freelist header */ xfs_alloc_log_agf()
2110 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); xfs_alloc_log_agf()
2112 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF); xfs_alloc_log_agf()
2115 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); xfs_alloc_log_agf()
2128 xfs_buf_t *bp; xfs_alloc_pagf_init() local
2131 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) xfs_alloc_pagf_init()
2133 if (bp) xfs_alloc_pagf_init()
2134 xfs_trans_brelse(tp, bp); xfs_alloc_pagf_init()
2201 struct xfs_buf *bp) xfs_agf_verify()
2203 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp); xfs_agf_verify()
2227 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno) xfs_agf_verify()
2240 struct xfs_buf *bp) xfs_agf_read_verify()
2242 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agf_read_verify()
2245 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) xfs_agf_read_verify()
2246 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agf_read_verify()
2247 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp, xfs_agf_read_verify()
2250 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agf_read_verify()
2252 if (bp->b_error) xfs_agf_read_verify()
2253 xfs_verifier_error(bp); xfs_agf_read_verify()
2258 struct xfs_buf *bp) xfs_agf_write_verify()
2260 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agf_write_verify()
2261 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agf_write_verify()
2263 if (!xfs_agf_verify(mp, bp)) { xfs_agf_write_verify()
2264 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agf_write_verify()
2265 xfs_verifier_error(bp); xfs_agf_write_verify()
2273 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agf_write_verify()
2275 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF); xfs_agf_write_verify()
446 xfs_agfl_verify( struct xfs_buf *bp) xfs_agfl_verify() argument
475 xfs_agfl_read_verify( struct xfs_buf *bp) xfs_agfl_read_verify() argument
499 xfs_agfl_write_verify( struct xfs_buf *bp) xfs_agfl_write_verify() argument
2086 xfs_alloc_log_agf( xfs_trans_t *tp, xfs_buf_t *bp, int fields) xfs_alloc_log_agf() argument
2199 xfs_agf_verify( struct xfs_mount *mp, struct xfs_buf *bp) xfs_agf_verify() argument
2239 xfs_agf_read_verify( struct xfs_buf *bp) xfs_agf_read_verify() argument
2257 xfs_agf_write_verify( struct xfs_buf *bp) xfs_agf_write_verify() argument
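xfs_alloc_log_agf() illustrates bitmask-to-byte-range logging: the caller passes a mask of dirtied AGF fields, which is translated through an offset table into one contiguous (first, last) byte range for xfs_trans_log_buf(). A self-contained sketch of that translation with an invented three-field header; the kernel does the equivalent lookup via xfs_btree_offsets() over the real AGF layout.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Invented AGF-like header with three loggable fields. */
struct toy_agf { uint32_t magic; uint32_t length; uint32_t flcount; };

#define F_MAGIC   (1u << 0)
#define F_LENGTH  (1u << 1)
#define F_FLCOUNT (1u << 2)
#define F_NUM     3

static const size_t toy_offsets[F_NUM + 1] = {
        offsetof(struct toy_agf, magic),
        offsetof(struct toy_agf, length),
        offsetof(struct toy_agf, flcount),
        sizeof(struct toy_agf),         /* end sentinel */
};

/* Find the byte range covering every set field, so a single
 * contiguous range can be logged in one call. */
static void toy_log_range(uint32_t fields, size_t *first, size_t *last)
{
        int i;

        *first = sizeof(struct toy_agf);
        *last = 0;
        for (i = 0; i < F_NUM; i++) {
                if (!(fields & (1u << i)))
                        continue;
                if (toy_offsets[i] < *first)
                        *first = toy_offsets[i];
                if (toy_offsets[i + 1] - 1 > *last)
                        *last = toy_offsets[i + 1] - 1;
        }
}

int main(void)
{
        size_t first, last;

        toy_log_range(F_LENGTH | F_FLCOUNT, &first, &last);
        printf("log bytes %zu..%zu\n", first, last);     /* 4..11 */
        return 0;
}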
|
H A D | xfs_ialloc.c | 1979 xfs_buf_t *bp, /* allocation group header buffer */ xfs_ialloc_log_agi()
2004 agi = XFS_BUF_TO_AGI(bp); xfs_ialloc_log_agi()
2008 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF); xfs_ialloc_log_agi()
2018 xfs_trans_log_buf(tp, bp, first, last); xfs_ialloc_log_agi()
2029 xfs_trans_log_buf(tp, bp, first, last); xfs_ialloc_log_agi()
2049 struct xfs_buf *bp) xfs_agi_verify()
2051 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_verify()
2052 struct xfs_agi *agi = XFS_BUF_TO_AGI(bp); xfs_agi_verify()
2073 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) xfs_agi_verify()
2082 struct xfs_buf *bp) xfs_agi_read_verify()
2084 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_read_verify()
2087 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) xfs_agi_read_verify()
2088 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agi_read_verify()
2089 else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, xfs_agi_read_verify()
2092 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agi_read_verify()
2094 if (bp->b_error) xfs_agi_read_verify()
2095 xfs_verifier_error(bp); xfs_agi_read_verify()
2100 struct xfs_buf *bp) xfs_agi_write_verify()
2102 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_write_verify()
2103 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agi_write_verify()
2105 if (!xfs_agi_verify(bp)) { xfs_agi_write_verify()
2106 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agi_write_verify()
2107 xfs_verifier_error(bp); xfs_agi_write_verify()
2115 XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agi_write_verify()
2116 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); xfs_agi_write_verify()
2193 xfs_buf_t *bp = NULL; xfs_ialloc_pagi_init() local
2196 error = xfs_ialloc_read_agi(mp, tp, agno, &bp); xfs_ialloc_pagi_init()
2199 if (bp) xfs_ialloc_pagi_init()
2200 xfs_trans_brelse(tp, bp); xfs_ialloc_pagi_init()
1977 xfs_ialloc_log_agi( xfs_trans_t *tp, xfs_buf_t *bp, int fields) xfs_ialloc_log_agi() argument
2048 xfs_agi_verify( struct xfs_buf *bp) xfs_agi_verify() argument
2081 xfs_agi_read_verify( struct xfs_buf *bp) xfs_agi_read_verify() argument
2099 xfs_agi_write_verify( struct xfs_buf *bp) xfs_agi_write_verify() argument
|
H A D | xfs_inode_buf.h | 47 #define xfs_inobp_check(mp, bp)
|
H A D | xfs_sb.h | 30 extern void xfs_sb_calc_crc(struct xfs_buf *bp);
|
H A D | xfs_shared.h | 238 uint32_t size, struct xfs_buf *bp);
240 uint32_t size, struct xfs_buf *bp);
241 void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
|
H A D | xfs_btree.h | 136 int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
242 #define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr))
253 struct xfs_buf *bp); /* buffer containing block, if any */
367 struct xfs_buf *bp,
|
/linux-4.1.27/arch/tile/lib/ |
H A D | cpumask.c | 25 int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) bitmap_parselist_crop() argument
31 if (!isdigit(*bp)) bitmap_parselist_crop()
33 a = simple_strtoul(bp, (char **)&bp, 10); bitmap_parselist_crop()
35 if (*bp == '-') { bitmap_parselist_crop()
36 bp++; bitmap_parselist_crop()
37 if (!isdigit(*bp)) bitmap_parselist_crop()
39 b = simple_strtoul(bp, (char **)&bp, 10); bitmap_parselist_crop()
49 if (*bp == ',') bitmap_parselist_crop()
50 bp++; bitmap_parselist_crop()
51 } while (*bp != '\0' && *bp != '\n'); bitmap_parselist_crop()
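bitmap_parselist_crop() accepts lists such as "0-3,8" and, as the name suggests, silently crops bits at or beyond nmaskbits instead of failing. A userspace re-creation of the loop shown above, substituting strtoul() for simple_strtoul(); the helper and test values are otherwise faithful to the fragment.

#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Parse "a", "a-b" and comma-separated combinations, dropping
 * (cropping) any bit numbers >= nmaskbits. */
static int parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
{
        unsigned long a, b;

        do {
                if (!isdigit((unsigned char)*bp))
                        return -EINVAL;
                a = strtoul(bp, (char **)&bp, 10);
                b = a;
                if (*bp == '-') {
                        bp++;
                        if (!isdigit((unsigned char)*bp))
                                return -EINVAL;
                        b = strtoul(bp, (char **)&bp, 10);
                }
                if (a > b)
                        return -EINVAL;
                for (; a <= b; a++)
                        if (a < (unsigned long)nmaskbits)       /* crop */
                                maskp[a / BITS_PER_LONG] |=
                                        1UL << (a % BITS_PER_LONG);
                if (*bp == ',')
                        bp++;
        } while (*bp != '\0' && *bp != '\n');
        return 0;
}

int main(void)
{
        unsigned long mask[2] = { 0, 0 };

        parselist_crop("0-3,8,70", mask, 64);   /* 70 is cropped away */
        printf("%#lx %#lx\n", mask[0], mask[1]);
        return 0;
}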
|
/linux-4.1.27/include/linux/ |
H A D | hw_breakpoint.h | 31 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) hw_breakpoint_addr() argument
33 return bp->attr.bp_addr; hw_breakpoint_addr()
36 static inline int hw_breakpoint_type(struct perf_event *bp) hw_breakpoint_type() argument
38 return bp->attr.bp_type; hw_breakpoint_type()
41 static inline unsigned long hw_breakpoint_len(struct perf_event *bp) hw_breakpoint_len() argument
43 return bp->attr.bp_len; hw_breakpoint_len()
54 modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
70 extern int register_perf_hw_breakpoint(struct perf_event *bp);
71 extern int __register_perf_hw_breakpoint(struct perf_event *bp);
72 extern void unregister_hw_breakpoint(struct perf_event *bp);
75 extern int dbg_reserve_bp_slot(struct perf_event *bp);
76 extern int dbg_release_bp_slot(struct perf_event *bp);
77 extern int reserve_bp_slot(struct perf_event *bp);
78 extern void release_bp_slot(struct perf_event *bp);
82 static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) counter_arch_bp() argument
84 return &bp->hw.info; counter_arch_bp()
97 modify_user_hw_breakpoint(struct perf_event *bp, modify_user_hw_breakpoint() argument
109 register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } register_perf_hw_breakpoint() argument
111 __register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } unregister_hw_breakpoint() argument
112 static inline void unregister_hw_breakpoint(struct perf_event *bp) { } unregister_hw_breakpoint() argument
116 reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } release_bp_slot() argument
117 static inline void release_bp_slot(struct perf_event *bp) { } release_bp_slot() argument
121 static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) counter_arch_bp() argument
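hw_breakpoint.h pairs real declarations with static-inline stubs returning -ENOSYS (or doing nothing) so callers compile unchanged when the feature is configured out, while small accessors hide the attr layout. A compilable sketch of that pattern, with HAVE_HW_BREAKPOINT standing in for CONFIG_HAVE_HW_BREAKPOINT and a simplified perf_event.

#include <errno.h>
#include <stdio.h>

/* Toggle to exercise either half; the kernel keys this off
 * CONFIG_HAVE_HW_BREAKPOINT. */
#define HAVE_HW_BREAKPOINT 0

struct perf_event { unsigned long bp_addr; int bp_type; };

#if HAVE_HW_BREAKPOINT
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
#else
/* Stub flavor from the header above: same signatures, constant
 * results, so call sites need no #ifdefs of their own. */
static inline int reserve_bp_slot(struct perf_event *bp)
{
        (void)bp;
        return -ENOSYS;
}
static inline void release_bp_slot(struct perf_event *bp) { (void)bp; }
#endif

/* Accessors mirror hw_breakpoint_addr()/hw_breakpoint_type(): callers
 * never touch the fields directly, so the layout can change freely. */
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
{
        return bp->bp_addr;
}

int main(void)
{
        struct perf_event ev = { .bp_addr = 0x1000, .bp_type = 1 };

        printf("addr=%#lx reserve=%d\n",
               hw_breakpoint_addr(&ev), reserve_bp_slot(&ev));
        release_bp_slot(&ev);
        return 0;
}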
|
H A D | hdlcdrv.h | 116 unsigned char *bp; member in struct:hdlcdrv_state::hdlcdrv_hdlcrx
139 unsigned char *bp; member in struct:hdlcdrv_state::hdlcdrv_hdlctx
|
/linux-4.1.27/arch/powerpc/sysdev/ |
H A D | grackle.c | 30 static inline void grackle_set_stg(struct pci_controller* bp, int enable) grackle_set_stg() argument
34 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_stg()
35 val = in_le32(bp->cfg_data); grackle_set_stg()
38 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_stg()
39 out_le32(bp->cfg_data, val); grackle_set_stg()
40 (void)in_le32(bp->cfg_data); grackle_set_stg()
43 static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) grackle_set_loop_snoop() argument
47 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_loop_snoop()
48 val = in_le32(bp->cfg_data); grackle_set_loop_snoop()
51 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_loop_snoop()
52 out_le32(bp->cfg_data, val); grackle_set_loop_snoop()
53 (void)in_le32(bp->cfg_data); grackle_set_loop_snoop()
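grackle_set_stg() and grackle_set_loop_snoop() are the same read-modify-write over an address/data register window, ending with a throwaway read that posts the write. A sketch with a plain array standing in for the MMIO window; the bit position and register offset are illustrative, and a real driver would go through out_be32()/in_le32() on ioremapped registers.

#include <stdint.h>
#include <stdio.h>

/* Fake address/data pair standing in for cfg_addr/cfg_data. */
static uint32_t fake_cfg_space[256];
static uint32_t fake_cfg_addr;

static void cfg_write_addr(uint32_t a) { fake_cfg_addr = a; }
static uint32_t cfg_read_data(void) { return fake_cfg_space[fake_cfg_addr]; }
static void cfg_write_data(uint32_t v) { fake_cfg_space[fake_cfg_addr] = v; }

#define STG_ENABLE (1u << 6)    /* illustrative bit position */

/* Same shape as grackle_set_stg(): select the register, read, set or
 * clear one bit, write back, then read once more; on real hardware
 * that final read flushes (posts) the write. */
static void set_stg(uint32_t reg, int enable)
{
        uint32_t val;

        cfg_write_addr(reg);
        val = cfg_read_data();
        val = enable ? (val | STG_ENABLE) : (val & ~STG_ENABLE);
        cfg_write_addr(reg);
        cfg_write_data(val);
        (void)cfg_read_data();          /* post the write */
}

int main(void)
{
        set_stg(0xa8, 1);
        printf("reg 0xa8 = %#x\n", fake_cfg_space[0xa8]);
        return 0;
}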
|
H A D | cpm2.c | 121 u32 __iomem *bp; __cpm2_setbrg() local
127 bp = cpm2_map_size(im_brgc1, 16); __cpm2_setbrg()
129 bp = cpm2_map_size(im_brgc5, 16); __cpm2_setbrg()
132 bp += brg; __cpm2_setbrg()
138 out_be32(bp, val); __cpm2_setbrg()
139 cpm2_unmap(bp); __cpm2_setbrg()
|
/linux-4.1.27/drivers/net/ethernet/apple/ |
H A D | bmac.c | 228 struct bmac_data *bp = netdev_priv(dev); bmac_enable_and_reset_chip() local 229 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_enable_and_reset_chip() 230 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_enable_and_reset_chip() 237 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); bmac_enable_and_reset_chip() 312 struct bmac_data *bp = netdev_priv(dev); bmac_init_registers() local 329 if (!bp->is_bmac_plus) { bmac_init_registers() 371 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; bmac_init_registers() 372 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmac_init_registers() 373 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmac_init_registers() 374 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmac_init_registers() 375 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ bmac_init_registers() 405 struct bmac_data *bp = netdev_priv(dev); bmac_start_chip() local 406 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_start_chip() 425 struct bmac_data *bp = netdev_priv(dev); bmac_init_phy() local 435 if (bp->is_bmac_plus) { bmac_init_phy() 459 struct bmac_data *bp = netdev_priv(dev); bmac_suspend() local 466 spin_lock_irqsave(&bp->lock, flags); bmac_suspend() 467 if (bp->timeout_active) { bmac_suspend() 468 del_timer(&bp->tx_timeout); bmac_suspend() 469 bp->timeout_active = 0; bmac_suspend() 472 disable_irq(bp->tx_dma_intr); bmac_suspend() 473 disable_irq(bp->rx_dma_intr); bmac_suspend() 474 bp->sleeping = 1; bmac_suspend() 475 spin_unlock_irqrestore(&bp->lock, flags); bmac_suspend() 476 if (bp->opened) { bmac_suspend() 477 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_suspend() 478 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_suspend() 490 if (bp->rx_bufs[i] != NULL) { bmac_suspend() 491 dev_kfree_skb(bp->rx_bufs[i]); bmac_suspend() 492 bp->rx_bufs[i] = NULL; bmac_suspend() 496 if (bp->tx_bufs[i] != NULL) { bmac_suspend() 497 dev_kfree_skb(bp->tx_bufs[i]); bmac_suspend() 498 bp->tx_bufs[i] = NULL; bmac_suspend() 502 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_suspend() 509 struct bmac_data *bp = netdev_priv(dev); bmac_resume() local 512 if (bp->opened) bmac_resume() 516 enable_irq(bp->tx_dma_intr); bmac_resume() 517 enable_irq(bp->rx_dma_intr); bmac_resume() 526 struct bmac_data *bp = netdev_priv(dev); bmac_set_address() local 533 spin_lock_irqsave(&bp->lock, flags); bmac_set_address() 544 spin_unlock_irqrestore(&bp->lock, flags); bmac_set_address() 551 struct bmac_data *bp = netdev_priv(dev); bmac_set_timeout() local 554 spin_lock_irqsave(&bp->lock, flags); bmac_set_timeout() 555 if (bp->timeout_active) bmac_set_timeout() 556 del_timer(&bp->tx_timeout); bmac_set_timeout() 557 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; bmac_set_timeout() 558 bp->tx_timeout.function = bmac_tx_timeout; bmac_set_timeout() 559 bp->tx_timeout.data = (unsigned long) dev; bmac_set_timeout() 560 add_timer(&bp->tx_timeout); bmac_set_timeout() 561 bp->timeout_active = 1; bmac_set_timeout() 562 spin_unlock_irqrestore(&bp->lock, flags); bmac_set_timeout() 589 bmac_init_tx_ring(struct bmac_data *bp) bmac_init_tx_ring() argument 591 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_init_tx_ring() 593 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); bmac_init_tx_ring() 595 bp->tx_empty = 0; bmac_init_tx_ring() 596 bp->tx_fill = 0; bmac_init_tx_ring() 597 bp->tx_fullup = 0; 
bmac_init_tx_ring() 600 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], bmac_init_tx_ring() 601 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); bmac_init_tx_ring() 606 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); bmac_init_tx_ring() 612 struct bmac_data *bp = netdev_priv(dev); bmac_init_rx_ring() local 613 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_init_rx_ring() 618 memset((char *)bp->rx_cmds, 0, bmac_init_rx_ring() 621 if ((skb = bp->rx_bufs[i]) == NULL) { bmac_init_rx_ring() 622 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); bmac_init_rx_ring() 626 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); bmac_init_rx_ring() 629 bp->rx_empty = 0; bmac_init_rx_ring() 630 bp->rx_fill = i; bmac_init_rx_ring() 633 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], bmac_init_rx_ring() 634 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); bmac_init_rx_ring() 638 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); bmac_init_rx_ring() 646 struct bmac_data *bp = netdev_priv(dev); bmac_transmit_packet() local 647 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_transmit_packet() 652 /* bp->tx_empty, bp->tx_fill)); */ bmac_transmit_packet() 653 i = bp->tx_fill + 1; bmac_transmit_packet() 656 if (i == bp->tx_empty) { bmac_transmit_packet() 658 bp->tx_fullup = 1; bmac_transmit_packet() 663 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); bmac_transmit_packet() 665 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); bmac_transmit_packet() 667 bp->tx_bufs[bp->tx_fill] = skb; bmac_transmit_packet() 668 bp->tx_fill = i; bmac_transmit_packet() 682 struct bmac_data *bp = netdev_priv(dev); bmac_rxdma_intr() local 683 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_rxdma_intr() 691 spin_lock_irqsave(&bp->lock, flags); bmac_rxdma_intr() 698 i = bp->rx_empty; bmac_rxdma_intr() 701 cp = &bp->rx_cmds[i]; bmac_rxdma_intr() 712 skb = bp->rx_bufs[i]; bmac_rxdma_intr() 713 bp->rx_bufs[i] = NULL; bmac_rxdma_intr() 725 if ((skb = bp->rx_bufs[i]) == NULL) { bmac_rxdma_intr() 726 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); bmac_rxdma_intr() 728 skb_reserve(bp->rx_bufs[i], 2); bmac_rxdma_intr() 730 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); bmac_rxdma_intr() 738 bp->rx_fill = last; bmac_rxdma_intr() 739 bp->rx_empty = i; bmac_rxdma_intr() 743 spin_unlock_irqrestore(&bp->lock, flags); bmac_rxdma_intr() 756 struct bmac_data *bp = netdev_priv(dev); bmac_txdma_intr() local 761 spin_lock_irqsave(&bp->lock, flags); bmac_txdma_intr() 767 /* del_timer(&bp->tx_timeout); */ bmac_txdma_intr() 768 /* bp->timeout_active = 0; */ bmac_txdma_intr() 771 cp = &bp->tx_cmds[bp->tx_empty]; bmac_txdma_intr() 780 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) bmac_txdma_intr() 784 if (bp->tx_bufs[bp->tx_empty]) { bmac_txdma_intr() 786 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); bmac_txdma_intr() 788 bp->tx_bufs[bp->tx_empty] = NULL; bmac_txdma_intr() 789 bp->tx_fullup = 0; bmac_txdma_intr() 791 if (++bp->tx_empty >= N_TX_RING) bmac_txdma_intr() 792 bp->tx_empty = 0; bmac_txdma_intr() 793 if (bp->tx_empty == bp->tx_fill) bmac_txdma_intr() 797 spin_unlock_irqrestore(&bp->lock, flags); bmac_txdma_intr() 867 bmac_addhash(struct bmac_data *bp, unsigned char *addr) bmac_addhash() argument 875 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ bmac_addhash() 878 bp->hash_use_count[crc/16] |= mask; bmac_addhash() 882 bmac_removehash(struct bmac_data *bp, unsigned char *addr) bmac_removehash() argument 890 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! 
*/ bmac_removehash() 891 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ bmac_removehash() 894 bp->hash_table_mask[crc/16] &= mask; bmac_removehash() 934 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) bmac_update_hash_table_mask() argument 936 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmac_update_hash_table_mask() 937 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmac_update_hash_table_mask() 938 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmac_update_hash_table_mask() 939 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ bmac_update_hash_table_mask() 945 struct bmac_data *bp, unsigned char *addr) 948 bmac_addhash(bp, addr); 950 bmac_update_hash_table_mask(dev, bp); 957 struct bmac_data *bp, unsigned char *addr) 959 bmac_removehash(bp, addr); 961 bmac_update_hash_table_mask(dev, bp); 975 struct bmac_data *bp = netdev_priv(dev); bmac_set_multicast() local 980 if (bp->sleeping) bmac_set_multicast() 986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; bmac_set_multicast() 987 bmac_update_hash_table_mask(dev, bp); bmac_set_multicast() 997 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; bmac_set_multicast() 998 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; bmac_set_multicast() 1004 bmac_addhash(bp, ha->addr); bmac_set_multicast() 1005 bmac_update_hash_table_mask(dev, bp); bmac_set_multicast() 1201 struct bmac_data *bp = netdev_priv(dev); bmac_reset_and_enable() local 1206 spin_lock_irqsave(&bp->lock, flags); bmac_reset_and_enable() 1208 bmac_init_tx_ring(bp); bmac_reset_and_enable() 1213 bp->sleeping = 0; bmac_reset_and_enable() 1227 spin_unlock_irqrestore(&bp->lock, flags); bmac_reset_and_enable() 1247 struct bmac_data *bp; bmac_probe() local 1273 bp = netdev_priv(dev); bmac_probe() 1277 bp->mdev = mdev; bmac_probe() 1278 spin_lock_init(&bp->lock); bmac_probe() 1310 bp->is_bmac_plus = is_bmac_plus; bmac_probe() 1311 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); bmac_probe() 1312 if (!bp->tx_dma) bmac_probe() 1314 bp->tx_dma_intr = macio_irq(mdev, 1); bmac_probe() 1315 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); bmac_probe() 1316 if (!bp->rx_dma) bmac_probe() 1318 bp->rx_dma_intr = macio_irq(mdev, 2); bmac_probe() 1320 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); bmac_probe() 1321 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; bmac_probe() 1323 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); bmac_probe() 1324 skb_queue_head_init(bp->queue); bmac_probe() 1326 init_timer(&bp->tx_timeout); bmac_probe() 1333 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); bmac_probe() 1335 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); bmac_probe() 1338 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); bmac_probe() 1340 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); bmac_probe() 1348 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_probe() 1363 free_irq(bp->rx_dma_intr, dev); bmac_probe() 1365 free_irq(bp->tx_dma_intr, dev); bmac_probe() 1369 iounmap(bp->rx_dma); bmac_probe() 1371 iounmap(bp->tx_dma); bmac_probe() 1377 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_probe() 1385 struct bmac_data *bp = netdev_priv(dev); bmac_open() local 1388 bp->opened = 1; bmac_open() 1396 struct bmac_data *bp = netdev_priv(dev); bmac_close() local 
1397 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_close() 1398 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_close() 1402 bp->sleeping = 1; bmac_close() 1420 if (bp->rx_bufs[i] != NULL) { bmac_close() 1421 dev_kfree_skb(bp->rx_bufs[i]); bmac_close() 1422 bp->rx_bufs[i] = NULL; bmac_close() 1427 if (bp->tx_bufs[i] != NULL) { bmac_close() 1428 dev_kfree_skb(bp->tx_bufs[i]); bmac_close() 1429 bp->tx_bufs[i] = NULL; bmac_close() 1434 bp->opened = 0; bmac_close() 1436 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_close() 1444 struct bmac_data *bp = netdev_priv(dev); bmac_start() local 1449 if (bp->sleeping) bmac_start() 1452 spin_lock_irqsave(&bp->lock, flags); bmac_start() 1454 i = bp->tx_fill + 1; bmac_start() 1457 if (i == bp->tx_empty) bmac_start() 1459 skb = skb_dequeue(bp->queue); bmac_start() 1464 spin_unlock_irqrestore(&bp->lock, flags); bmac_start() 1470 struct bmac_data *bp = netdev_priv(dev); bmac_output() local 1471 skb_queue_tail(bp->queue, skb); bmac_output() 1479 struct bmac_data *bp = netdev_priv(dev); bmac_tx_timeout() local 1480 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_tx_timeout() 1481 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_tx_timeout() 1488 spin_lock_irqsave(&bp->lock, flags); bmac_tx_timeout() 1489 bp->timeout_active = 0; bmac_tx_timeout() 1492 /* bmac_handle_misc_intrs(bp, 0); */ bmac_tx_timeout() 1494 cp = &bp->tx_cmds[bp->tx_empty]; bmac_tx_timeout() 1496 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ bmac_tx_timeout() 1517 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); bmac_tx_timeout() 1518 i = bp->tx_empty; bmac_tx_timeout() 1520 if (i != bp->tx_fill) { bmac_tx_timeout() 1521 dev_kfree_skb(bp->tx_bufs[i]); bmac_tx_timeout() 1522 bp->tx_bufs[i] = NULL; bmac_tx_timeout() 1524 bp->tx_empty = i; bmac_tx_timeout() 1526 bp->tx_fullup = 0; bmac_tx_timeout() 1528 if (i != bp->tx_fill) { bmac_tx_timeout() 1529 cp = &bp->tx_cmds[i]; bmac_tx_timeout() 1544 spin_unlock_irqrestore(&bp->lock, flags); bmac_tx_timeout() 1605 struct bmac_data *bp = netdev_priv(dev); bmac_remove() local 1610 free_irq(bp->tx_dma_intr, dev); bmac_remove() 1611 free_irq(bp->rx_dma_intr, dev); bmac_remove() 1614 iounmap(bp->tx_dma); bmac_remove() 1615 iounmap(bp->rx_dma); bmac_remove()
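bmac_addhash()/bmac_removehash() keep a per-bucket use count alongside the 64-bit hardware hash filter, so a filter bit is only cleared once the last multicast address hashing to it goes away. A toy version of that refcounted filter; the crc6() bucket function is an invented placeholder for the driver's Ethernet-CRC computation, and the 4x16-bit register split mirrors the BHASH0..BHASH3 writes above.

#include <stdint.h>
#include <stdio.h>

static uint16_t hash_table_mask[4];     /* 64 filter bits, 16 per register */
static uint8_t hash_use_count[64];      /* addresses per bucket */

/* Placeholder bucket hash; the real driver derives this from the
 * Ethernet CRC of the address. */
static unsigned crc6(const unsigned char *addr)
{
        unsigned h = 0, i;

        for (i = 0; i < 6; i++)
                h = h * 17 + addr[i];
        return h & 63;
}

static void addhash(const unsigned char *addr)
{
        unsigned crc = crc6(addr);

        if (hash_use_count[crc]++)
                return;                         /* bit already set */
        hash_table_mask[crc / 16] |= 1u << (crc % 16);
}

static void removehash(const unsigned char *addr)
{
        unsigned crc = crc6(addr);

        if (hash_use_count[crc] == 0)
                return;                         /* bit wasn't in use */
        if (--hash_use_count[crc])
                return;                         /* bit still in use */
        hash_table_mask[crc / 16] &= ~(1u << (crc % 16));
}

int main(void)
{
        unsigned char a[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        addhash(a);
        addhash(a);             /* second user of the same bucket */
        removehash(a);          /* bit must survive: one user left */
        printf("bucket=%u mask=%#x count=%d\n", crc6(a),
               hash_table_mask[crc6(a) / 16], hash_use_count[crc6(a)]);
        return 0;
}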
|
/linux-4.1.27/drivers/net/fddi/ |
H A D | defxx.c | 266 static void dfx_bus_config_check(DFX_board_t *bp); 271 static int dfx_adap_init(DFX_board_t *bp, int get_buffers); 276 static void dfx_int_pr_halt_id(DFX_board_t *bp); 277 static void dfx_int_type_0_process(DFX_board_t *bp); 284 static int dfx_ctl_update_cam(DFX_board_t *bp); 285 static int dfx_ctl_update_filters(DFX_board_t *bp); 287 static int dfx_hw_dma_cmd_req(DFX_board_t *bp); 288 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data); 289 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type); 290 static int dfx_hw_adap_state_rd(DFX_board_t *bp); 291 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type); 293 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); 294 static void dfx_rcv_queue_process(DFX_board_t *bp); 296 static void dfx_rcv_flush(DFX_board_t *bp); 298 static inline void dfx_rcv_flush(DFX_board_t *bp) {} dfx_rcv_flush() argument 303 static int dfx_xmt_done(DFX_board_t *bp); dfx_rcv_flush() 304 static void dfx_xmt_flush(DFX_board_t *bp); dfx_rcv_flush() 326 * bp - pointer to board information dfx_rcv_flush() 352 * bp->base is a valid base I/O address for this adapter. dfx_rcv_flush() 363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data) dfx_writel() argument 365 writel(data, bp->base.mem + offset); dfx_writel() 369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) dfx_outl() argument 371 outl(data, bp->base.port + offset); dfx_outl() 374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) dfx_port_write_long() argument 376 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_write_long() 381 dfx_writel(bp, offset, data); dfx_port_write_long() 383 dfx_outl(bp, offset, data); dfx_port_write_long() 387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data) dfx_readl() argument 390 *data = readl(bp->base.mem + offset); dfx_readl() 393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) dfx_inl() argument 395 *data = inl(bp->base.port + offset); dfx_inl() 398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) dfx_port_read_long() argument 400 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_read_long() 405 dfx_readl(bp, offset, data); dfx_port_read_long() 407 dfx_inl(bp, offset, data); dfx_port_read_long() 535 DFX_board_t *bp; /* board pointer */ dfx_register() local 547 dev = alloc_fddidev(sizeof(*bp)); dfx_register() 566 bp = netdev_priv(dev); dfx_register() 567 bp->bus_dev = bdev; dfx_register() 617 bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); dfx_register() 618 if (!bp->base.mem) { dfx_register() 624 bp->base.port = bar_start[0]; dfx_register() 650 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_register() 654 if (bp->kmalloced) dfx_register() 656 bp->kmalloced, bp->kmalloced_dma); dfx_register() 660 iounmap(bp->base.mem); dfx_register() 708 * bp->base has already been set with the proper 719 DFX_board_t *bp = netdev_priv(dev); dfx_bus_init() local 720 struct device *bdev = bp->bus_dev; dfx_bus_init() 730 bp->dev = dev; dfx_bus_init() 845 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val); dfx_bus_init() 870 * bp->base has already been set with the proper 879 DFX_board_t *bp = netdev_priv(dev); dfx_bus_uninit() local 880 struct device *bdev = bp->bus_dev; dfx_bus_uninit() 905 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0); dfx_bus_uninit() 923 * bp - pointer to board information 940 static void 
dfx_bus_config_check(DFX_board_t *bp) dfx_bus_config_check() argument 942 struct device __maybe_unused *bdev = bp->bus_dev; dfx_bus_config_check() 964 status = dfx_hw_port_ctrl_req(bp, dfx_bus_config_check() 979 switch (bp->burst_size) dfx_bus_config_check() 983 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8; dfx_bus_config_check() 992 bp->full_duplex_enb = PI_SNMP_K_FALSE; dfx_bus_config_check() 1039 DFX_board_t *bp = netdev_priv(dev); dfx_driver_init() local 1040 struct device *bdev = bp->bus_dev; dfx_driver_init() 1067 bp->full_duplex_enb = PI_SNMP_K_FALSE; dfx_driver_init() 1068 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */ dfx_driver_init() 1069 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF; dfx_driver_init() 1070 bp->rcv_bufs_to_post = RCV_BUFS_DEF; dfx_driver_init() 1081 dfx_bus_config_check(bp); dfx_driver_init() 1085 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_driver_init() 1089 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); dfx_driver_init() 1093 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0, dfx_driver_init() 1100 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32)); dfx_driver_init() 1102 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, dfx_driver_init() 1109 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16)); dfx_driver_init() 1118 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); dfx_driver_init() 1138 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_driver_init() 1142 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, dfx_driver_init() 1143 &bp->kmalloced_dma, dfx_driver_init() 1148 top_p = bp->kmalloced_dma; /* get physical address of buffer */ dfx_driver_init() 1167 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v; dfx_driver_init() 1168 bp->descr_block_phys = curr_p; dfx_driver_init() 1174 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v; dfx_driver_init() 1175 bp->cmd_req_phys = curr_p; dfx_driver_init() 1181 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v; dfx_driver_init() 1182 bp->cmd_rsp_phys = curr_p; dfx_driver_init() 1188 bp->rcv_block_virt = curr_v; dfx_driver_init() 1189 bp->rcv_block_phys = curr_p; dfx_driver_init() 1192 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); dfx_driver_init() 1193 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); dfx_driver_init() 1198 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v; dfx_driver_init() 1199 bp->cons_block_phys = curr_p; dfx_driver_init() 1204 print_name, bp->descr_block_virt, &bp->descr_block_phys); dfx_driver_init() 1206 print_name, bp->cmd_req_virt, &bp->cmd_req_phys); dfx_driver_init() 1208 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys); dfx_driver_init() 1210 print_name, bp->rcv_block_virt, &bp->rcv_block_phys); dfx_driver_init() 1212 print_name, bp->cons_block_virt, &bp->cons_block_phys); dfx_driver_init() 1230 * bp - pointer to board information 1243 * bp->reset_type should be set to a valid reset type value before 1251 static int dfx_adap_init(DFX_board_t *bp, int get_buffers) dfx_adap_init() argument 1257 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_adap_init() 1261 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS) dfx_adap_init() 1263 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name); dfx_adap_init() 1272 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0); dfx_adap_init() 1281 bp->cmd_req_reg.lword = 0; dfx_adap_init() 1282 bp->cmd_rsp_reg.lword = 0; 
dfx_adap_init() 1283 bp->rcv_xmt_reg.lword = 0; dfx_adap_init() 1287 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); dfx_adap_init() 1291 if (dfx_hw_port_ctrl_req(bp, dfx_adap_init() 1294 bp->burst_size, dfx_adap_init() 1297 printk("%s: Could not set adapter burst size!\n", bp->dev->name); dfx_adap_init() 1308 if (dfx_hw_port_ctrl_req(bp, dfx_adap_init() 1310 bp->cons_block_phys, dfx_adap_init() 1314 printk("%s: Could not set consumer block address!\n", bp->dev->name); dfx_adap_init() 1328 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT, dfx_adap_init() 1329 (u32)(bp->descr_block_phys | dfx_adap_init() 1333 bp->dev->name); dfx_adap_init() 1339 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET; dfx_adap_init() 1340 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME; dfx_adap_init() 1341 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */ dfx_adap_init() 1342 bp->cmd_req_virt->char_set.item[0].item_index = 0; dfx_adap_init() 1343 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL; dfx_adap_init() 1344 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init() 1346 printk("%s: DMA command request failed!\n", bp->dev->name); dfx_adap_init() 1352 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET; dfx_adap_init() 1353 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS; dfx_adap_init() 1354 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb; dfx_adap_init() 1355 bp->cmd_req_virt->snmp_set.item[0].item_index = 0; dfx_adap_init() 1356 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ; dfx_adap_init() 1357 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt; dfx_adap_init() 1358 bp->cmd_req_virt->snmp_set.item[1].item_index = 0; dfx_adap_init() 1359 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL; dfx_adap_init() 1360 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init() 1362 printk("%s: DMA command request failed!\n", bp->dev->name); dfx_adap_init() 1368 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) dfx_adap_init() 1370 printk("%s: Adapter CAM update failed!\n", bp->dev->name); dfx_adap_init() 1376 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) dfx_adap_init() 1378 printk("%s: Adapter filters update failed!\n", bp->dev->name); dfx_adap_init() 1388 dfx_rcv_flush(bp); dfx_adap_init() 1392 if (dfx_rcv_init(bp, get_buffers)) dfx_adap_init() 1394 printk("%s: Receive buffer allocation failed\n", bp->dev->name); dfx_adap_init() 1396 dfx_rcv_flush(bp); dfx_adap_init() 1402 bp->cmd_req_virt->cmd_type = PI_CMD_K_START; dfx_adap_init() 1403 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init() 1405 printk("%s: Start command failed\n", bp->dev->name); dfx_adap_init() 1407 dfx_rcv_flush(bp); dfx_adap_init() 1413 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS); dfx_adap_init() 1450 DFX_board_t *bp = netdev_priv(dev); dfx_open() local 1475 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); dfx_open() 1479 memset(bp->uc_table, 0, sizeof(bp->uc_table)); dfx_open() 1480 memset(bp->mc_table, 0, sizeof(bp->mc_table)); dfx_open() 1481 bp->uc_count = 0; dfx_open() 1482 bp->mc_count = 0; dfx_open() 1486 bp->ind_group_prom = PI_FSTATE_K_BLOCK; dfx_open() 1487 bp->group_prom = PI_FSTATE_K_BLOCK; dfx_open() 1489 spin_lock_init(&bp->lock); dfx_open() 1493 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */ dfx_open() 1494 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS) dfx_open() 1541 DFX_board_t *bp = netdev_priv(dev); dfx_close() local 1547 
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_close() 1551 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); dfx_close() 1562 dfx_xmt_flush(bp); dfx_close() 1575 bp->cmd_req_reg.lword = 0; dfx_close() 1576 bp->cmd_rsp_reg.lword = 0; dfx_close() 1577 bp->rcv_xmt_reg.lword = 0; dfx_close() 1581 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); dfx_close() 1585 dfx_rcv_flush(bp); dfx_close() 1611 * bp - pointer to board information 1626 static void dfx_int_pr_halt_id(DFX_board_t *bp) dfx_int_pr_halt_id() argument 1633 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_int_pr_halt_id() 1641 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name); dfx_int_pr_halt_id() 1645 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name); dfx_int_pr_halt_id() 1649 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name); dfx_int_pr_halt_id() 1653 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name); dfx_int_pr_halt_id() 1657 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name); dfx_int_pr_halt_id() 1661 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name); dfx_int_pr_halt_id() 1665 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name); dfx_int_pr_halt_id() 1669 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name); dfx_int_pr_halt_id() 1673 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name); dfx_int_pr_halt_id() 1677 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id); dfx_int_pr_halt_id() 1695 * bp - pointer to board information 1730 static void dfx_int_type_0_process(DFX_board_t *bp) dfx_int_type_0_process() argument 1742 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status); dfx_int_type_0_process() 1743 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status); dfx_int_type_0_process() 1754 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name); dfx_int_type_0_process() 1759 printk("%s: Packet Memory Parity Error\n", bp->dev->name); dfx_int_type_0_process() 1764 printk("%s: Host Bus Parity Error\n", bp->dev->name); dfx_int_type_0_process() 1768 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process() 1769 bp->reset_type = 0; /* rerun on-board diagnostics */ dfx_int_type_0_process() 1770 printk("%s: Resetting adapter...\n", bp->dev->name); dfx_int_type_0_process() 1771 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) dfx_int_type_0_process() 1773 printk("%s: Adapter reset failed! 
Disabling adapter interrupts.\n", bp->dev->name); dfx_int_type_0_process() 1774 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_int_type_0_process() 1777 printk("%s: Adapter reset successful!\n", bp->dev->name); dfx_int_type_0_process() 1787 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process() 1788 dfx_xmt_flush(bp); /* flush any outstanding packets */ dfx_int_type_0_process() 1789 (void) dfx_hw_port_ctrl_req(bp, dfx_int_type_0_process() 1802 state = dfx_hw_adap_state_rd(bp); /* get adapter state */ dfx_int_type_0_process() 1811 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name); dfx_int_type_0_process() 1812 dfx_int_pr_halt_id(bp); /* display halt id as string */ dfx_int_type_0_process() 1816 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process() 1817 bp->reset_type = 0; /* rerun on-board diagnostics */ dfx_int_type_0_process() 1818 printk("%s: Resetting adapter...\n", bp->dev->name); dfx_int_type_0_process() 1819 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) dfx_int_type_0_process() 1821 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name); dfx_int_type_0_process() 1822 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_int_type_0_process() 1825 printk("%s: Adapter reset successful!\n", bp->dev->name); dfx_int_type_0_process() 1829 bp->link_available = PI_K_TRUE; /* set link available flag */ dfx_int_type_0_process() 1847 * bp - pointer to board information 1877 DFX_board_t *bp = netdev_priv(dev); dfx_int_common() local 1882 if(dfx_xmt_done(bp)) /* free consumed xmt packets */ dfx_int_common() 1887 dfx_rcv_queue_process(bp); /* service received LLC frames */ dfx_int_common() 1896 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); dfx_int_common() 1900 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_int_common() 1905 dfx_int_type_0_process(bp); /* process Type 0 interrupts */ dfx_int_common() 1948 DFX_board_t *bp = netdev_priv(dev); dfx_interrupt() local 1949 struct device *bdev = bp->bus_dev; dfx_interrupt() 1959 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); dfx_interrupt() 1963 spin_lock(&bp->lock); dfx_interrupt() 1966 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, dfx_interrupt() 1973 dfx_port_write_long(bp, PFI_K_REG_STATUS, dfx_interrupt() 1975 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, dfx_interrupt() 1979 spin_unlock(&bp->lock); dfx_interrupt() 1989 spin_lock(&bp->lock); dfx_interrupt() 2003 spin_unlock(&bp->lock); dfx_interrupt() 2008 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); dfx_interrupt() 2018 spin_lock(&bp->lock); dfx_interrupt() 2023 spin_unlock(&bp->lock); dfx_interrupt() 2075 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_get_stats() local 2077 /* Fill the bp->stats structure with driver-maintained counters */ dfx_ctl_get_stats() 2079 bp->stats.gen.rx_packets = bp->rcv_total_frames; dfx_ctl_get_stats() 2080 bp->stats.gen.tx_packets = bp->xmt_total_frames; dfx_ctl_get_stats() 2081 bp->stats.gen.rx_bytes = bp->rcv_total_bytes; dfx_ctl_get_stats() 2082 bp->stats.gen.tx_bytes = bp->xmt_total_bytes; dfx_ctl_get_stats() 2083 bp->stats.gen.rx_errors = bp->rcv_crc_errors + dfx_ctl_get_stats() 2084 bp->rcv_frame_status_errors + dfx_ctl_get_stats() 2085 bp->rcv_length_errors; dfx_ctl_get_stats() 2086 bp->stats.gen.tx_errors = bp->xmt_length_errors; dfx_ctl_get_stats() 2087 bp->stats.gen.rx_dropped = bp->rcv_discards; 
dfx_ctl_get_stats() 2088 bp->stats.gen.tx_dropped = bp->xmt_discards; dfx_ctl_get_stats() 2089 bp->stats.gen.multicast = bp->rcv_multicast_frames; dfx_ctl_get_stats() 2090 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */ dfx_ctl_get_stats() 2094 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; dfx_ctl_get_stats() 2095 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_get_stats() 2096 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats() 2098 /* Fill the bp->stats structure with the SMT MIB object values */ dfx_ctl_get_stats() 2100 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); dfx_ctl_get_stats() 2101 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; dfx_ctl_get_stats() 2102 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; dfx_ctl_get_stats() 2103 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; dfx_ctl_get_stats() 2104 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); dfx_ctl_get_stats() 2105 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; dfx_ctl_get_stats() 2106 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; dfx_ctl_get_stats() 2107 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; dfx_ctl_get_stats() 2108 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; dfx_ctl_get_stats() 2109 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; dfx_ctl_get_stats() 2110 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; dfx_ctl_get_stats() 2111 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; dfx_ctl_get_stats() 2112 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; dfx_ctl_get_stats() 2113 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; dfx_ctl_get_stats() 2114 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; dfx_ctl_get_stats() 2115 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; dfx_ctl_get_stats() 2116 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; dfx_ctl_get_stats() 2117 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; dfx_ctl_get_stats() 2118 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; dfx_ctl_get_stats() 2119 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; dfx_ctl_get_stats() 2120 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; dfx_ctl_get_stats() 2121 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; dfx_ctl_get_stats() 2122 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; dfx_ctl_get_stats() 2123 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; dfx_ctl_get_stats() 2124 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; dfx_ctl_get_stats() 2125 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; dfx_ctl_get_stats() 2126 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; dfx_ctl_get_stats() 2127 
bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; dfx_ctl_get_stats() 2128 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; dfx_ctl_get_stats() 2129 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats() 2130 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats() 2131 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats() 2132 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats() 2133 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; dfx_ctl_get_stats() 2134 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; dfx_ctl_get_stats() 2135 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; dfx_ctl_get_stats() 2136 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); dfx_ctl_get_stats() 2137 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; dfx_ctl_get_stats() 2138 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; dfx_ctl_get_stats() 2139 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; dfx_ctl_get_stats() 2140 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; dfx_ctl_get_stats() 2141 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; dfx_ctl_get_stats() 2142 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; dfx_ctl_get_stats() 2143 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; dfx_ctl_get_stats() 2144 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; dfx_ctl_get_stats() 2145 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; dfx_ctl_get_stats() 2146 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; dfx_ctl_get_stats() 2147 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; dfx_ctl_get_stats() 2148 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; dfx_ctl_get_stats() 2149 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; dfx_ctl_get_stats() 2150 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; dfx_ctl_get_stats() 2151 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; dfx_ctl_get_stats() 2152 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; dfx_ctl_get_stats() 2153 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); dfx_ctl_get_stats() 2154 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; dfx_ctl_get_stats() 2155 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; dfx_ctl_get_stats() 2156 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; dfx_ctl_get_stats() 2157 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; dfx_ctl_get_stats() 2158 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; dfx_ctl_get_stats() 
2159 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; dfx_ctl_get_stats() 2160 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; dfx_ctl_get_stats() 2161 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; dfx_ctl_get_stats() 2162 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; dfx_ctl_get_stats() 2163 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; dfx_ctl_get_stats() 2164 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); dfx_ctl_get_stats() 2165 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); dfx_ctl_get_stats() 2166 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; dfx_ctl_get_stats() 2167 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; dfx_ctl_get_stats() 2168 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; dfx_ctl_get_stats() 2169 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; dfx_ctl_get_stats() 2170 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; dfx_ctl_get_stats() 2171 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; dfx_ctl_get_stats() 2172 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; dfx_ctl_get_stats() 2173 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; dfx_ctl_get_stats() 2174 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; dfx_ctl_get_stats() 2175 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; dfx_ctl_get_stats() 2176 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; dfx_ctl_get_stats() 2177 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; dfx_ctl_get_stats() 2178 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; dfx_ctl_get_stats() 2179 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; dfx_ctl_get_stats() 2180 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; dfx_ctl_get_stats() 2181 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; dfx_ctl_get_stats() 2182 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; dfx_ctl_get_stats() 2183 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; dfx_ctl_get_stats() 2184 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; dfx_ctl_get_stats() 2185 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; dfx_ctl_get_stats() 2186 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; dfx_ctl_get_stats() 2187 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; dfx_ctl_get_stats() 2188 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; dfx_ctl_get_stats() 2189 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; dfx_ctl_get_stats() 2190 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; 
dfx_ctl_get_stats() 2191 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; dfx_ctl_get_stats() 2195 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; dfx_ctl_get_stats() 2196 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_get_stats() 2197 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats() 2199 /* Fill the bp->stats structure with the FDDI counter values */ dfx_ctl_get_stats() 2201 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; dfx_ctl_get_stats() 2202 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; dfx_ctl_get_stats() 2203 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; dfx_ctl_get_stats() 2204 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; dfx_ctl_get_stats() 2205 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; dfx_ctl_get_stats() 2206 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; dfx_ctl_get_stats() 2207 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; dfx_ctl_get_stats() 2208 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; dfx_ctl_get_stats() 2209 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; dfx_ctl_get_stats() 2210 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; dfx_ctl_get_stats() 2211 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; dfx_ctl_get_stats() 2213 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats() 2262 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_set_multicast_list() local 2269 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */ dfx_ctl_set_multicast_list() 2275 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */ dfx_ctl_set_multicast_list() 2296 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)) dfx_ctl_set_multicast_list() 2298 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ dfx_ctl_set_multicast_list() 2299 bp->mc_count = 0; /* Don't add mc addrs to CAM */ dfx_ctl_set_multicast_list() 2303 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */ dfx_ctl_set_multicast_list() 2304 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */ dfx_ctl_set_multicast_list() 2311 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], netdev_for_each_mc_addr() 2314 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) netdev_for_each_mc_addr() 2320 DBG_printk("%s: Multicast address table updated! 
Added %d addresses.\n", dev->name, bp->mc_count); 2326 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 2376 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_set_mac_address() local 2381 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */ dfx_ctl_set_mac_address() 2382 bp->uc_count = 1; dfx_ctl_set_mac_address() 2396 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE) dfx_ctl_set_mac_address() 2398 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ dfx_ctl_set_mac_address() 2399 bp->mc_count = 0; /* Don't add mc addrs to CAM */ dfx_ctl_set_mac_address() 2403 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) dfx_ctl_set_mac_address() 2415 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) dfx_ctl_set_mac_address() 2440 * bp - pointer to board information 2460 static int dfx_ctl_update_cam(DFX_board_t *bp) dfx_ctl_update_cam() argument 2478 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */ dfx_ctl_update_cam() 2479 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET; dfx_ctl_update_cam() 2480 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0]; dfx_ctl_update_cam() 2484 for (i=0; i < (int)bp->uc_count; i++) dfx_ctl_update_cam() 2488 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); dfx_ctl_update_cam() 2495 for (i=0; i < (int)bp->mc_count; i++) dfx_ctl_update_cam() 2497 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE) dfx_ctl_update_cam() 2499 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); dfx_ctl_update_cam() 2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_update_cam() 2525 * bp - pointer to board information 2543 static int dfx_ctl_update_filters(DFX_board_t *bp) dfx_ctl_update_filters() argument 2549 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET; dfx_ctl_update_filters() 2553 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST; dfx_ctl_update_filters() 2554 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS; dfx_ctl_update_filters() 2558 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM; dfx_ctl_update_filters() 2559 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom; dfx_ctl_update_filters() 2563 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM; dfx_ctl_update_filters() 2564 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom; dfx_ctl_update_filters() 2568 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL; dfx_ctl_update_filters() 2572 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_update_filters() 2590 * bp - pointer to board information 2618 static int dfx_hw_dma_cmd_req(DFX_board_t *bp) dfx_hw_dma_cmd_req() argument 2625 status = dfx_hw_adap_state_rd(bp); dfx_hw_dma_cmd_req() 2634 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP | dfx_hw_dma_cmd_req() 2636 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys; dfx_hw_dma_cmd_req() 2640 bp->cmd_rsp_reg.index.prod += 1; dfx_hw_dma_cmd_req() 2641 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req() 2642 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); dfx_hw_dma_cmd_req() 2646 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP | dfx_hw_dma_cmd_req() 2648 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys; dfx_hw_dma_cmd_req() 2652 bp->cmd_req_reg.index.prod += 1; dfx_hw_dma_cmd_req() 2653 
bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req() 2654 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); dfx_hw_dma_cmd_req() 2663 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req)) dfx_hw_dma_cmd_req() 2672 bp->cmd_req_reg.index.comp += 1; dfx_hw_dma_cmd_req() 2673 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req() 2674 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); dfx_hw_dma_cmd_req() 2683 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp)) dfx_hw_dma_cmd_req() 2692 bp->cmd_rsp_reg.index.comp += 1; dfx_hw_dma_cmd_req() 2693 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req() 2694 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); dfx_hw_dma_cmd_req() 2711 * bp - pointer to board information 2733 DFX_board_t *bp, dfx_hw_port_ctrl_req() 2750 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a); dfx_hw_port_ctrl_req() 2751 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b); dfx_hw_port_ctrl_req() 2752 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd); dfx_hw_port_ctrl_req() 2763 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd); dfx_hw_port_ctrl_req() 2778 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data); dfx_hw_port_ctrl_req() 2795 * bp - pointer to board information 2817 DFX_board_t *bp, dfx_hw_adap_reset() 2824 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */ dfx_hw_adap_reset() 2825 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET); dfx_hw_adap_reset() 2833 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0); dfx_hw_adap_reset() 2849 * bp - pointer to board information 2864 static int dfx_hw_adap_state_rd(DFX_board_t *bp) dfx_hw_adap_state_rd() argument 2868 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_hw_adap_state_rd() 2885 * bp - pointer to board information 2904 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) dfx_hw_dma_uninit() argument 2910 dfx_hw_adap_reset(bp, type); dfx_hw_dma_uninit() 2916 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL) dfx_hw_dma_uninit() 2953 * bp - pointer to board information 2976 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers) dfx_rcv_init() argument 3000 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_init() 3001 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_init() 3006 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, dfx_rcv_init() 3016 dma_addr = dma_map_single(bp->bus_dev, dfx_rcv_init() 3020 if (dma_mapping_error(bp->bus_dev, dma_addr)) { dfx_rcv_init() 3024 bp->descr_block_virt->rcv_data[i + j].long_0 = dfx_rcv_init() 3029 bp->descr_block_virt->rcv_data[i + j].long_1 = dfx_rcv_init() 3036 bp->p_rcv_buff_va[i+j] = (char *) newskb; dfx_rcv_init() 3039 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_init() 3040 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_init() 3042 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | dfx_rcv_init() 3044 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); dfx_rcv_init() 3045 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); dfx_rcv_init() 3052 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post; dfx_rcv_init() 3053 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, 
bp->rcv_xmt_reg.lword); dfx_rcv_init() 3070 * bp - pointer to board information 3091 DFX_board_t *bp dfx_rcv_queue_process() 3102 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); dfx_rcv_queue_process() 3103 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons) dfx_rcv_queue_process() 3109 entry = bp->rcv_xmt_reg.index.rcv_comp; dfx_rcv_queue_process() 3111 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); dfx_rcv_queue_process() 3113 p_buff = bp->p_rcv_buff_va[entry]; dfx_rcv_queue_process() 3115 dma_addr = bp->descr_block_virt->rcv_data[entry].long_1; dfx_rcv_queue_process() 3116 dma_sync_single_for_cpu(bp->bus_dev, dfx_rcv_queue_process() 3125 bp->rcv_crc_errors++; dfx_rcv_queue_process() 3127 bp->rcv_frame_status_errors++; dfx_rcv_queue_process() 3138 bp->rcv_length_errors++; dfx_rcv_queue_process() 3146 newskb = netdev_alloc_skb(bp->dev, dfx_rcv_queue_process() 3151 bp->bus_dev, dfx_rcv_queue_process() 3156 bp->bus_dev, dfx_rcv_queue_process() 3165 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; dfx_rcv_queue_process() 3166 dma_unmap_single(bp->bus_dev, dfx_rcv_queue_process() 3171 bp->p_rcv_buff_va[entry] = (char *)newskb; dfx_rcv_queue_process() 3172 bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr; dfx_rcv_queue_process() 3179 skb = netdev_alloc_skb(bp->dev, dfx_rcv_queue_process() 3183 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name); dfx_rcv_queue_process() 3184 bp->rcv_discards++; dfx_rcv_queue_process() 3191 bp->bus_dev, dfx_rcv_queue_process() 3204 skb->protocol = fddi_type_trans(skb, bp->dev); dfx_rcv_queue_process() 3205 bp->rcv_total_bytes += skb->len; dfx_rcv_queue_process() 3209 bp->rcv_total_frames++; dfx_rcv_queue_process() 3211 bp->rcv_multicast_frames++; dfx_rcv_queue_process() 3224 bp->rcv_xmt_reg.index.rcv_prod += 1; dfx_rcv_queue_process() 3225 bp->rcv_xmt_reg.index.rcv_comp += 1; dfx_rcv_queue_process() 3294 DFX_board_t *bp = netdev_priv(dev); dfx_xmt_queue_pkt() local 3316 bp->xmt_length_errors++; /* bump error counter */ dfx_xmt_queue_pkt() 3333 if (bp->link_available == PI_K_FALSE) dfx_xmt_queue_pkt() 3335 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? 
*/ dfx_xmt_queue_pkt() 3336 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */ dfx_xmt_queue_pkt() 3339 bp->xmt_discards++; /* bump error counter */ dfx_xmt_queue_pkt() 3353 dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len, dfx_xmt_queue_pkt() 3355 if (dma_mapping_error(bp->bus_dev, dma_addr)) { dfx_xmt_queue_pkt() 3360 spin_lock_irqsave(&bp->lock, flags); dfx_xmt_queue_pkt() 3364 prod = bp->rcv_xmt_reg.index.xmt_prod; dfx_xmt_queue_pkt() 3365 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]); dfx_xmt_queue_pkt() 3378 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */ dfx_xmt_queue_pkt() 3421 if (prod == bp->rcv_xmt_reg.index.xmt_comp) dfx_xmt_queue_pkt() 3424 spin_unlock_irqrestore(&bp->lock, flags); dfx_xmt_queue_pkt() 3448 bp->rcv_xmt_reg.index.xmt_prod = prod; dfx_xmt_queue_pkt() 3449 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); dfx_xmt_queue_pkt() 3450 spin_unlock_irqrestore(&bp->lock, flags); dfx_xmt_queue_pkt() 3468 * bp - pointer to board information 3488 static int dfx_xmt_done(DFX_board_t *bp) dfx_xmt_done() argument 3497 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); dfx_xmt_done() 3498 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons) dfx_xmt_done() 3502 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]); dfx_xmt_done() 3506 bp->xmt_total_frames++; dfx_xmt_done() 3507 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len; dfx_xmt_done() 3510 comp = bp->rcv_xmt_reg.index.xmt_comp; dfx_xmt_done() 3511 dma_unmap_single(bp->bus_dev, dfx_xmt_done() 3512 bp->descr_block_virt->xmt_data[comp].long_1, dfx_xmt_done() 3528 bp->rcv_xmt_reg.index.xmt_comp += 1; dfx_xmt_done() 3547 * bp - pointer to board information 3562 static void dfx_rcv_flush( DFX_board_t *bp ) dfx_rcv_flush() 3566 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_flush() 3567 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_flush() 3570 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j]; dfx_rcv_flush() 3572 dma_unmap_single(bp->bus_dev, dfx_rcv_flush() 3573 bp->descr_block_virt->rcv_data[i+j].long_1, dfx_rcv_flush() 3578 bp->p_rcv_buff_va[i+j] = NULL; dfx_rcv_flush() 3597 * bp - pointer to board information 3620 static void dfx_xmt_flush( DFX_board_t *bp ) dfx_xmt_flush() 3628 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod) dfx_xmt_flush() 3632 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]); dfx_xmt_flush() 3635 comp = bp->rcv_xmt_reg.index.xmt_comp; dfx_xmt_flush() 3636 dma_unmap_single(bp->bus_dev, dfx_xmt_flush() 3637 bp->descr_block_virt->xmt_data[comp].long_1, dfx_xmt_flush() 3644 bp->xmt_discards++; dfx_xmt_flush() 3657 bp->rcv_xmt_reg.index.xmt_comp += 1; dfx_xmt_flush() 3662 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX); dfx_xmt_flush() 3663 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX); dfx_xmt_flush() 3664 bp->cons_block_virt->xmt_rcv_data = prod_cons; dfx_xmt_flush() 3696 DFX_board_t *bp = netdev_priv(dev); dfx_unregister() local 3709 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_unregister() 3713 if (bp->kmalloced) dfx_unregister() 3715 bp->kmalloced, bp->kmalloced_dma); dfx_unregister() 3725 iounmap(bp->base.mem); dfx_unregister() 2732 dfx_hw_port_ctrl_req( DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data ) dfx_hw_port_ctrl_req() argument 
2816 dfx_hw_adap_reset( DFX_board_t *bp, PI_UINT32 type ) dfx_hw_adap_reset() argument
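Note on the ring arithmetic in dfx_hw_dma_cmd_req() above: the producer and completion indices are advanced by incrementing and then masking with PI_CMD_REQ_K_NUM_ENTRIES-1 (likewise for the response ring). That idiom wraps correctly only when the ring size is a power of two. A minimal sketch, with RING_ENTRIES as a hypothetical stand-in for the PI_*_K_NUM_ENTRIES constants:

	/* Increment-and-mask ring index, as used by the defxx command rings.
	 * RING_ENTRIES must be a power of two for the mask to wrap cleanly. */
	#define RING_ENTRIES 16		/* stand-in for PI_CMD_REQ_K_NUM_ENTRIES */

	static inline unsigned int ring_advance(unsigned int index)
	{
		return (index + 1) & (RING_ENTRIES - 1);	/* 15 wraps to 0 */
	}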
|
/linux-4.1.27/drivers/net/fddi/skfp/ |
H A D | skfddi.c | 384 skfddi_priv *bp = &smc->os; skfp_driver_init() local 390 bp->base_addr = dev->base_addr; skfp_driver_init() 395 spin_lock_init(&bp->DriverLock); skfp_driver_init() 398 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA); skfp_driver_init() 399 if (!bp->LocalRxBuffer) { skfp_driver_init() 406 bp->SharedMemSize = mac_drv_check_space(); skfp_driver_init() 407 pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize); skfp_driver_init() 408 if (bp->SharedMemSize > 0) { skfp_driver_init() 409 bp->SharedMemSize += 16; // for descriptor alignment skfp_driver_init() 411 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev, skfp_driver_init() 412 bp->SharedMemSize, skfp_driver_init() 413 &bp->SharedMemDMA); skfp_driver_init() 414 if (!bp->SharedMemAddr) { skfp_driver_init() 417 bp->SharedMemSize); skfp_driver_init() 420 bp->SharedMemHeap = 0; // Nothing used yet. skfp_driver_init() 423 bp->SharedMemAddr = NULL; skfp_driver_init() 424 bp->SharedMemHeap = 0; skfp_driver_init() 427 memset(bp->SharedMemAddr, 0, bp->SharedMemSize); skfp_driver_init() 445 if (bp->SharedMemAddr) { skfp_driver_init() 446 pci_free_consistent(&bp->pdev, skfp_driver_init() 447 bp->SharedMemSize, skfp_driver_init() 448 bp->SharedMemAddr, skfp_driver_init() 449 bp->SharedMemDMA); skfp_driver_init() 450 bp->SharedMemAddr = NULL; skfp_driver_init() 452 if (bp->LocalRxBuffer) { skfp_driver_init() 453 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE, skfp_driver_init() 454 bp->LocalRxBuffer, bp->LocalRxBufferDMA); skfp_driver_init() 455 bp->LocalRxBuffer = NULL; skfp_driver_init() 552 skfddi_priv *bp = &smc->os; skfp_close() local 564 skb_queue_purge(&bp->SendSkbQueue); skfp_close() 565 bp->QueueSkb = MAX_TX_QUEUE_LEN; skfp_close() 610 skfddi_priv *bp; skfp_interrupt() local 613 bp = &smc->os; skfp_interrupt() 626 spin_lock(&bp->DriverLock); skfp_interrupt() 635 spin_unlock(&bp->DriverLock); skfp_interrupt() 675 struct s_smc *bp = netdev_priv(dev); skfp_ctl_get_stats() local 677 /* Fill the bp->stats structure with driver-maintained counters */ skfp_ctl_get_stats() 679 bp->os.MacStat.port_bs_flag[0] = 0x1234; skfp_ctl_get_stats() 680 bp->os.MacStat.port_bs_flag[1] = 0x5678; skfp_ctl_get_stats() 685 /* Fill the bp->stats structure with the SMT MIB object values */ skfp_ctl_get_stats() 687 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); skfp_ctl_get_stats() 688 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; skfp_ctl_get_stats() 689 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; skfp_ctl_get_stats() 690 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; skfp_ctl_get_stats() 691 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); skfp_ctl_get_stats() 692 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; skfp_ctl_get_stats() 693 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; skfp_ctl_get_stats() 694 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; skfp_ctl_get_stats() 695 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; skfp_ctl_get_stats() 696 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; skfp_ctl_get_stats() 697 bp->stats.smt_config_capabilities = 
bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; skfp_ctl_get_stats() 698 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; skfp_ctl_get_stats() 699 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; skfp_ctl_get_stats() 700 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; skfp_ctl_get_stats() 701 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; skfp_ctl_get_stats() 702 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; skfp_ctl_get_stats() 703 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; skfp_ctl_get_stats() 704 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; skfp_ctl_get_stats() 705 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; skfp_ctl_get_stats() 706 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; skfp_ctl_get_stats() 707 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; skfp_ctl_get_stats() 708 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; skfp_ctl_get_stats() 709 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; skfp_ctl_get_stats() 710 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; skfp_ctl_get_stats() 711 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; skfp_ctl_get_stats() 712 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; skfp_ctl_get_stats() 713 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; skfp_ctl_get_stats() 714 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; skfp_ctl_get_stats() 715 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; skfp_ctl_get_stats() 716 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats() 717 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats() 718 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats() 719 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats() 720 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; skfp_ctl_get_stats() 721 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; skfp_ctl_get_stats() 722 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; skfp_ctl_get_stats() 723 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); skfp_ctl_get_stats() 724 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; skfp_ctl_get_stats() 725 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; skfp_ctl_get_stats() 726 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; skfp_ctl_get_stats() 727 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; skfp_ctl_get_stats() 728 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; skfp_ctl_get_stats() 729 bp->stats.mac_frame_error_ratio = 
bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; skfp_ctl_get_stats() 730 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; skfp_ctl_get_stats() 731 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; skfp_ctl_get_stats() 732 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; skfp_ctl_get_stats() 733 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; skfp_ctl_get_stats() 734 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; skfp_ctl_get_stats() 735 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; skfp_ctl_get_stats() 736 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; skfp_ctl_get_stats() 737 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; skfp_ctl_get_stats() 738 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; skfp_ctl_get_stats() 739 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; skfp_ctl_get_stats() 740 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); skfp_ctl_get_stats() 741 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; skfp_ctl_get_stats() 742 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; skfp_ctl_get_stats() 743 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; skfp_ctl_get_stats() 744 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; skfp_ctl_get_stats() 745 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; skfp_ctl_get_stats() 746 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; skfp_ctl_get_stats() 747 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; skfp_ctl_get_stats() 748 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; skfp_ctl_get_stats() 749 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; skfp_ctl_get_stats() 750 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; skfp_ctl_get_stats() 751 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); skfp_ctl_get_stats() 752 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); skfp_ctl_get_stats() 753 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; skfp_ctl_get_stats() 754 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; skfp_ctl_get_stats() 755 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; skfp_ctl_get_stats() 756 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; skfp_ctl_get_stats() 757 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; skfp_ctl_get_stats() 758 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; skfp_ctl_get_stats() 759 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; skfp_ctl_get_stats() 760 bp->stats.port_connection_capabilities[1] = 
bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; skfp_ctl_get_stats() 761 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; skfp_ctl_get_stats() 762 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; skfp_ctl_get_stats() 763 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; skfp_ctl_get_stats() 764 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; skfp_ctl_get_stats() 765 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; skfp_ctl_get_stats() 766 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; skfp_ctl_get_stats() 767 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; skfp_ctl_get_stats() 768 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; skfp_ctl_get_stats() 769 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; skfp_ctl_get_stats() 770 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; skfp_ctl_get_stats() 771 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; skfp_ctl_get_stats() 772 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; skfp_ctl_get_stats() 773 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; skfp_ctl_get_stats() 774 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; skfp_ctl_get_stats() 775 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; skfp_ctl_get_stats() 776 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; skfp_ctl_get_stats() 777 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; skfp_ctl_get_stats() 778 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; skfp_ctl_get_stats() 781 /* Fill the bp->stats structure with the FDDI counter values */ skfp_ctl_get_stats() 783 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; skfp_ctl_get_stats() 784 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; skfp_ctl_get_stats() 785 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; skfp_ctl_get_stats() 786 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; skfp_ctl_get_stats() 787 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; skfp_ctl_get_stats() 788 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; skfp_ctl_get_stats() 789 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; skfp_ctl_get_stats() 790 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; skfp_ctl_get_stats() 791 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; skfp_ctl_get_stats() 792 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; skfp_ctl_get_stats() 793 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; skfp_ctl_get_stats() 796 return (struct net_device_stats *)&bp->os.MacStat; skfp_ctl_get_stats() 840 skfddi_priv *bp = &smc->os; skfp_ctl_set_multicast_list() local 843 spin_lock_irqsave(&bp->DriverLock, Flags); skfp_ctl_set_multicast_list() 845 spin_unlock_irqrestore(&bp->DriverLock, Flags); 
skfp_ctl_set_multicast_list() 925 skfddi_priv *bp = &smc->os; skfp_ctl_set_mac_address() local 930 spin_lock_irqsave(&bp->DriverLock, Flags); skfp_ctl_set_mac_address() 932 spin_unlock_irqrestore(&bp->DriverLock, Flags); skfp_ctl_set_mac_address() 1045 skfddi_priv *bp = &smc->os; skfp_send_pkt() local 1059 bp->MacStat.gen.tx_errors++; /* bump error counter */ skfp_send_pkt() 1065 if (bp->QueueSkb == 0) { // return with tbusy set: queue full skfp_send_pkt() 1070 bp->QueueSkb--; skfp_send_pkt() 1071 skb_queue_tail(&bp->SendSkbQueue, skb); skfp_send_pkt() 1073 if (bp->QueueSkb == 0) { skfp_send_pkt() 1106 skfddi_priv *bp = &smc->os; send_queued_packets() local 1119 skb = skb_dequeue(&bp->SendSkbQueue); send_queued_packets() 1126 spin_lock_irqsave(&bp->DriverLock, Flags); send_queued_packets() 1155 pr_debug("%s: out of TXDs.\n", bp->dev->name); send_queued_packets() 1158 bp->dev->name); send_queued_packets() 1163 skb_queue_head(&bp->SendSkbQueue, skb); send_queued_packets() 1164 spin_unlock_irqrestore(&bp->DriverLock, Flags); send_queued_packets() 1169 bp->QueueSkb++; // one packet less in local queue send_queued_packets() 1176 dma_address = pci_map_single(&bp->pdev, skb->data, send_queued_packets() 1186 pci_unmap_single(&bp->pdev, dma_address, send_queued_packets() 1190 spin_unlock_irqrestore(&bp->DriverLock, Flags); send_queued_packets() 1278 skfddi_priv *bp = &smc->os; llc_restart_tx() local 1283 spin_unlock(&bp->DriverLock); llc_restart_tx() 1285 spin_lock(&bp->DriverLock); llc_restart_tx() 1286 netif_start_queue(bp->dev);// system may send again if it was blocked llc_restart_tx() 1462 skfddi_priv *bp = &smc->os; dma_complete() local 1467 int MaxFrameSize = bp->MaxFrameSize; dma_complete() 1469 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr, dma_complete() 1576 skfddi_priv *bp = &smc->os; mac_drv_rx_complete() local 1658 skb->protocol = fddi_type_trans(skb, bp->dev); mac_drv_rx_complete() 1851 skfddi_priv *bp = &smc->os; mac_drv_clear_rxd() local 1852 int MaxFrameSize = bp->MaxFrameSize; mac_drv_clear_rxd() 1854 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr, mac_drv_clear_rxd()
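skfp_send_pkt() above never touches the adapter directly: it tails the skb onto bp->SendSkbQueue and leaves send_queued_packets() to map and submit frames under the driver lock, pushing an skb back onto the head of the queue when the adapter runs out of transmit descriptors. A minimal sketch of that backlog pattern, assuming a hypothetical hw_tx() hand-off; the queue helpers are the standard <linux/skbuff.h> ones:

	#include <linux/skbuff.h>

	struct my_priv {
		struct sk_buff_head tx_backlog;	/* skb_queue_head_init() at probe */
	};

	static int hw_tx(struct sk_buff *skb);	/* hypothetical descriptor hand-off */

	static void queue_for_tx(struct my_priv *p, struct sk_buff *skb)
	{
		skb_queue_tail(&p->tx_backlog, skb);	/* producer: just enqueue */
	}

	static void drain_tx(struct my_priv *p)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&p->tx_backlog)) != NULL) {
			if (hw_tx(skb) < 0) {
				/* no descriptors: put it back and retry later */
				skb_queue_head(&p->tx_backlog, skb);
				break;
			}
		}
	}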
|
/linux-4.1.27/arch/x86/um/asm/ |
H A D | processor_64.h | 38 #define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; })
|
H A D | processor_32.h | 59 #define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; })
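Both current_bp() variants above read the frame-pointer register (%rbp on x86-64, %ebp on i386) with a single move. A compiler builtin returns the same value without per-architecture assembly; this is a sketch of an equivalent helper, valid only when the code is built with frame pointers (e.g. CONFIG_FRAME_POINTER), shown for illustration rather than as something these headers use:

	static inline unsigned long current_bp_builtin(void)
	{
		/* frame address of the current function == saved %rbp/%ebp */
		return (unsigned long)__builtin_frame_address(0);
	}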
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | hw_breakpoint.c | 63 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument 65 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint() 68 *slot = bp; arch_install_hw_breakpoint() 74 if (current->thread.last_hit_ubp != bp) arch_install_hw_breakpoint() 89 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument 93 if (*slot != bp) { arch_uninstall_hw_breakpoint() 106 void arch_unregister_hw_breakpoint(struct perf_event *bp) arch_unregister_hw_breakpoint() argument 113 if (bp->ctx && bp->ctx->task) arch_unregister_hw_breakpoint() 114 bp->ctx->task->thread.last_hit_ubp = NULL; arch_unregister_hw_breakpoint() 120 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument 122 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace() 142 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument 145 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings() 147 if (!bp) arch_validate_hwbkpt_settings() 151 if (bp->attr.bp_type & HW_BREAKPOINT_R) arch_validate_hwbkpt_settings() 153 if (bp->attr.bp_type & HW_BREAKPOINT_W) arch_validate_hwbkpt_settings() 158 if (!(bp->attr.exclude_user)) arch_validate_hwbkpt_settings() 160 if (!(bp->attr.exclude_kernel)) arch_validate_hwbkpt_settings() 162 if (!(bp->attr.exclude_hv)) arch_validate_hwbkpt_settings() 164 info->address = bp->attr.bp_addr; arch_validate_hwbkpt_settings() 165 info->len = bp->attr.bp_len; arch_validate_hwbkpt_settings() 177 if ((bp->attr.bp_addr >> 10) != arch_validate_hwbkpt_settings() 178 ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) arch_validate_hwbkpt_settings() 211 struct perf_event *bp; hw_breakpoint_handler() local 229 bp = __this_cpu_read(bp_per_reg); hw_breakpoint_handler() 230 if (!bp) hw_breakpoint_handler() 232 info = counter_arch_bp(bp); hw_breakpoint_handler() 240 if (bp->overflow_handler == ptrace_triggered) { hw_breakpoint_handler() 241 perf_bp_event(bp, regs); hw_breakpoint_handler() 253 if (!((bp->attr.bp_addr <= dar) && hw_breakpoint_handler() 254 (dar - bp->attr.bp_addr < bp->attr.bp_len))) hw_breakpoint_handler() 259 current->thread.last_hit_ubp = bp; hw_breakpoint_handler() 277 perf_event_disable(bp); hw_breakpoint_handler() 285 perf_bp_event(bp, regs); hw_breakpoint_handler() 299 struct perf_event *bp = NULL; single_step_dabr_instruction() local 302 bp = current->thread.last_hit_ubp; single_step_dabr_instruction() 307 if (!bp) single_step_dabr_instruction() 310 info = counter_arch_bp(bp); single_step_dabr_instruction() 317 perf_bp_event(bp, regs); single_step_dabr_instruction() 363 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
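arch_validate_hwbkpt_settings() above checks attributes that callers fill in through the generic perf breakpoint interface. A sketch of how such a watchpoint is typically armed: hw_breakpoint_init(), the attr.bp_* fields and register_user_hw_breakpoint() are the real API, while arm_watchpoint() and its handler argument are illustrative:

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>

	static struct perf_event *arm_watchpoint(struct task_struct *tsk,
						 unsigned long addr,
						 perf_overflow_handler_t handler)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);		/* type, size, exclude_* defaults */
		attr.bp_addr = addr;
		attr.bp_len  = HW_BREAKPOINT_LEN_8;	/* watch one doubleword */
		attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes */

		/* returns ERR_PTR() on failure -- check with IS_ERR() */
		return register_user_hw_breakpoint(&attr, handler, NULL, tsk);
	}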
|
H A D | ptrace.c | 918 void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument 929 attr = bp->attr; ptrace_triggered() 931 modify_user_hw_breakpoint(bp, &attr); ptrace_triggered() 941 struct perf_event *bp; ptrace_set_debugreg() local 979 bp = thread->ptrace_bps[0]; ptrace_set_debugreg() 981 if (bp) { ptrace_set_debugreg() 982 unregister_hw_breakpoint(bp); ptrace_set_debugreg() 987 if (bp) { ptrace_set_debugreg() 988 attr = bp->attr; ptrace_set_debugreg() 995 ret = modify_user_hw_breakpoint(bp, &attr); ptrace_set_debugreg() 999 thread->ptrace_bps[0] = bp; ptrace_set_debugreg() 1010 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_set_debugreg() 1012 if (IS_ERR(bp)) { ptrace_set_debugreg() 1014 return PTR_ERR(bp); ptrace_set_debugreg() 1375 struct perf_event *bp; ppc_set_hwdebug() local 1445 bp = thread->ptrace_bps[0]; ppc_set_hwdebug() 1446 if (bp) ppc_set_hwdebug() 1455 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ppc_set_hwdebug() 1457 if (IS_ERR(bp)) { ppc_set_hwdebug() 1459 return PTR_ERR(bp); ppc_set_hwdebug() 1482 struct perf_event *bp; ppc_del_hwdebug() local 1505 bp = thread->ptrace_bps[0]; ppc_del_hwdebug() 1506 if (bp) { ppc_del_hwdebug() 1507 unregister_hw_breakpoint(bp); ppc_del_hwdebug()
|
/linux-4.1.27/fs/jfs/ |
H A D | jfs_logmgr.c | 188 static void lbmFree(struct lbuf * bp); 189 static void lbmfree(struct lbuf * bp); 191 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block); 192 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag); 193 static int lbmIOWait(struct lbuf * bp, int flag); 195 static void lbmStartIO(struct lbuf * bp); 361 struct lbuf *bp; /* dst log page buffer */ lmWriteRecord() local 380 bp = (struct lbuf *) log->bp; lmWriteRecord() 381 lp = (struct logpage *) bp->l_ldata; lmWriteRecord() 431 bp = log->bp; lmWriteRecord() 432 lp = (struct logpage *) bp->l_ldata; lmWriteRecord() 456 bp = (struct lbuf *) log->bp; lmWriteRecord() 457 lp = (struct logpage *) bp->l_ldata; lmWriteRecord() 509 bp->l_eor = dstoffset; lmWriteRecord() 515 bp->l_eor); lmWriteRecord() 531 tblk->bp = log->bp; lmWriteRecord() 541 jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x", lmWriteRecord() 542 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); lmWriteRecord() 552 bp = (struct lbuf *) log->bp; lmWriteRecord() 553 lp = (struct logpage *) bp->l_ldata; lmWriteRecord() 578 struct lbuf *bp; lmNextPage() local 584 bp = log->bp; lmNextPage() 585 lp = (struct logpage *) bp->l_ldata; lmNextPage() 620 if (bp->l_wqnext == NULL) lmNextPage() 621 lbmWrite(log, bp, 0, 0); lmNextPage() 635 bp->l_ceor = bp->l_eor; lmNextPage() 636 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmNextPage() 637 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0); lmNextPage() 653 log->bp = nextbp; lmNextPage() 760 struct lbuf *bp; lmGCwrite() local 789 bp = (struct lbuf *) tblk->bp; lmGCwrite() 790 lp = (struct logpage *) bp->l_ldata; lmGCwrite() 796 bp->l_ceor = bp->l_eor; lmGCwrite() 797 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmGCwrite() 798 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC, lmGCwrite() 804 bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ lmGCwrite() 805 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmGCwrite() 806 lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write); lmGCwrite() 823 static void lmPostGC(struct lbuf * bp) lmPostGC() argument 826 struct jfs_log *log = bp->l_log; lmPostGC() 846 if (bp->l_flag & lbmERROR) lmPostGC() 881 lbmFree(bp); lmPostGC() 887 lp = (struct logpage *) bp->l_ldata; lmPostGC() 888 bp->l_ceor = bp->l_eor; lmPostGC() 889 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmPostGC() 891 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, lmPostGC() 905 ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || lmPostGC() 1268 struct lbuf *bp; lmLogInit() local 1305 bp = lbmAllocate(log , 0); lmLogInit() 1306 log->bp = bp; lmLogInit() 1307 bp->l_pn = bp->l_eor = 0; lmLogInit() 1358 if ((rc = lbmRead(log, log->page, &bp))) lmLogInit() 1361 lp = (struct logpage *) bp->l_ldata; lmLogInit() 1367 log->bp = bp; lmLogInit() 1368 bp->l_pn = log->page; lmLogInit() 1369 bp->l_eor = log->eor; lmLogInit() 1391 bp = log->bp; lmLogInit() 1392 bp->l_ceor = bp->l_eor; lmLogInit() 1393 lp = (struct logpage *) bp->l_ldata; lmLogInit() 1394 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmLogInit() 1395 lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0); lmLogInit() 1396 if ((rc = lbmIOWait(bp, 0))) lmLogInit() 1432 bp->l_wqnext = NULL; lmLogInit() 1433 lbmFree(bp); lmLogInit() 1661 struct lbuf *bp; lmLogShutdown() local 1679 bp = log->bp; lmLogShutdown() 1680 lp = (struct logpage *) bp->l_ldata; lmLogShutdown() 1681 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmLogShutdown() 1682 lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); lmLogShutdown() 1683 lbmIOWait(log->bp, lbmFREE); lmLogShutdown() 1684 log->bp = NULL; lmLogShutdown() 1800 * bp->wrqueue field), and 1817 log->bp = NULL; lbmLogInit() 1901 struct lbuf *bp; lbmAllocate() local 1908 LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); lbmAllocate() 1909 log->lbuf_free = bp->l_freelist; lbmAllocate() 1912 bp->l_flag = 0; lbmAllocate() 1914 bp->l_wqnext = NULL; lbmAllocate() 1915 bp->l_freelist = NULL; lbmAllocate() 1917 bp->l_pn = pn; lbmAllocate() 1918 bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); lbmAllocate() 1919 bp->l_ceor = 0; lbmAllocate() 1921 return bp; lbmAllocate() 1930 static void lbmFree(struct lbuf * bp) lbmFree() argument 1936 lbmfree(bp); lbmFree() 1941 static void lbmfree(struct lbuf * bp) lbmfree() argument 1943 struct jfs_log *log = bp->l_log; lbmfree() 1945 assert(bp->l_wqnext == NULL); lbmfree() 1950 bp->l_freelist = log->lbuf_free; lbmfree() 1951 log->lbuf_free = bp; lbmfree() 1964 * bp - log buffer 1969 static inline void lbmRedrive(struct lbuf *bp) lbmRedrive() argument 1974 bp->l_redrive_next = log_redrive_list; lbmRedrive() 1975 log_redrive_list = bp; lbmRedrive() 1988 struct lbuf *bp; lbmRead() local 1993 *bpp = bp = lbmAllocate(log, pn); lbmRead() 1994 jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn); lbmRead() 1996 bp->l_flag |= lbmREAD; lbmRead() 2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmRead() 2002 bio->bi_io_vec[0].bv_page = bp->l_page; lbmRead() 2004 bio->bi_io_vec[0].bv_offset = bp->l_offset; lbmRead() 2010 bio->bi_private = bp; lbmRead() 2019 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); lbmRead() 2040 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, lbmWrite() argument 2046 jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); lbmWrite() 2049 bp->l_blkno = lbmWrite() 2050 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); lbmWrite() 2057 bp->l_flag = flag; lbmWrite() 2060 * insert bp at tail of write queue associated with log lbmWrite() 2062 * (request is either for bp already/currently at head of queue lbmWrite() 2063 * or new bp to be inserted at tail) lbmWrite() 2068 if (bp->l_wqnext == NULL) { lbmWrite() 2071 log->wqueue = bp; lbmWrite() 2072 bp->l_wqnext = bp; lbmWrite() 2074 log->wqueue = bp; lbmWrite() 2075 bp->l_wqnext = tail->l_wqnext; lbmWrite() 2076 tail->l_wqnext = bp; lbmWrite() 2079 tail = bp; lbmWrite() 2083 if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { lbmWrite() 2091 lbmRedrive(bp); lbmWrite() 2093 lbmStartIO(bp); lbmWrite() 2096 lbmStartIO(bp); lbmWrite() 2108 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag) lbmDirectWrite() argument 2110 jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x", lbmDirectWrite() 2111 bp, flag, bp->l_pn); lbmDirectWrite() 2116 bp->l_flag = flag | lbmDIRECT; lbmDirectWrite() 2119 bp->l_blkno = lbmDirectWrite() 2120 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); lbmDirectWrite() 2125 lbmStartIO(bp); lbmDirectWrite() 2138 static void lbmStartIO(struct lbuf * bp) lbmStartIO() argument 2141 struct jfs_log *log = bp->l_log; lbmStartIO() 2146 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmStartIO() 2148 bio->bi_io_vec[0].bv_page = bp->l_page; lbmStartIO() 2150 bio->bi_io_vec[0].bv_offset = bp->l_offset; lbmStartIO() 2156 bio->bi_private = bp; lbmStartIO() 2172 static int lbmIOWait(struct lbuf * bp, int flag) lbmIOWait() argument 2177 jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); lbmIOWait() 2181 LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); lbmIOWait() 2183 rc = (bp->l_flag & lbmERROR) ? -EIO : 0; lbmIOWait() 2186 lbmfree(bp); lbmIOWait() 2190 jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); lbmIOWait() 2201 struct lbuf *bp = bio->bi_private; lbmIODone() local 2209 jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); lbmIODone() 2213 bp->l_flag |= lbmDONE; lbmIODone() 2216 bp->l_flag |= lbmERROR; lbmIODone() 2226 if (bp->l_flag & lbmREAD) { lbmIODone() 2227 bp->l_flag &= ~lbmREAD; lbmIODone() 2232 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone() 2240 * the bp at the head of write queue has completed pageout. lbmIODone() 2249 bp->l_flag &= ~lbmWRITE; lbmIODone() 2253 log = bp->l_log; lbmIODone() 2254 log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; lbmIODone() 2256 if (bp->l_flag & lbmDIRECT) { lbmIODone() 2257 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone() 2265 if (bp == tail) { lbmIODone() 2269 if (bp->l_flag & lbmRELEASE) { lbmIODone() 2271 bp->l_wqnext = NULL; lbmIODone() 2279 if (bp->l_flag & lbmRELEASE) { lbmIODone() 2280 nextbp = tail->l_wqnext = bp->l_wqnext; lbmIODone() 2281 bp->l_wqnext = NULL; lbmIODone() 2310 if (bp->l_flag & lbmSYNC) { lbmIODone() 2314 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone() 2320 else if (bp->l_flag & lbmGC) { lbmIODone() 2322 lmPostGC(bp); lbmIODone() 2332 assert(bp->l_flag & lbmRELEASE); lbmIODone() 2333 assert(bp->l_flag & lbmFREE); lbmIODone() 2334 lbmfree(bp); lbmIODone() 2342 struct lbuf *bp; jfsIOWait() local 2346 while ((bp = log_redrive_list)) { jfsIOWait() 2347 log_redrive_list = bp->l_redrive_next; jfsIOWait() 2348 bp->l_redrive_next = NULL; jfsIOWait() 2350 lbmStartIO(bp); jfsIOWait() 2393 struct lbuf *bp; lmLogFormat() local 2401 bp = lbmAllocate(log, 1); lmLogFormat() 2417 logsuper = (struct logsuper *) bp->l_ldata; lmLogFormat() 2428 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat() 2429 bp->l_blkno = logAddress + sbi->nbperpage; lmLogFormat() 2430 lbmStartIO(bp); lmLogFormat() 2431 if ((rc = lbmIOWait(bp, 0))) lmLogFormat() 2456 lp = (struct logpage *) bp->l_ldata; lmLogFormat() 2471 bp->l_blkno += sbi->nbperpage; lmLogFormat() 2472 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat() 2473 lbmStartIO(bp); lmLogFormat() 2474 if ((rc = lbmIOWait(bp, 0))) lmLogFormat() 2484 bp->l_blkno += sbi->nbperpage; lmLogFormat() 2485 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat() 2486 lbmStartIO(bp); lmLogFormat() 2487 if ((rc = lbmIOWait(bp, 0))) lmLogFormat() 2497 lbmFree(bp); lmLogFormat()
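
The write-queue manipulation in lbmWrite() and lbmIODone() above is a circular singly-linked list in which log->wqueue tracks the tail and tail->l_wqnext is the head. A self-contained sketch of that insert discipline; the struct and names are illustrative, not jfs's real layout:

    #include <stdio.h>

    struct lbuf {
            int pn;
            struct lbuf *next;       /* plays the role of l_wqnext */
    };

    /* Insert at the tail of a circular list whose handle points at
     * the tail; an empty queue is a buffer linked to itself. */
    static void enqueue(struct lbuf **tailp, struct lbuf *bp)
    {
            struct lbuf *tail = *tailp;

            if (!tail) {
                    bp->next = bp;
            } else {
                    bp->next = tail->next;   /* old head */
                    tail->next = bp;
            }
            *tailp = bp;
    }

    int main(void)
    {
            struct lbuf a = { .pn = 1 }, b = { .pn = 2 }, *tail = NULL;

            enqueue(&tail, &a);
            enqueue(&tail, &b);
            /* the head is tail->next, exactly as lbmIODone() reads it */
            printf("head pn=%d tail pn=%d\n", tail->next->pn, tail->pn);
            return 0;
    }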
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | hw_breakpoint.c | 49 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument 51 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint() 58 *slot = bp; arch_install_hw_breakpoint() 81 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument 83 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint() 89 if (*slot == bp) { arch_uninstall_hw_breakpoint() 126 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument 130 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace() 176 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument 178 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info() 180 info->address = bp->attr.bp_addr; arch_build_bp_info() 183 switch (bp->attr.bp_len) { arch_build_bp_info() 201 switch (bp->attr.bp_type) { arch_build_bp_info() 221 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument 223 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings() 227 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings() 284 struct perf_event *bp; hw_breakpoint_handler() local 319 bp = per_cpu(bp_per_reg[i], cpu); hw_breakpoint_handler() 320 if (bp) hw_breakpoint_handler() 330 * bp can be NULL due to concurrent perf counter hw_breakpoint_handler() 333 if (!bp) { hw_breakpoint_handler() 342 if (bp->overflow_handler == ptrace_triggered) hw_breakpoint_handler() 345 perf_bp_event(bp, args->regs); hw_breakpoint_handler() 348 if (!arch_check_bp_in_kernelspace(bp)) { hw_breakpoint_handler() 404 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
|
H A D | ptrace_32.c | 65 void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument 74 attr = bp->attr; ptrace_triggered() 76 modify_user_hw_breakpoint(bp, &attr); ptrace_triggered() 82 struct perf_event *bp; set_single_step() local 85 bp = thread->ptrace_bps[0]; set_single_step() 86 if (!bp) { set_single_step() 93 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, set_single_step() 95 if (IS_ERR(bp)) set_single_step() 96 return PTR_ERR(bp); set_single_step() 98 thread->ptrace_bps[0] = bp; set_single_step() 102 attr = bp->attr; set_single_step() 106 err = modify_user_hw_breakpoint(bp, &attr); set_single_step()
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | hw_breakpoint.h | 56 extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 57 extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 61 int arch_install_hw_breakpoint(struct perf_event *bp); 62 void arch_uninstall_hw_breakpoint(struct perf_event *bp); 63 void hw_breakpoint_pmu_read(struct perf_event *bp); 65 extern void arch_fill_perf_breakpoint(struct perf_event *bp);
|
/linux-4.1.27/net/ax25/ |
H A D | ax25_ip.c | 106 unsigned char *bp = skb->data; ax25_ip_xmit() local 115 dst = (ax25_address *)(bp + 1); ax25_ip_xmit() 116 src = (ax25_address *)(bp + 8); ax25_ip_xmit() 133 if (bp[16] == AX25_P_IP) { ax25_ip_xmit() 165 * to bp which is part of skb->data would not be valid ax25_ip_xmit() 169 bp = ourskb->data; ax25_ip_xmit() 170 dst_c = *(ax25_address *)(bp + 1); ax25_ip_xmit() 171 src_c = *(ax25_address *)(bp + 8); ax25_ip_xmit() 188 bp[7] &= ~AX25_CBIT; ax25_ip_xmit() 189 bp[7] &= ~AX25_EBIT; ax25_ip_xmit() 190 bp[7] |= AX25_SSSID_SPARE; ax25_ip_xmit() 192 bp[14] &= ~AX25_CBIT; ax25_ip_xmit() 193 bp[14] |= AX25_EBIT; ax25_ip_xmit() 194 bp[14] |= AX25_SSSID_SPARE; ax25_ip_xmit()
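
The bp[7] and bp[14] fixups above patch the SSID octets of the destination and source callsigns before transmit. A compilable sketch of the bit discipline; the mask values here are illustrative stand-ins, the authoritative ones live in include/net/ax25.h:

    #include <stdio.h>

    #define AX25_CBIT        0x80   /* command/response bit */
    #define AX25_EBIT        0x01   /* address-extension bit */
    #define AX25_SSSID_SPARE 0x60   /* spare bits, illustrative value */

    int main(void)
    {
            unsigned char bp[17] = { 0 };   /* stand-in AX.25 address field */

            bp[7] &= ~AX25_CBIT;    /* destination SSID: clear C and E */
            bp[7] &= ~AX25_EBIT;
            bp[7] |= AX25_SSSID_SPARE;

            bp[14] &= ~AX25_CBIT;   /* source SSID: E set = last address */
            bp[14] |= AX25_EBIT;
            bp[14] |= AX25_SSSID_SPARE;

            printf("dst ssid %#x, src ssid %#x\n", bp[7], bp[14]);
            return 0;
    }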
|
/linux-4.1.27/drivers/isdn/mISDN/ |
H A D | core.c | 129 char *bp = buf; channelmap_show() local 133 *bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0'; channelmap_show() 135 return bp - buf; channelmap_show() 286 struct Bprotocol *bp; get_all_Bprotocols() local 290 list_for_each_entry(bp, &Bprotocols, list) get_all_Bprotocols() 291 m |= bp->Bprotocols; get_all_Bprotocols() 299 struct Bprotocol *bp; get_Bprotocol4mask() local 302 list_for_each_entry(bp, &Bprotocols, list) get_Bprotocol4mask() 303 if (bp->Bprotocols & m) { get_Bprotocol4mask() 305 return bp; get_Bprotocol4mask() 326 mISDN_register_Bprotocol(struct Bprotocol *bp) mISDN_register_Bprotocol() argument 333 bp->name, bp->Bprotocols); mISDN_register_Bprotocol() 334 old = get_Bprotocol4mask(bp->Bprotocols); mISDN_register_Bprotocol() 338 old->name, old->Bprotocols, bp->name, bp->Bprotocols); mISDN_register_Bprotocol() 342 list_add_tail(&bp->list, &Bprotocols); mISDN_register_Bprotocol() 349 mISDN_unregister_Bprotocol(struct Bprotocol *bp) mISDN_unregister_Bprotocol() argument 354 printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, mISDN_unregister_Bprotocol() 355 bp->Bprotocols); mISDN_unregister_Bprotocol() 357 list_del(&bp->list); mISDN_unregister_Bprotocol()
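
channelmap_show() above renders a channel bitmap as a string of '0'/'1' characters and returns bp - buf, the number of bytes produced. A userspace sketch with a plain integer bitmap standing in for test_channelmap():

    #include <stdio.h>

    #define NCHAN 16

    static int channelmap_show(unsigned int map, char *buf)
    {
            char *bp = buf;

            for (int i = 0; i < NCHAN; i++)
                    *bp++ = (map & (1u << i)) ? '1' : '0';
            *bp = '\0';
            return bp - buf;        /* bytes written, as in the driver */
    }

    int main(void)
    {
            char buf[NCHAN + 1];

            printf("%d: %s\n", channelmap_show(0x0005, buf), buf);
            return 0;
    }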
|
/linux-4.1.27/drivers/staging/speakup/ |
H A D | selection.c | 55 char *bp, *obp; speakup_set_selection() local 102 bp = kmalloc((sel_end-sel_start)/2+1, GFP_ATOMIC); speakup_set_selection() 103 if (!bp) { speakup_set_selection() 108 sel_buffer = bp; speakup_set_selection() 110 obp = bp; speakup_set_selection() 112 *bp = sel_pos(i); speakup_set_selection() 113 if (!ishardspace(*bp++)) speakup_set_selection() 114 obp = bp; speakup_set_selection() 118 if (obp != bp) { speakup_set_selection() 119 bp = obp; speakup_set_selection() 120 *bp++ = '\r'; speakup_set_selection() 122 obp = bp; speakup_set_selection() 125 sel_buffer_lth = bp - sel_buffer; speakup_set_selection()
|
/linux-4.1.27/fs/freevxfs/ |
H A D | vxfs_olt.c | 80 struct buffer_head *bp; vxfs_read_olt() local 85 bp = sb_bread(sbp, vxfs_oblock(sbp, infp->vsi_oltext, bsize)); vxfs_read_olt() 86 if (!bp || !bp->b_data) vxfs_read_olt() 89 op = (struct vxfs_olt *)bp->b_data; vxfs_read_olt() 105 oaddr = bp->b_data + op->olt_size; vxfs_read_olt() 106 eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize); vxfs_read_olt() 124 brelse(bp); vxfs_read_olt() 128 brelse(bp); vxfs_read_olt()
|
H A D | vxfs_subr.c | 104 struct buffer_head *bp; vxfs_bread() local 108 bp = sb_bread(ip->i_sb, pblock); vxfs_bread() 110 return (bp); vxfs_bread() 117 * @bp: buffer skeleton 121 * The vxfs_get_block function fills @bp with the right physical 130 struct buffer_head *bp, int create) vxfs_getblk() 136 map_bh(bp, ip->i_sb, pblock); vxfs_getblk() 129 vxfs_getblk(struct inode *ip, sector_t iblock, struct buffer_head *bp, int create) vxfs_getblk() argument
|
H A D | vxfs_fshead.c | 78 struct buffer_head *bp; vxfs_getfsh() local 80 bp = vxfs_bread(ip, which); vxfs_getfsh() 81 if (bp) { vxfs_getfsh() 86 memcpy(fhp, bp->b_data, sizeof(*fhp)); vxfs_getfsh() 88 put_bh(bp); vxfs_getfsh() 92 brelse(bp); vxfs_getfsh()
|
H A D | vxfs_bmap.c | 130 struct buffer_head *bp = NULL; vxfs_bmap_indir() local 138 bp = sb_bread(ip->i_sb, vxfs_bmap_indir() 140 if (!bp || !buffer_mapped(bp)) vxfs_bmap_indir() 143 typ = ((struct vxfs_typed *)bp->b_data) + vxfs_bmap_indir() 148 brelse(bp); vxfs_bmap_indir() 179 brelse(bp); vxfs_bmap_indir() 185 brelse(bp); vxfs_bmap_indir()
|
H A D | vxfs_super.c | 152 struct buffer_head *bp = NULL; vxfs_fill_super() local 171 bp = sb_bread(sbp, 1); vxfs_fill_super() 172 if (!bp || !buffer_mapped(bp)) { vxfs_fill_super() 180 rsbp = (struct vxfs_sb *)bp->b_data; vxfs_fill_super() 202 infp->vsi_bp = bp; vxfs_fill_super() 240 brelse(bp); vxfs_fill_super()
|
H A D | vxfs_inode.c | 90 struct buffer_head *bp; vxfs_blkiget() local 95 bp = sb_bread(sbp, block); vxfs_blkiget() 97 if (bp && buffer_mapped(bp)) { vxfs_blkiget() 103 dip = (struct vxfs_dinode *)(bp->b_data + offset); vxfs_blkiget() 108 brelse(bp); vxfs_blkiget() 114 brelse(bp); vxfs_blkiget()
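
All six freevxfs readers above repeat one pattern: sb_bread() a block, check the buffer is usable, copy what is needed out of b_data, and brelse() on every exit path. A userspace analogue using pread(); the block size and default file are arbitrary choices for the demo:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define BLKSIZE 1024

    /* Read one block or fail cleanly - the sb_bread()-and-check step. */
    static void *blk_read(int fd, off_t blkno)
    {
            void *buf = malloc(BLKSIZE);

            if (buf && pread(fd, buf, BLKSIZE, blkno * BLKSIZE) != BLKSIZE) {
                    free(buf);      /* short read ~ !buffer_mapped() */
                    return NULL;
            }
            return buf;
    }

    int main(int argc, char **argv)
    {
            int fd = open(argc > 1 ? argv[1] : "/bin/sh", O_RDONLY);
            unsigned char *bp;

            if (fd < 0)
                    return 1;
            bp = blk_read(fd, 0);
            if (bp) {
                    printf("first byte: %#x\n", bp[0]);
                    free(bp);       /* the brelse() of this sketch */
            }
            close(fd);
            return 0;
    }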
|
/linux-4.1.27/arch/um/kernel/ |
H A D | stacktrace.c | 24 unsigned long *sp, bp, addr; dump_trace() local 28 bp = get_frame_pointer(tsk, segv_regs); dump_trace() 31 frame = (struct stack_frame *)bp; dump_trace() 36 if ((unsigned long) sp == bp + sizeof(long)) { dump_trace() 38 bp = (unsigned long)frame; dump_trace()
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | io_no.h | 57 unsigned char *bp = (unsigned char *) buf; io_outsb() local 59 *ap = *bp++; io_outsb() 65 unsigned short *bp = (unsigned short *) buf; io_outsw() local 67 *ap = _swapw(*bp++); io_outsw() 73 unsigned int *bp = (unsigned int *) buf; io_outsl() local 75 *ap = _swapl(*bp++); io_outsl() 81 unsigned char *bp = (unsigned char *) buf; io_insb() local 83 *bp++ = *ap; io_insb() 89 unsigned short *bp = (unsigned short *) buf; io_insw() local 91 *bp++ = _swapw(*ap); io_insw() 97 unsigned int *bp = (unsigned int *) buf; io_insl() local 99 *bp++ = _swapl(*ap); io_insl()
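
The io_outsw()/io_insw() pairs above stream a buffer through one fixed I/O address, byte-swapping each 16-bit word because the peripheral bus has the opposite endianness. A host-side sketch with a volatile variable standing in for the MMIO register:

    #include <stdio.h>

    static unsigned short _swapw(unsigned short v)
    {
            return (v << 8) | (v >> 8);
    }

    /* Write len words to the same "register", swapped like io_outsw(). */
    static void io_outsw(volatile unsigned short *port,
                         const void *buf, int len)
    {
            const unsigned short *bp = buf;

            while (len--)
                    *port = _swapw(*bp++);
    }

    int main(void)
    {
            volatile unsigned short port = 0;
            unsigned short data[2] = { 0x1234, 0xabcd };

            io_outsw(&port, data, 2);
            printf("last word on the wire: %#x\n", port); /* 0xcdab */
            return 0;
    }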
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | hw_breakpoint.c | 104 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument 106 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint() 114 *slot = bp; arch_install_hw_breakpoint() 144 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument 146 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint() 153 if (*slot == bp) { arch_uninstall_hw_breakpoint() 173 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument 177 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace() 180 len = bp->attr.bp_len; arch_check_bp_in_kernelspace() 231 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument 233 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info() 235 info->address = bp->attr.bp_addr; arch_build_bp_info() 238 switch (bp->attr.bp_type) { arch_build_bp_info() 252 if (bp->attr.bp_len == sizeof(long)) { arch_build_bp_info() 263 switch (bp->attr.bp_len) { arch_build_bp_info() 279 if (!is_power_of_2(bp->attr.bp_len)) arch_build_bp_info() 283 info->mask = bp->attr.bp_len - 1; arch_build_bp_info() 293 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument 295 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings() 300 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings() 347 struct perf_event *bp; aout_dump_debugregs() local 352 bp = thread->ptrace_bps[i]; aout_dump_debugregs() 354 if (bp && !bp->attr.disabled) { aout_dump_debugregs() 355 dump->u_debugreg[i] = bp->attr.bp_addr; aout_dump_debugregs() 356 info = counter_arch_bp(bp); aout_dump_debugregs() 418 struct perf_event *bp; hw_breakpoint_handler() local 458 bp = per_cpu(bp_per_reg[i], cpu); hw_breakpoint_handler() 465 * bp can be NULL due to lazy debug register switching hw_breakpoint_handler() 468 if (!bp) { hw_breakpoint_handler() 473 perf_bp_event(bp, args->regs); hw_breakpoint_handler() 479 if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE) hw_breakpoint_handler() 511 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
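
arch_build_bp_info() above is what translates a perf_event_attr into debug-register state, so the whole path can be driven from userspace with perf_event_open(2). A minimal watchpoint demo; it may need root or a permissive kernel.perf_event_paranoid, and error handling is pared down:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>

    volatile int watched;            /* the data address being watched */

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_BREAKPOINT;
            attr.size = sizeof(attr);
            attr.bp_type = HW_BREAKPOINT_W;         /* fire on writes */
            attr.bp_addr = (unsigned long)&watched;
            attr.bp_len = HW_BREAKPOINT_LEN_4;
            attr.exclude_kernel = 1;
            attr.exclude_hv = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            watched = 42;                           /* should count once */

            read(fd, &count, sizeof(count));
            printf("watchpoint hits: %lld\n", count);
            close(fd);
            return 0;
    }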
|
H A D | dumpstack_32.c | 42 unsigned long *stack, unsigned long bp, dump_trace() 60 if (!bp) dump_trace() 61 bp = stack_frame(task, regs); dump_trace() 72 bp = ops->walk_stack(context, stack, bp, ops, data, dump_trace() 95 unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() 120 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl() 41 dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) dump_trace() argument 94 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() argument
|
H A D | dumpstack.c | 97 unsigned long *stack, unsigned long bp, print_context_stack() 101 struct stack_frame *frame = (struct stack_frame *)bp; print_context_stack() 108 if ((unsigned long) stack == bp + sizeof(long)) { print_context_stack() 111 bp = (unsigned long) frame; print_context_stack() 119 return bp; print_context_stack() 125 unsigned long *stack, unsigned long bp, print_context_stack_bp() 129 struct stack_frame *frame = (struct stack_frame *)bp; print_context_stack_bp() 171 unsigned long *stack, unsigned long bp, char *log_lvl) show_trace_log_lvl() 174 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); show_trace_log_lvl() 178 unsigned long *stack, unsigned long bp) show_trace() 180 show_trace_log_lvl(task, regs, stack, bp, ""); show_trace() 185 unsigned long bp = 0; show_stack() local 194 bp = stack_frame(current, NULL); show_stack() 197 show_stack_log_lvl(task, NULL, sp, bp, ""); show_stack() 96 print_context_stack(struct thread_info *tinfo, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) print_context_stack() argument 124 print_context_stack_bp(struct thread_info *tinfo, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) print_context_stack_bp() argument 170 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, char *log_lvl) show_trace_log_lvl() argument 177 show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp) show_trace() argument
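
print_context_stack() above is a frame-pointer walk: each frame begins with the caller's saved bp, followed by the return address, and bp = *bp steps up one frame. A userspace sketch; build with gcc -O0 -fno-omit-frame-pointer, and the two-frame limit is a deliberate safety rail:

    #include <stdio.h>

    /* Mirrors the kernel's struct stack_frame: saved bp first, then
     * the return address pushed by the call. */
    struct stack_frame {
            struct stack_frame *next_frame;
            unsigned long return_address;
    };

    static void __attribute__((noinline)) leaf(void)
    {
            struct stack_frame *frame = __builtin_frame_address(0);

            /* Walk only our own two frames; libc's may lack frame
             * pointers, so going further is not safe in a demo. */
            for (int depth = 0; frame && depth < 2; depth++) {
                    printf("frame %p ret %#lx\n",
                           (void *)frame, frame->return_address);
                    frame = frame->next_frame;      /* bp = *bp */
            }
    }

    static void __attribute__((noinline)) mid(void)
    {
            leaf();
    }

    int main(void)
    {
            mid();
            return 0;
    }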
|
H A D | dumpstack_64.c | 152 unsigned long *stack, unsigned long bp, dump_trace() 175 if (!bp) dump_trace() 176 bp = stack_frame(task, regs); dump_trace() 205 bp = ops->walk_stack(tinfo, stack, bp, ops, dump_trace() 221 bp = ops->walk_stack(tinfo, stack, bp, dump_trace() 243 bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); dump_trace() 250 unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() 297 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl() 151 dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) dump_trace() argument 249 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() argument
|
H A D | kgdb.c | 61 { "bp", 4, offsetof(struct pt_regs, bp) }, 77 { "bp", 8, offsetof(struct pt_regs, bp) }, 210 struct perf_event *bp; kgdb_correct_hw_break() local 224 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); kgdb_correct_hw_break() 225 info = counter_arch_bp(bp); kgdb_correct_hw_break() 226 if (bp->attr.disabled != 1) kgdb_correct_hw_break() 228 bp->attr.bp_addr = breakinfo[breakno].addr; kgdb_correct_hw_break() 229 bp->attr.bp_len = breakinfo[breakno].len; kgdb_correct_hw_break() 230 bp->attr.bp_type = breakinfo[breakno].type; kgdb_correct_hw_break() 234 val = arch_install_hw_breakpoint(bp); kgdb_correct_hw_break() 236 bp->attr.disabled = 0; kgdb_correct_hw_break() 315 struct perf_event *bp; kgdb_remove_all_hw_break() local 320 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); kgdb_remove_all_hw_break() 321 if (!bp->attr.disabled) { kgdb_remove_all_hw_break() 322 arch_uninstall_hw_breakpoint(bp); kgdb_remove_all_hw_break() 323 bp->attr.disabled = 1; kgdb_remove_all_hw_break() 401 struct perf_event *bp; kgdb_disable_hw_debug() local 413 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); kgdb_disable_hw_debug() 414 if (bp->attr.disabled == 1) kgdb_disable_hw_debug() 416 arch_uninstall_hw_breakpoint(bp); kgdb_disable_hw_debug() 417 bp->attr.disabled = 1; kgdb_disable_hw_debug()
|
H A D | asm-offsets_64.c | 43 ENTRY(bp); main() 58 ENTRY(bp); main()
|
H A D | process_32.c | 94 regs->si, regs->di, regs->bp, sp); __show_regs() 152 childregs->bp = arg; copy_thread() 334 unsigned long bp, sp, ip; get_wchan() local 343 /* include/asm-i386/system.h:switch_to() pushes bp last. */ get_wchan() 344 bp = *(unsigned long *) sp; get_wchan() 346 if (bp < stack_page || bp > top_ebp+stack_page) get_wchan() 348 ip = *(unsigned long *) (bp+4); get_wchan() 351 bp = *(unsigned long *) bp; get_wchan()
|
H A D | asm-offsets_32.c | 26 OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp); foo() 49 OFFSET(PT_EBP, pt_regs, bp); foo()
|
H A D | ptrace.c | 80 REG_OFFSET_NAME(bp), 558 static void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument 570 if (thread->ptrace_bps[i] == bp) ptrace_triggered() 582 static unsigned long ptrace_get_dr7(struct perf_event *bp[]) ptrace_get_dr7() argument 589 if (bp[i] && !bp[i]->attr.disabled) { ptrace_get_dr7() 590 info = counter_arch_bp(bp[i]); ptrace_get_dr7() 631 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, ptrace_modify_breakpoint() argument 634 struct perf_event_attr attr = bp->attr; ptrace_modify_breakpoint() 641 return modify_user_hw_breakpoint(bp, &attr); ptrace_modify_breakpoint() 662 struct perf_event *bp = thread->ptrace_bps[i]; ptrace_write_dr7() local 664 if (!bp) { ptrace_write_dr7() 668 bp = ptrace_register_breakpoint(tsk, ptrace_write_dr7() 670 if (IS_ERR(bp)) { ptrace_write_dr7() 671 rc = PTR_ERR(bp); ptrace_write_dr7() 675 thread->ptrace_bps[i] = bp; ptrace_write_dr7() 679 rc = ptrace_modify_breakpoint(bp, len, type, disabled); ptrace_write_dr7() 704 struct perf_event *bp = thread->ptrace_bps[n]; ptrace_get_debugreg() local 706 if (bp) ptrace_get_debugreg() 707 val = bp->hw.info.address; ptrace_get_debugreg() 720 struct perf_event *bp = t->ptrace_bps[nr]; ptrace_set_breakpoint_addr() local 723 if (!bp) { ptrace_set_breakpoint_addr() 725 * Put stub len and type to create an inactive but correct bp. ptrace_set_breakpoint_addr() 735 bp = ptrace_register_breakpoint(tsk, ptrace_set_breakpoint_addr() 738 if (IS_ERR(bp)) ptrace_set_breakpoint_addr() 739 err = PTR_ERR(bp); ptrace_set_breakpoint_addr() 741 t->ptrace_bps[nr] = bp; ptrace_set_breakpoint_addr() 743 struct perf_event_attr attr = bp->attr; ptrace_set_breakpoint_addr() 746 err = modify_user_hw_breakpoint(bp, &attr); ptrace_set_breakpoint_addr() 968 R32(ebp, bp); putreg32() 1038 R32(ebp, bp); getreg32() 1512 sd.args[5] = regs->bp; syscall_trace_enter_phase1()
|
H A D | time.c | 35 return *(unsigned long *)(regs->bp + sizeof(long)); profile_pc()
|
H A D | perf_regs.c | 25 PT_REGS_OFFSET(PERF_REG_X86_BP, bp), 157 regs_user_copy->bp = -1; perf_get_regs_user()
|
/linux-4.1.27/Documentation/spi/ |
H A D | spidev_fdx.c | 19 unsigned char buf[32], *bp; do_read() local 42 bp = buf + 2; do_read() 44 printf(" %02x", *bp++); do_read() 51 unsigned char buf[32], *bp; do_msg() local 74 for (bp = buf; len; len--) do_msg() 75 printf(" %02x", *bp++); do_msg()
|
/linux-4.1.27/net/sctp/ |
H A D | bind_addr.c | 125 void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port) sctp_bind_addr_init() argument 127 INIT_LIST_HEAD(&bp->address_list); sctp_bind_addr_init() 128 bp->port = port; sctp_bind_addr_init() 132 static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) sctp_bind_addr_clean() argument 137 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { sctp_bind_addr_clean() 145 void sctp_bind_addr_free(struct sctp_bind_addr *bp) sctp_bind_addr_free() argument 148 sctp_bind_addr_clean(bp); sctp_bind_addr_free() 152 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, sctp_add_bind_addr() argument 168 addr->a.v4.sin_port = htons(bp->port); sctp_add_bind_addr() 178 list_add_tail_rcu(&addr->list, &bp->address_list); sctp_add_bind_addr() 187 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) sctp_del_bind_addr() argument 195 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { sctp_del_bind_addr() 219 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, sctp_bind_addrs_to_raw() argument 236 list_for_each(pos, &bp->address_list) { sctp_bind_addrs_to_raw() 254 list_for_each_entry(addr, &bp->address_list, list) { sctp_bind_addrs_to_raw() 271 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, sctp_raw_to_bind_addrs() argument 289 sctp_bind_addr_clean(bp); sctp_raw_to_bind_addrs() 294 retval = sctp_add_bind_addr(bp, &addr, SCTP_ADDR_SRC, gfp); sctp_raw_to_bind_addrs() 297 sctp_bind_addr_clean(bp); sctp_raw_to_bind_addrs() 314 int sctp_bind_addr_match(struct sctp_bind_addr *bp, sctp_bind_addr_match() argument 322 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_match() 336 * the bp. 338 int sctp_bind_addr_conflict(struct sctp_bind_addr *bp, sctp_bind_addr_conflict() argument 359 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_conflict() 373 int sctp_bind_addr_state(const struct sctp_bind_addr *bp, sctp_bind_addr_state() argument 385 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_state() 401 union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, sctp_find_unmatch_addr() argument 416 list_for_each_entry(laddr, &bp->address_list, list) { sctp_find_unmatch_addr() 525 struct sctp_bind_addr *bp; sctp_is_ep_boundall() local 528 bp = &sctp_sk(sk)->ep->base.bind_addr; sctp_is_ep_boundall() 529 if (sctp_list_single_entry(&bp->address_list)) { sctp_is_ep_boundall() 530 addr = list_entry(bp->address_list.next, sctp_is_ep_boundall()
|
/linux-4.1.27/lib/mpi/ |
H A D | mpi-pow.c | 41 mpi_ptr_t rp, ep, mp, bp; mpi_powm() local 91 bp = bp_marker = mpi_alloc_limb_space(bsize + 1); mpi_powm() 92 if (!bp) mpi_powm() 94 MPN_COPY(bp, base->d, bsize); mpi_powm() 97 mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); mpi_powm() 101 MPN_NORMALIZE(bp, bsize); mpi_powm() 103 bp = base->d; mpi_powm() 115 if (rp == ep || rp == mp || rp == bp) { mpi_powm() 126 if (rp == bp) { mpi_powm() 129 bp = bp_marker = mpi_alloc_limb_space(bsize); mpi_powm() 130 if (!bp) mpi_powm() 132 MPN_COPY(bp, rp, bsize); mpi_powm() 151 MPN_COPY(rp, bp, bsize); mpi_powm() 225 /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ mpi_powm() 229 (xp, rp, rsize, bp, bsize, mpi_powm() 234 (xp, rp, rsize, bp, bsize, mpi_powm()
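
The limb loop in mpi_powm() is left-to-right binary exponentiation with the base pre-reduced modulo m (the mpihelp_divrem() near the top). The same algorithm on machine words, correct only while m fits in 32 bits so the products cannot overflow:

    #include <stdio.h>

    static unsigned long powm(unsigned long b, unsigned long e,
                              unsigned long m)
    {
            unsigned long r = 1;

            b %= m;                          /* the divrem step */
            for (int i = 63; i >= 0; i--) {
                    r = (r * r) % m;         /* square for every bit */
                    if (e & (1UL << i))
                            r = (r * b) % m; /* multiply on set bits */
            }
            return r;
    }

    int main(void)
    {
            printf("5^117 mod 19 = %lu\n", powm(5, 117, 19)); /* 1 */
            return 0;
    }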
|
/linux-4.1.27/arch/powerpc/xmon/ |
H A D | xmon.c | 108 #define BP_NUM(bp) ((bp) - bpts + 1) 405 struct bpt *bp; xmon_core() local 418 bp = in_breakpoint_table(regs->nip, &offset); xmon_core() 419 if (bp != NULL) { xmon_core() 420 regs->nip = bp->address + offset; xmon_core() 421 atomic_dec(&bp->ref_count); xmon_core() 452 bp = NULL; xmon_core() 454 bp = at_breakpoint(regs->nip); xmon_core() 455 if (bp || unrecoverable_excp(regs)) xmon_core() 461 if (bp) { xmon_core() 463 cpu, BP_NUM(bp)); xmon_core() 504 if (bp || TRAP(regs) == 0xd00) xmon_core() 554 bp = at_breakpoint(regs->nip); xmon_core() 555 if (bp) { xmon_core() 556 printf("Stopped at breakpoint %lx (", BP_NUM(bp)); xmon_core() 565 if (bp || TRAP(regs) == 0xd00) xmon_core() 578 bp = at_breakpoint(regs->nip); xmon_core() 579 if (bp != NULL) { xmon_core() 580 regs->nip = (unsigned long) &bp->instr[0]; xmon_core() 581 atomic_inc(&bp->ref_count); xmon_core() 586 bp = at_breakpoint(regs->nip); xmon_core() 587 if (bp != NULL) { xmon_core() 588 int stepped = emulate_step(regs, bp->instr[0]); xmon_core() 590 regs->nip = (unsigned long) &bp->instr[0]; xmon_core() 591 atomic_inc(&bp->ref_count); xmon_core() 594 (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); xmon_core() 632 struct bpt *bp; xmon_bpt() local 638 /* Are we at the trap at bp->instr[1] for some bp? */ xmon_bpt() 639 bp = in_breakpoint_table(regs->nip, &offset); xmon_bpt() 640 if (bp != NULL && offset == 4) { xmon_bpt() 641 regs->nip = bp->address + 4; xmon_bpt() 642 atomic_dec(&bp->ref_count); xmon_bpt() 647 bp = at_breakpoint(regs->nip); xmon_bpt() 648 if (!bp) xmon_bpt() 695 struct bpt *bp; xmon_fault_handler() local 702 bp = in_breakpoint_table(regs->nip, &offset); xmon_fault_handler() 703 if (bp != NULL) { xmon_fault_handler() 704 regs->nip = bp->address + offset; xmon_fault_handler() 705 atomic_dec(&bp->ref_count); xmon_fault_handler() 715 struct bpt *bp; at_breakpoint() local 717 bp = bpts; at_breakpoint() 718 for (i = 0; i < NBPTS; ++i, ++bp) at_breakpoint() 719 if (bp->enabled && pc == bp->address) at_breakpoint() 720 return bp; at_breakpoint() 741 struct bpt *bp; new_breakpoint() local 744 bp = at_breakpoint(a); new_breakpoint() 745 if (bp) new_breakpoint() 746 return bp; new_breakpoint() 748 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { new_breakpoint() 749 if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { new_breakpoint() 750 bp->address = a; new_breakpoint() 751 bp->instr[1] = bpinstr; new_breakpoint() 752 store_inst(&bp->instr[1]); new_breakpoint() 753 return bp; new_breakpoint() 764 struct bpt *bp; insert_bpts() local 766 bp = bpts; insert_bpts() 767 for (i = 0; i < NBPTS; ++i, ++bp) { insert_bpts() 768 if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) insert_bpts() 770 if (mread(bp->address, &bp->instr[0], 4) != 4) { insert_bpts() 772 "disabling breakpoint there\n", bp->address); insert_bpts() 773 bp->enabled = 0; insert_bpts() 776 if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { insert_bpts() 778 "instruction, disabling it\n", bp->address); insert_bpts() 779 bp->enabled = 0; insert_bpts() 782 store_inst(&bp->instr[0]); insert_bpts() 783 if (bp->enabled & BP_CIABR) insert_bpts() 785 if (mwrite(bp->address, &bpinstr, 4) != 4) { insert_bpts() 787 "disabling breakpoint there\n", bp->address); insert_bpts() 788 bp->enabled &= ~BP_TRAP; insert_bpts() 791 store_inst((void *)bp->address); insert_bpts() 813 struct bpt *bp; remove_bpts() local 816 bp = bpts; remove_bpts() 817 for (i = 0; i < NBPTS; ++i, ++bp) { remove_bpts() 818 if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) remove_bpts() 820 if (mread(bp->address, &instr, 4) == 4 remove_bpts() 822 && mwrite(bp->address, &bp->instr, 4) != 4) remove_bpts() 824 bp->address); remove_bpts() 826 store_inst((void *)bp->address); remove_bpts() 1178 struct bpt *bp; bpt_cmds() local 1220 bp = new_breakpoint(a); bpt_cmds() 1221 if (bp != NULL) { bpt_cmds() 1222 bp->enabled |= BP_CIABR; bpt_cmds() 1223 iabr = bp; bpt_cmds() 1241 bp = &bpts[a-1]; /* bp nums are 1 based */ bpt_cmds() 1244 bp = at_breakpoint(a); bpt_cmds() 1245 if (bp == NULL) { bpt_cmds() 1251 printf("Cleared breakpoint %lx (", BP_NUM(bp)); bpt_cmds() 1252 xmon_print_symbol(bp->address, " ", ")\n"); bpt_cmds() 1253 bp->enabled = 0; bpt_cmds() 1275 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { bpt_cmds() 1276 if (!bp->enabled) bpt_cmds() 1278 printf("%2x %s ", BP_NUM(bp), bpt_cmds() 1279 (bp->enabled & BP_CIABR) ? "inst": "trap"); bpt_cmds() 1280 xmon_print_symbol(bp->address, " ", "\n"); bpt_cmds() 1287 bp = new_breakpoint(a); bpt_cmds() 1288 if (bp != NULL) bpt_cmds() 1289 bp->enabled |= BP_TRAP; bpt_cmds()
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | hw_breakpoint.h | 120 extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 121 extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 129 int arch_install_hw_breakpoint(struct perf_event *bp); 130 void arch_uninstall_hw_breakpoint(struct perf_event *bp); 131 void hw_breakpoint_pmu_read(struct perf_event *bp);
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | chmc.c | 486 static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) chmc_bank_match() argument 492 if (bp->valid == 0) chmc_bank_match() 496 upper_bits ^= bp->um; /* What bits are different? */ chmc_bank_match() 498 upper_bits |= bp->uk; /* What bits don't matter for matching? */ chmc_bank_match() 505 lower_bits ^= bp->lm; /* What bits are different? */ chmc_bank_match() 507 lower_bits |= bp->lk; /* What bits don't matter for matching? */ chmc_bank_match() 526 struct chmc_bank_info *bp; chmc_find_bank() local 528 bp = &p->logical_banks[bank_no]; chmc_find_bank() 529 if (chmc_bank_match(bp, phys_addr)) chmc_find_bank() 530 return bp; chmc_find_bank() 542 struct chmc_bank_info *bp; chmc_print_dimm() local 546 bp = chmc_find_bank(phys_addr); chmc_print_dimm() 547 if (bp == NULL || chmc_print_dimm() 557 prop = &bp->p->layout_prop; chmc_print_dimm() 558 bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); chmc_print_dimm() 631 struct chmc_bank_info *bp = &p->logical_banks[which_bank]; chmc_interpret_one_decode_reg() local 633 bp->p = p; chmc_interpret_one_decode_reg() 634 bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; chmc_interpret_one_decode_reg() 635 bp->raw_reg = val; chmc_interpret_one_decode_reg() 636 bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; chmc_interpret_one_decode_reg() 637 bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; chmc_interpret_one_decode_reg() 638 bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; chmc_interpret_one_decode_reg() 639 bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; chmc_interpret_one_decode_reg() 640 bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; chmc_interpret_one_decode_reg() 642 bp->base = (bp->um); chmc_interpret_one_decode_reg() 643 bp->base &= ~(bp->uk); chmc_interpret_one_decode_reg() 644 bp->base <<= PA_UPPER_BITS_SHIFT; chmc_interpret_one_decode_reg() 646 switch(bp->lk) { chmc_interpret_one_decode_reg() 649 bp->interleave = 1; chmc_interpret_one_decode_reg() 653 bp->interleave = 2; chmc_interpret_one_decode_reg() 657 bp->interleave = 4; chmc_interpret_one_decode_reg() 661 bp->interleave = 8; chmc_interpret_one_decode_reg() 665 bp->interleave = 16; chmc_interpret_one_decode_reg() 672 bp->size = (((unsigned long)bp->uk & chmc_interpret_one_decode_reg() 674 bp->size /= bp->interleave; chmc_interpret_one_decode_reg()
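
chmc_bank_match() above compares an address against a bank using two masks: um/lm say what the bits should be, uk/lk say which bits are don't-cares. Folding the driver's double-negation into one expression gives this predicate; names and field widths are invented for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    static bool bank_match(unsigned long addr, unsigned long match,
                           unsigned long dontcare)
    {
            unsigned long diff = addr ^ match;  /* which bits differ? */

            return (diff & ~dontcare) == 0;     /* ignore don't-cares */
    }

    int main(void)
    {
            /* bank at 0x4000 where the low 12 bits are don't-cares */
            printf("%d %d\n",
                   bank_match(0x4abc, 0x4000, 0xfff),
                   bank_match(0x5abc, 0x4000, 0xfff)); /* 1 0 */
            return 0;
    }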
|
/linux-4.1.27/drivers/input/joystick/ |
H A D | twidjoy.c | 106 struct twidjoy_button_spec *bp; twidjoy_process_packet() local 111 for (bp = twidjoy_buttons; bp->bitmask; bp++) { twidjoy_process_packet() 112 int value = (button_bits & (bp->bitmask << bp->bitshift)) >> bp->bitshift; twidjoy_process_packet() 115 for (i = 0; i < bp->bitmask; i++) twidjoy_process_packet() 116 input_report_key(dev, bp->buttons[i], i+1 == value); twidjoy_process_packet() 183 struct twidjoy_button_spec *bp; twidjoy_connect() local 209 for (bp = twidjoy_buttons; bp->bitmask; bp++) twidjoy_connect() 210 for (i = 0; i < bp->bitmask; i++) twidjoy_connect() 211 set_bit(bp->buttons[i], input_dev->keybit); twidjoy_connect()
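
twidjoy_process_packet() above decodes grouped buttons: each spec gives a shift and a mask, and the extracted value picks which button of the group is held (i+1 == value). A sketch with one invented group:

    #include <stdio.h>

    struct button_spec {
            int bitshift;
            int bitmask;
    };

    int main(void)
    {
            struct button_spec spec = { .bitshift = 4, .bitmask = 7 };
            unsigned int button_bits = 3u << 4;  /* group value 3 */
            int value = (button_bits & (spec.bitmask << spec.bitshift))
                            >> spec.bitshift;

            for (int i = 0; i < spec.bitmask; i++)
                    printf("button %d: %s\n", i + 1,
                           i + 1 == value ? "down" : "up");
            return 0;
    }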
|
/linux-4.1.27/drivers/tty/vt/ |
H A D | selection.c | 163 char *bp, *obp; set_selection() local 298 bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL); set_selection() 299 if (!bp) { set_selection() 305 sel_buffer = bp; set_selection() 307 obp = bp; set_selection() 311 bp += store_utf8(c, bp); set_selection() 313 *bp++ = c; set_selection() 315 obp = bp; set_selection() 319 if (obp != bp) { set_selection() 320 bp = obp; set_selection() 321 *bp++ = '\r'; set_selection() 323 obp = bp; set_selection() 326 sel_buffer_lth = bp - sel_buffer; set_selection()
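
The obp/bp pair above implements trailing-blank stripping: obp is only advanced past non-space characters, so rewinding bp to obp at the end of a console line drops the trailing spaces before the '\r' is stored. A sketch of just that idiom, assuming fixed-width lines and no UTF-8 handling:

    #include <ctype.h>
    #include <stddef.h>
    #include <stdio.h>

    static size_t copy_sel(const char *src, size_t n, size_t cols,
                           char *out)
    {
            char *bp = out, *obp = out;

            for (size_t i = 0; i < n; i++) {
                    *bp = src[i];
                    if (!isspace((unsigned char)*bp++))
                            obp = bp;       /* last non-space + 1 */
                    if ((i + 1) % cols == 0) {
                            if (obp != bp) {
                                    bp = obp;       /* strip blanks */
                                    *bp++ = '\r';
                            }
                            obp = bp;
                    }
            }
            return bp - out;
    }

    int main(void)
    {
            char out[32];
            size_t len = copy_sel("ab  cd  ", 8, 4, out);

            for (size_t i = 0; i < len; i++)
                    printf(out[i] == '\r' ? "\\r" : "%c", out[i]);
            printf(" (%zu bytes)\n", len);
            return 0;
    }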
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | hw_breakpoint.h | 63 extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 64 extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 67 int arch_install_hw_breakpoint(struct perf_event *bp); 68 void arch_uninstall_hw_breakpoint(struct perf_event *bp); 69 void hw_breakpoint_pmu_read(struct perf_event *bp); 73 extern void ptrace_triggered(struct perf_event *bp,
|
/linux-4.1.27/arch/arm64/kernel/ |
H A D | hw_breakpoint.c | 185 * @bp: perf_event to setup 194 struct perf_event *bp, hw_breakpoint_slot_setup() 205 *slot = bp; hw_breakpoint_slot_setup() 210 if (*slot == bp) { hw_breakpoint_slot_setup() 216 if (*slot == bp) hw_breakpoint_slot_setup() 227 static int hw_breakpoint_control(struct perf_event *bp, hw_breakpoint_control() argument 230 struct arch_hw_breakpoint *info = counter_arch_bp(bp); hw_breakpoint_control() 253 i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); hw_breakpoint_control() 293 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument 295 return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); arch_install_hw_breakpoint() 298 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument 300 hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); arch_uninstall_hw_breakpoint() 326 * Check whether bp virtual address is in kernel space. 328 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument 332 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace() 390 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument 392 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info() 395 switch (bp->attr.bp_type) { arch_build_bp_info() 413 switch (bp->attr.bp_len) { arch_build_bp_info() 452 info->address = bp->attr.bp_addr; arch_build_bp_info() 459 if (arch_check_bp_in_kernelspace(bp)) arch_build_bp_info() 465 info->ctrl.enabled = !bp->attr.disabled; arch_build_bp_info() 473 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument 475 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings() 480 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings() 530 if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) arch_validate_hwbkpt_settings() 586 struct perf_event *bp, **slots; breakpoint_handler() local 597 bp = slots[i]; breakpoint_handler() 599 if (bp == NULL) breakpoint_handler() 613 counter_arch_bp(bp)->trigger = addr; breakpoint_handler() 614 perf_bp_event(bp, regs); breakpoint_handler() 617 if (!bp->overflow_handler) breakpoint_handler() 943 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument 193 hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, struct perf_event *bp, enum hw_breakpoint_ops ops) hw_breakpoint_slot_setup() argument
|
H A D | ptrace.c | 73 static void ptrace_hbptriggered(struct perf_event *bp, ptrace_hbptriggered() argument 77 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); ptrace_hbptriggered() 92 if (current->thread.debug.hbp_break[i] == bp) { ptrace_hbptriggered() 99 if (current->thread.debug.hbp_watch[i] == bp) { ptrace_hbptriggered() 143 struct perf_event *bp = ERR_PTR(-EINVAL); ptrace_hbp_get_event() local 148 bp = tsk->thread.debug.hbp_break[idx]; ptrace_hbp_get_event() 152 bp = tsk->thread.debug.hbp_watch[idx]; ptrace_hbp_get_event() 156 return bp; ptrace_hbp_get_event() 162 struct perf_event *bp) ptrace_hbp_set_event() 169 tsk->thread.debug.hbp_break[idx] = bp; ptrace_hbp_set_event() 175 tsk->thread.debug.hbp_watch[idx] = bp; ptrace_hbp_set_event() 188 struct perf_event *bp; ptrace_hbp_create() local 214 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); ptrace_hbp_create() 215 if (IS_ERR(bp)) ptrace_hbp_create() 216 return bp; ptrace_hbp_create() 218 err = ptrace_hbp_set_event(note_type, tsk, idx, bp); ptrace_hbp_create() 222 return bp; ptrace_hbp_create() 287 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_ctrl() local 289 if (IS_ERR(bp)) ptrace_hbp_get_ctrl() 290 return PTR_ERR(bp); ptrace_hbp_get_ctrl() 292 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; ptrace_hbp_get_ctrl() 301 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_addr() local 303 if (IS_ERR(bp)) ptrace_hbp_get_addr() 304 return PTR_ERR(bp); ptrace_hbp_get_addr() 306 *addr = bp ? bp->attr.bp_addr : 0; ptrace_hbp_get_addr() 314 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_initialised_bp() local 316 if (!bp) ptrace_hbp_get_initialised_bp() 317 bp = ptrace_hbp_create(note_type, tsk, idx); ptrace_hbp_get_initialised_bp() 319 return bp; ptrace_hbp_get_initialised_bp() 328 struct perf_event *bp; ptrace_hbp_set_ctrl() local 332 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); ptrace_hbp_set_ctrl() 333 if (IS_ERR(bp)) { ptrace_hbp_set_ctrl() 334 err = PTR_ERR(bp); ptrace_hbp_set_ctrl() 338 attr = bp->attr; ptrace_hbp_set_ctrl() 344 return modify_user_hw_breakpoint(bp, &attr); ptrace_hbp_set_ctrl() 353 struct perf_event *bp; ptrace_hbp_set_addr() local 356 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); ptrace_hbp_set_addr() 357 if (IS_ERR(bp)) { ptrace_hbp_set_addr() 358 err = PTR_ERR(bp); ptrace_hbp_set_addr() 362 attr = bp->attr; ptrace_hbp_set_addr() 364 err = modify_user_hw_breakpoint(bp, &attr); ptrace_hbp_set_addr() 159 ptrace_hbp_set_event(unsigned int note_type, struct task_struct *tsk, unsigned long idx, struct perf_event *bp) ptrace_hbp_set_event() argument
|
H A D | debug-monitors.c | 333 bool bp = false; aarch32_break_handler() local 347 bp = thumb_instr == AARCH32_BREAK_THUMB2_HI; aarch32_break_handler() 349 bp = thumb_instr == AARCH32_BREAK_THUMB; aarch32_break_handler() 355 bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM; aarch32_break_handler() 358 if (!bp) aarch32_break_handler()
|
/linux-4.1.27/drivers/media/tuners/ |
H A D | tda827x.c | 98 u8 bp; member in struct:tda827x_data 105 { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1}, 106 { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1}, 107 { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0}, 108 { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0}, 109 { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0}, 110 { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0}, 111 { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, 112 { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1}, 113 { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1}, 114 { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, 115 { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, 116 { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0}, 117 { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, 118 { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, 119 { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1}, 120 { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1}, 121 { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, 122 { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, 123 { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, 124 { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, 125 { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1}, 126 { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1}, 127 { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, 128 { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, 129 { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, 130 { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0}, 131 { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, 132 { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0}, 133 { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0} 196 tda827x_table[i].bp; tda827xo_set_params() 284 (tda827x_table[i].bs << 3) + tda827x_table[i].bp; tda827xo_set_analog_params()
|
/linux-4.1.27/arch/mips/include/uapi/asm/ |
H A D | break.h | 18 #define BRK_USERBP 0 /* User bp (used by debuggers) */ 19 #define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
|
/linux-4.1.27/net/bridge/netfilter/ |
H A D | ebt_among.c | 83 const __be32 *bp; get_ip_dst() local 91 bp = skb_header_pointer(skb, sizeof(struct arphdr) + get_ip_dst() 94 if (bp == NULL) get_ip_dst() 96 *addr = *bp; get_ip_dst() 114 const __be32 *bp; get_ip_src() local 122 bp = skb_header_pointer(skb, sizeof(struct arphdr) + get_ip_src() 124 if (bp == NULL) get_ip_src() 126 *addr = *bp; get_ip_src()
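
get_ip_dst()/get_ip_src() above lean on skb_header_pointer(), a bounds-checked read of a field at an offset into the packet; a NULL return means the packet was too short. A simplified userspace analogue that always copies into the caller's scratch buffer; the offsets are illustrative, not the real ARP layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const void *hdr_ptr(const uint8_t *pkt, size_t pktlen,
                               size_t off, size_t fieldlen, void *scratch)
    {
            if (off + fieldlen > pktlen)
                    return NULL;            /* truncated packet */
            memcpy(scratch, pkt + off, fieldlen);
            return scratch;
    }

    int main(void)
    {
            uint8_t pkt[28] = { [24] = 192, 0, 2, 1 }; /* fake address */
            uint32_t addr;
            const uint8_t *bp = hdr_ptr(pkt, sizeof(pkt), 24, 4, &addr);

            if (bp)
                    printf("addr: %u.%u.%u.%u\n",
                           bp[0], bp[1], bp[2], bp[3]);
            return 0;
    }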
|
/linux-4.1.27/arch/blackfin/lib/ |
H A D | strcpy.S | 32 if cc jump 1b (bp);
|
H A D | strcmp.S | 36 if cc jump 1b (bp); /* no, keep going */
|
H A D | strncmp.S | 39 if ! cc jump 1b (bp); /* more to do, keep going */
|
/linux-4.1.27/arch/ia64/hp/sim/boot/ |
H A D | fw-emu.c | 241 struct ia64_boot_param *bp; sys_fw_init() local 268 bp = (void *) cp; cp += sizeof(*bp); sys_fw_init() 360 bp->efi_systab = __pa(&fw_mem); sys_fw_init() 361 bp->efi_memmap = __pa(efi_memmap); sys_fw_init() 362 bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t); sys_fw_init() 363 bp->efi_memdesc_size = sizeof(efi_memory_desc_t); sys_fw_init() 364 bp->efi_memdesc_version = 1; sys_fw_init() 365 bp->command_line = __pa(cmd_line); sys_fw_init() 366 bp->console_info.num_cols = 80; sys_fw_init() 367 bp->console_info.num_rows = 25; sys_fw_init() 368 bp->console_info.orig_x = 0; sys_fw_init() 369 bp->console_info.orig_y = 24; sys_fw_init() 370 bp->fpswa = 0; sys_fw_init() 372 return bp; sys_fw_init()
|
H A D | bootloader.c | 36 extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry); 66 register struct ia64_boot_param *bp; start_bootloader() local 165 bp = sys_fw_init(args, arglen); start_bootloader() 170 jmp_to_kernel((unsigned long) bp, e_entry); start_bootloader()
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | hw_breakpoint.h | 111 extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 112 extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 116 extern int arch_install_hw_breakpoint(struct perf_event *bp); 117 extern void arch_uninstall_hw_breakpoint(struct perf_event *bp); 118 extern void hw_breakpoint_pmu_read(struct perf_event *bp);
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | hw_breakpoint.c | 334 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument 336 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint() 362 *slot = bp; arch_install_hw_breakpoint() 391 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument 393 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint() 413 if (*slot == bp) { arch_uninstall_hw_breakpoint() 458 * Check whether bp virtual address is in kernel space. 460 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument 464 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace() 522 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument 524 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info() 527 switch (bp->attr.bp_type) { arch_build_bp_info() 545 switch (bp->attr.bp_len) { arch_build_bp_info() 576 info->address = bp->attr.bp_addr; arch_build_bp_info() 580 if (arch_check_bp_in_kernelspace(bp)) arch_build_bp_info() 584 info->ctrl.enabled = !bp->attr.disabled; arch_build_bp_info() 595 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument 597 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings() 606 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings() 635 if (!bp->overflow_handler) { arch_validate_hwbkpt_settings() 644 if (arch_check_bp_in_kernelspace(bp)) arch_validate_hwbkpt_settings() 651 if (!bp->hw.target) arch_validate_hwbkpt_settings() 669 * Enable/disable single-stepping over the breakpoint bp at address addr. 671 static void enable_single_step(struct perf_event *bp, u32 addr) enable_single_step() argument 673 struct arch_hw_breakpoint *info = counter_arch_bp(bp); enable_single_step() 675 arch_uninstall_hw_breakpoint(bp); enable_single_step() 682 arch_install_hw_breakpoint(bp); enable_single_step() 685 static void disable_single_step(struct perf_event *bp) disable_single_step() argument 687 arch_uninstall_hw_breakpoint(bp); disable_single_step() 688 counter_arch_bp(bp)->step_ctrl.enabled = 0; disable_single_step() 689 arch_install_hw_breakpoint(bp); disable_single_step() 802 struct perf_event *bp, **slots; breakpoint_handler() local 815 bp = slots[i]; breakpoint_handler() 817 if (bp == NULL) breakpoint_handler() 820 info = counter_arch_bp(bp); breakpoint_handler() 833 perf_bp_event(bp, regs); breakpoint_handler() 834 if (!bp->overflow_handler) breakpoint_handler() 835 enable_single_step(bp, addr); breakpoint_handler() 842 disable_single_step(bp); breakpoint_handler() 1123 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
|
H A D | ptrace.c | 385 static void ptrace_hbptriggered(struct perf_event *bp, ptrace_hbptriggered() argument 389 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); ptrace_hbptriggered() 395 if (current->thread.debug.hbp[i] == bp) ptrace_hbptriggered() 477 struct perf_event *bp; ptrace_gethbpregs() local 489 bp = tsk->thread.debug.hbp[idx]; ptrace_gethbpregs() 490 if (!bp) { ptrace_gethbpregs() 495 arch_ctrl = counter_arch_bp(bp)->ctrl; ptrace_gethbpregs() 505 reg = bp->attr.bp_addr; ptrace_gethbpregs() 523 struct perf_event *bp; ptrace_sethbpregs() local 545 bp = tsk->thread.debug.hbp[idx]; ptrace_sethbpregs() 546 if (!bp) { ptrace_sethbpregs() 547 bp = ptrace_hbp_create(tsk, implied_type); ptrace_sethbpregs() 548 if (IS_ERR(bp)) { ptrace_sethbpregs() 549 ret = PTR_ERR(bp); ptrace_sethbpregs() 552 tsk->thread.debug.hbp[idx] = bp; ptrace_sethbpregs() 555 attr = bp->attr; ptrace_sethbpregs() 577 ret = modify_user_hw_breakpoint(bp, &attr); ptrace_sethbpregs()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
H A D | i40e_hmc.c | 94 sd_entry->u.bp.addr = mem; i40e_add_sd_table_entry() 95 sd_entry->u.bp.sd_pd_index = sd_index; i40e_add_sd_table_entry() 105 I40E_INC_BP_REFCNT(&sd_entry->u.bp); i40e_add_sd_table_entry() 165 pd_entry->bp.addr = mem; i40e_add_pd_table_entry() 166 pd_entry->bp.sd_pd_index = pd_index; i40e_add_pd_table_entry() 167 pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; i40e_add_pd_table_entry() 181 I40E_INC_BP_REFCNT(&pd_entry->bp); i40e_add_pd_table_entry() 230 I40E_DEC_BP_REFCNT(&pd_entry->bp); i40e_remove_pd_bp() 231 if (pd_entry->bp.ref_cnt) i40e_remove_pd_bp() 243 ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); i40e_remove_pd_bp() 265 I40E_DEC_BP_REFCNT(&sd_entry->u.bp); i40e_prep_remove_sd_bp() 266 if (sd_entry->u.bp.ref_cnt) { i40e_prep_remove_sd_bp() 300 ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); i40e_remove_sd_bp_new()
|
H A D | i40e_hmc.h | 63 struct i40e_hmc_bp bp; member in struct:i40e_hmc_pd_entry
83 struct i40e_hmc_bp bp; member in union:i40e_hmc_sd_entry::__anon6612
108 #define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
112 #define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
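
I40E_INC_BP_REFCNT()/I40E_DEC_BP_REFCNT() are plain non-atomic refcount helpers on the backing-page struct: the add paths in i40e_hmc.c take a reference, and the remove paths only free the DMA page once ref_cnt reaches zero. A tiny sketch of that pairing (simplified stand-in types, not the driver's real structures):

#include <stdio.h>

struct hmc_bp { unsigned int ref_cnt; };  /* stand-in for struct i40e_hmc_bp */

#define INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)

int main(void)
{
	struct hmc_bp bp = { 0 };

	INC_BP_REFCNT(&bp);          /* add_pd_table_entry takes a reference */
	DEC_BP_REFCNT(&bp);          /* remove_pd_bp drops it... */
	if (bp.ref_cnt == 0)         /* ...and frees the DMA page only at zero */
		printf("backing page can be freed\n");
	return 0;
}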
|
/linux-4.1.27/drivers/scsi/libfc/ |
H A D | fc_frame.c | 38 const u8 *bp; fc_frame_crc_check() local
44 bp = (const u8 *) fr_hdr(fp); fc_frame_crc_check()
45 crc = ~crc32(~0, bp, len); fc_frame_crc_check()
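
The crc = ~crc32(~0, bp, len) idiom seeds the CRC with all-ones and inverts the result, the standard CRC-32 convention also used by Ethernet and zlib. A self-contained bitwise sketch of the same computation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* reflected CRC-32, polynomial 0xEDB88320, processing one byte at a time */
static uint32_t crc32_le(uint32_t crc, const uint8_t *buf, size_t len)
{
	int k;

	while (len--) {
		crc ^= *buf++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char *frame = "123456789";
	/* seed with ~0 and invert, mirroring crc = ~crc32(~0, bp, len) */
	uint32_t crc = ~crc32_le(~0u, (const uint8_t *)frame, strlen(frame));

	printf("%08x\n", crc);       /* cbf43926, the CRC-32 check value */
	return 0;
}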
|
H A D | fc_disc.c | 396 char *bp; fc_disc_gpn_ft_parse() local
409 bp = buf; fc_disc_gpn_ft_parse()
411 np = (struct fc_gpn_ft_resp *)bp; fc_disc_gpn_ft_parse()
422 memcpy((char *)np + tlen, bp, plen); fc_disc_gpn_ft_parse()
425 * Set bp so that the loop below will advance it to the fc_disc_gpn_ft_parse()
428 bp -= tlen; fc_disc_gpn_ft_parse()
438 * Normally, np == bp and plen == len, but from the partial case above, fc_disc_gpn_ft_parse()
439 * bp, len describe the overall buffer, and np, plen describe the fc_disc_gpn_ft_parse()
467 bp += sizeof(*np); fc_disc_gpn_ft_parse()
468 np = (struct fc_gpn_ft_resp *)bp; fc_disc_gpn_ft_parse()
|
/linux-4.1.27/arch/x86/kvm/ |
H A D | tss.h | 49 u16 bp; member in struct:tss_segment_16
|
/linux-4.1.27/net/sunrpc/ |
H A D | cache.c | 747 char *bp = crq->buf; cache_request() local
750 detail->cache_request(detail, crq->item, &bp, &len); cache_request()
1069 char *bp = *bpp; qword_add() local
1075 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t"); qword_add()
1077 bp += len; qword_add()
1080 bp += ret; qword_add()
1082 *bp++ = ' '; qword_add()
1085 *bpp = bp; qword_add()
1092 char *bp = *bpp; qword_addhex() local
1098 *bp++ = '\\'; qword_addhex()
1099 *bp++ = 'x'; qword_addhex()
1102 bp = hex_byte_pack(bp, *buf++); qword_addhex()
1109 *bp++ = ' '; qword_addhex()
1112 *bpp = bp; qword_addhex()
1213 char *bp = *bpp; qword_get() local
1216 while (*bp == ' ') bp++; qword_get()
1218 if (bp[0] == '\\' && bp[1] == 'x') { qword_get()
1220 bp += 2; qword_get()
1224 h = hex_to_bin(bp[0]); qword_get()
1228 l = hex_to_bin(bp[1]); qword_get()
1233 bp += 2; qword_get()
1238 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) { qword_get()
1239 if (*bp == '\\' && qword_get()
1240 isodigit(bp[1]) && (bp[1] <= '3') && qword_get()
1241 isodigit(bp[2]) && qword_get()
1242 isodigit(bp[3])) { qword_get()
1243 int byte = (*++bp -'0'); qword_get()
1244 bp++; qword_get()
1245 byte = (byte << 3) | (*bp++ - '0'); qword_get()
1246 byte = (byte << 3) | (*bp++ - '0'); qword_get()
1250 *dest++ = *bp++; qword_get()
1256 if (*bp != ' ' && *bp != '\n' && *bp != '\0') qword_get()
1258 while (*bp == ' ') bp++; qword_get()
1259 *bpp = bp; qword_get()
1439 char *bp, *ep; write_flush() local
1450 bp = tbuf; write_flush()
1451 cd->flush_time = get_expiry(&bp); write_flush()
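
The qword_get() snippets decode space-separated tokens whose bytes may be escaped as \OOO octal triples, the inverse of the escaping qword_add() produces. A hedged userspace sketch of just that decode loop (token_get is a hypothetical name; the \xHH branch and bounds checks are omitted):

#include <stdio.h>

#define isodigit(c) ((c) >= '0' && (c) <= '7')

/* decode one token from *bpp into dest, expanding \OOO octal escapes */
static int token_get(char **bpp, char *dest)
{
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;
	while (*bp && *bp != ' ' && *bp != '\n') {
		if (bp[0] == '\\' && isodigit(bp[1]) && bp[1] <= '3' &&
		    isodigit(bp[2]) && isodigit(bp[3])) {
			int byte = (*++bp - '0');

			bp++;
			byte = (byte << 3) | (*bp++ - '0');
			byte = (byte << 3) | (*bp++ - '0');
			*dest++ = byte;
		} else {
			*dest++ = *bp++;
		}
		len++;
	}
	*dest = '\0';
	*bpp = bp;
	return len;
}

int main(void)
{
	char line[] = "hello\\040world next";
	char *p = line, word[64];

	token_get(&p, word);
	printf("[%s]\n", word);      /* [hello world]: \040 is an octal space */
	return 0;
}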
|
/linux-4.1.27/drivers/net/ethernet/dec/tulip/ |
H A D | eeprom.c | 304 unsigned char *bp = leaf->leafdata; tulip_parse_eeprom() local
307 bp[0], bp[1], bp[2 + bp[1]*2], tulip_parse_eeprom()
308 bp[5 + bp[2 + bp[1]*2]*2], tulip_parse_eeprom()
309 bp[4 + bp[2 + bp[1]*2]*2]); tulip_parse_eeprom()
|
/linux-4.1.27/tools/perf/arch/x86/util/ |
H A D | dwarf-regs.c | 37 "%bp",
50 "%bp",
|
/linux-4.1.27/drivers/md/ |
H A D | dm-bufio.h | 49 struct dm_buffer **bp);
56 struct dm_buffer **bp);
63 struct dm_buffer **bp);
|
H A D | bitmap.c | 63 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
66 if (bitmap->bp[page].map) /* page is already allocated, just return */
81 * When this function completes, either bp[page].map or
82 * bp[page].hijacked. In either case, this function will
95 if (!bitmap->bp[page].map)
96 bitmap->bp[page].hijacked = 1;
97 } else if (bitmap->bp[page].map ||
98 bitmap->bp[page].hijacked) {
106 bitmap->bp[page].map = mappage;
119 if (bitmap->bp[page].count) /* page is still busy */ bitmap_checkfree()
124 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ bitmap_checkfree()
125 bitmap->bp[page].hijacked = 0; bitmap_checkfree()
126 bitmap->bp[page].map = NULL; bitmap_checkfree()
129 ptr = bitmap->bp[page].map; bitmap_checkfree()
130 bitmap->bp[page].map = NULL; bitmap_checkfree()
1171 bitmap->bp[page].count += inc; bitmap_count_page()
1179 struct bitmap_page *bp = &bitmap->bp[page]; bitmap_set_pending() local
1181 if (!bp->pending) bitmap_set_pending()
1182 bp->pending = 1; bitmap_set_pending()
1259 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { bitmap_daemon_work()
1263 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; bitmap_daemon_work()
1333 if (bitmap->bp[page].hijacked ||
1334 bitmap->bp[page].map == NULL)
1346 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1351 &bitmap->bp[page].map)[hi];
1354 &(bitmap->bp[page].map[pageoff]);
1678 struct bitmap_page *bp; bitmap_free() local
1694 bp = bitmap->counts.bp; bitmap_free()
1699 if (bp) /* deallocate the page memory */ bitmap_free()
1701 if (bp[k].map && !bp[k].hijacked) bitmap_free()
1702 kfree(bp[k].map); bitmap_free()
1703 kfree(bp); bitmap_free()
2031 bitmap->counts.bp = new_bp; bitmap_resize()
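
The recurring "hijacked" checks implement a graceful degradation: when a counter page cannot be allocated, the bp[page].map pointer field itself is pressed into service to hold a couple of counters, with a flag recording which interpretation applies. A minimal sketch of that tagged-slot idea (illustrative layout, not md's real one):

#include <stdio.h>
#include <stdlib.h>

/* a slot either points at a page of counters or, if allocation failed
 * ("hijacked"), stores two small counters inside the pointer field itself */
struct page_slot {
	unsigned short *map;
	int hijacked;
};

static unsigned short *slot_counter(struct page_slot *s, int idx)
{
	if (s->hijacked)	/* reuse the pointer's own bytes as storage */
		return &((unsigned short *)&s->map)[idx & 1];
	return &s->map[idx];
}

int main(void)
{
	struct page_slot s = { NULL, 0 };

	s.map = calloc(1024, sizeof(unsigned short));
	if (!s.map)
		s.hijacked = 1;	/* degrade: keep 2 counters instead of 1024 */

	(*slot_counter(&s, 3))++;
	printf("counter 3 = %u\n", *slot_counter(&s, 3));
	if (!s.hijacked)
		free(s.map);
	return 0;
}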
|
/linux-4.1.27/sound/pci/emu10k1/ |
H A D | emu10k1_callback.c | 225 struct best_voice *bp; lookup_voices() local
246 bp = best + V_FREE; lookup_voices()
248 bp = best + V_OFF; lookup_voices()
252 bp = best + V_RELEASED; lookup_voices()
256 bp = best + V_OFF; lookup_voices()
262 bp = best + V_PLAYING; lookup_voices()
267 if (bp != best + V_OFF && bp != best + V_FREE && lookup_voices()
271 bp = best + V_OFF; lookup_voices()
274 if (vp->time < bp->time) { lookup_voices()
275 bp->time = vp->time; lookup_voices()
276 bp->voice = i; lookup_voices()
|
/linux-4.1.27/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/ |
H A D | EventClass.py | 57 flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
66 self.bp = bp
|
/linux-4.1.27/drivers/mtd/nand/ |
H A D | nand_ecc.c | 162 const uint32_t *bp = (uint32_t *)buf; __nand_calculate_ecc() local
195 cur = *bp++; __nand_calculate_ecc()
198 cur = *bp++; __nand_calculate_ecc()
201 cur = *bp++; __nand_calculate_ecc()
204 cur = *bp++; __nand_calculate_ecc()
208 cur = *bp++; __nand_calculate_ecc()
212 cur = *bp++; __nand_calculate_ecc()
215 cur = *bp++; __nand_calculate_ecc()
218 cur = *bp++; __nand_calculate_ecc()
222 cur = *bp++; __nand_calculate_ecc()
227 cur = *bp++; __nand_calculate_ecc()
231 cur = *bp++; __nand_calculate_ecc()
235 cur = *bp++; __nand_calculate_ecc()
239 cur = *bp++; __nand_calculate_ecc()
243 cur = *bp++; __nand_calculate_ecc()
246 cur = *bp++; __nand_calculate_ecc()
249 cur = *bp++; __nand_calculate_ecc()
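
The long run of cur = *bp++ lines is an unrolled word-at-a-time XOR fold: each 32-bit word of the 256-byte block is accumulated, and row/column parities are then extracted from the folded value. A compact, non-unrolled sketch of the fold down to a single parity bit:

#include <stdint.h>
#include <stdio.h>

/* XOR-fold nwords 32-bit words, then reduce to one even/odd parity bit */
static int block_parity(const uint32_t *bp, int nwords)
{
	uint32_t par = 0;

	while (nwords--)
		par ^= *bp++;	/* the unrolled cur = *bp++ accumulation */
	par ^= par >> 16;	/* fold 32 -> 16 -> 8 -> 4 -> 2 -> 1 bits */
	par ^= par >> 8;
	par ^= par >> 4;
	par ^= par >> 2;
	par ^= par >> 1;
	return par & 1;
}

int main(void)
{
	uint32_t block[64] = { 0 };	/* one 256-byte ECC block */

	block[2] = 0x100;		/* a single set bit -> odd parity */
	printf("parity = %d\n", block_parity(block, 64));
	return 0;
}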
|
/linux-4.1.27/samples/bpf/ |
H A D | tracex4_kern.c | 43 bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip))); bpf_prog2()
|
/linux-4.1.27/scripts/dtc/ |
H A D | treesource.c | 170 const char *bp = val.val; write_propval_bytes() local
175 while (m && (m->offset == (bp-val.val))) { write_propval_bytes()
181 fprintf(f, "%02hhx", *bp++); write_propval_bytes()
182 if ((const void *)bp >= propend) write_propval_bytes()
|
/linux-4.1.27/drivers/isdn/hisax/ |
H A D | tei.c | 107 u_char *bp; put_tei_msg() local
113 bp = skb_put(skb, 3); put_tei_msg()
114 bp[0] = (TEI_SAPI << 2); put_tei_msg()
115 bp[1] = (GROUP_TEI << 1) | 0x1; put_tei_msg()
116 bp[2] = UI; put_tei_msg()
117 bp = skb_put(skb, 5); put_tei_msg()
118 bp[0] = TEI_ENTITY_ID; put_tei_msg()
119 bp[1] = ri >> 8; put_tei_msg()
120 bp[2] = ri & 0xff; put_tei_msg()
121 bp[3] = m_id; put_tei_msg()
122 bp[4] = (tei << 1) | 1; put_tei_msg()
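
put_tei_msg() appends a 3-byte LAPD UI header and a 5-byte TEI management body with skb_put(). The same layout written into a plain buffer (constants follow Q.921 conventions but are hard-coded here for illustration):

#include <stdint.h>
#include <stdio.h>

#define TEI_SAPI	63	/* layer-2 management SAPI */
#define GROUP_TEI	127	/* broadcast TEI */
#define UI		0x03	/* unnumbered information frame */
#define TEI_ENTITY_ID	0x0f	/* TEI management entity */

/* build the 3-byte UI header plus 5-byte TEI management body */
static int build_tei_msg(uint8_t *buf, uint8_t m_id, uint16_t ri, uint8_t tei)
{
	uint8_t *bp = buf;

	*bp++ = TEI_SAPI << 2;			/* SAPI, C/R=0, EA0=0 */
	*bp++ = (GROUP_TEI << 1) | 0x1;		/* TEI 127, EA1=1 */
	*bp++ = UI;
	*bp++ = TEI_ENTITY_ID;
	*bp++ = ri >> 8;			/* reference number, MSB first */
	*bp++ = ri & 0xff;
	*bp++ = m_id;				/* message type */
	*bp++ = (tei << 1) | 1;			/* action indicator, EA=1 */
	return bp - buf;
}

int main(void)
{
	uint8_t frame[8];
	int i, n = build_tei_msg(frame, 0x01 /* ID request */, 0x1234, 127);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}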
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
H A D | ptrace.c | 103 /* If no h/w bp configured, disable S bit. */ user_disable_single_step()
339 int bp; deconfigure_bp() local
345 for (bp = 0; bp < 6; bp++) { deconfigure_bp()
348 put_debugreg(pid, PT_BP + 3 + (bp * 2), 0); deconfigure_bp()
349 put_debugreg(pid, PT_BP + 4 + (bp * 2), 0); deconfigure_bp()
352 tmp = get_debugreg(pid, PT_BP_CTRL) & ~(3 << (2 + (bp * 4))); deconfigure_bp()
|
H A D | kgdb.c | 875 int S, bp, trig_bits = 0, rw_bits = 0; stub_is_stopped() local
890 for (bp = 0; bp < 6; bp++) { stub_is_stopped()
893 int bitpos_trig = 1 + bp * 2; stub_is_stopped()
895 int bitpos_config = 2 + bp * 4; stub_is_stopped()
910 trig_mask |= (1 << bp); stub_is_stopped()
912 if (reg.eda >= bp_d_regs[bp * 2] && stub_is_stopped()
913 reg.eda <= bp_d_regs[bp * 2 + 1]) { stub_is_stopped()
921 if (bp < 6) { stub_is_stopped()
925 for (bp = 0; bp < 6; bp++) { stub_is_stopped()
927 int bitpos_config = 2 + bp * 4; stub_is_stopped()
932 if (trig_mask & (1 << bp)) { stub_is_stopped()
934 if (reg.eda + 31 >= bp_d_regs[bp * 2]) { stub_is_stopped()
937 stopped_data_address = bp_d_regs[bp * 2]; stub_is_stopped()
948 BUG_ON(bp >= 6); stub_is_stopped()
1141 int bp; insert_watchpoint() local
1152 for (bp = 0; bp < 6; bp++) { insert_watchpoint()
1156 if (!(sreg.s0_3 & (0x3 << (2 + (bp * 4))))) { insert_watchpoint()
1161 if (bp > 5) { insert_watchpoint()
1170 sreg.s0_3 |= (1 << (2 + bp * 4)); insert_watchpoint()
1174 sreg.s0_3 |= (2 << (2 + bp * 4)); insert_watchpoint()
1178 bp_d_regs[bp * 2] = addr; insert_watchpoint()
1179 bp_d_regs[bp * 2 + 1] = (addr + len - 1); insert_watchpoint()
1219 int bp; remove_watchpoint() local
1228 for (bp = 0; bp < 6; bp++) { remove_watchpoint()
1229 if (bp_d_regs[bp * 2] == addr && remove_watchpoint()
1230 bp_d_regs[bp * 2 + 1] == (addr + len - 1)) { remove_watchpoint()
1232 int bitpos = 2 + bp * 4; remove_watchpoint()
1247 if (bp > 5) { remove_watchpoint()
1256 sreg.s0_3 &= ~(3 << (2 + (bp * 4))); remove_watchpoint()
1257 bp_d_regs[bp * 2] = 0; remove_watchpoint()
1258 bp_d_regs[bp * 2 + 1] = 0; remove_watchpoint()
|
/linux-4.1.27/arch/frv/include/asm/ |
H A D | io.h | 55 const uint8_t *bp = buf; io_outsb() local
58 __builtin_write8((volatile void __iomem *) __ioaddr, *bp++); io_outsb()
64 const uint16_t *bp = buf; io_outsw() local
67 __builtin_write16((volatile void __iomem *) __ioaddr, (*bp++)); io_outsw()
84 uint8_t *bp = buf; io_insb() local
87 *bp++ = __builtin_read8((volatile void __iomem *) addr); io_insb()
92 uint16_t *bp = buf; io_insw() local
95 *bp++ = __builtin_read16((volatile void __iomem *) addr); io_insw()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
H A D | i40e_hmc.h | 63 struct i40e_hmc_bp bp; member in struct:i40e_hmc_pd_entry
83 struct i40e_hmc_bp bp; member in union:i40e_hmc_sd_entry::__anon6644
108 #define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
112 #define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
|
/linux-4.1.27/drivers/mfd/ |
H A D | pm8921-core.c | 68 static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp, pm8xxx_read_block_irq() argument
74 rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); pm8xxx_read_block_irq()
76 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); pm8xxx_read_block_irq()
89 pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp) pm8xxx_config_irq() argument
94 rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); pm8xxx_config_irq()
96 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); pm8xxx_config_irq()
|
H A D | ipaq-micro.c | 38 int i, bp; ipaq_micro_trigger_tx() local
42 bp = 0; ipaq_micro_trigger_tx()
43 tx->buf[bp++] = CHAR_SOF; ipaq_micro_trigger_tx()
46 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx()
49 tx->buf[bp++] = msg->tx_data[i]; ipaq_micro_trigger_tx()
53 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx()
54 tx->len = bp; ipaq_micro_trigger_tx()
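
ipaq_micro_trigger_tx() frames a message as a start-of-frame byte, an id/length header that also seeds the checksum, the payload, and a trailing 8-bit checksum. A self-contained sketch of that framing (CHAR_SOF's value is assumed here):

#include <stdint.h>
#include <stdio.h>

#define CHAR_SOF 0x02	/* assumed start-of-frame byte */

/* frame: SOF, id/len header (which also seeds the checksum),
 * payload bytes, then the running 8-bit checksum */
static int micro_frame(uint8_t *out, uint8_t id, const uint8_t *data, int len)
{
	uint8_t checksum = (uint8_t)((id << 4) | len);
	int i, bp = 0;

	out[bp++] = CHAR_SOF;
	out[bp++] = checksum;
	for (i = 0; i < len; i++) {
		out[bp++] = data[i];
		checksum += data[i];
	}
	out[bp++] = checksum;
	return bp;		/* becomes tx->len */
}

int main(void)
{
	const uint8_t payload[] = { 0xde, 0xad };
	uint8_t buf[16];
	int i, n = micro_frame(buf, 3, payload, sizeof(payload));

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}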
|
/linux-4.1.27/drivers/pcmcia/ |
H A D | m32r_pcc.c | 153 unsigned char *bp = (unsigned char *)buf; pcc_iorw() local
163 writeb(*bp++, addr); pcc_iorw()
168 *bp++ = readb(addr); pcc_iorw()
173 unsigned short *bp = (unsigned short *)buf; pcc_iorw() local
185 unsigned char *cp = (unsigned char *)bp; pcc_iorw()
189 bp++; pcc_iorw()
192 writew(*bp++, addr); pcc_iorw()
199 unsigned char *cp = (unsigned char *)bp; pcc_iorw()
204 bp++; pcc_iorw()
207 *bp++ = readw(addr); pcc_iorw()
|
H A D | m32r_cfc.c | 108 unsigned char *bp = (unsigned char *)buf; pcc_ioread_byte() local
125 *bp++ = readb(addr); pcc_ioread_byte()
133 unsigned short *bp = (unsigned short *)buf; pcc_ioread_word() local
156 *bp++ = readw(addr); pcc_ioread_word()
164 unsigned char *bp = (unsigned char *)buf; pcc_iowrite_byte() local
181 writeb(*bp++, addr); pcc_iowrite_byte()
189 unsigned short *bp = (unsigned short *)buf; pcc_iowrite_word() local
218 writew(*bp++, addr); pcc_iowrite_word()
|
/linux-4.1.27/sound/isa/sb/ |
H A D | emu8000_callback.c | 173 struct best *bp; get_voice() local
192 bp = best + OFF; get_voice()
195 bp = best + RELEASED; get_voice()
198 bp = best + OFF; get_voice()
201 bp = best + PLAYING; get_voice()
210 bp = best + OFF; get_voice()
213 if (vp->time < bp->time) { get_voice()
214 bp->time = vp->time; get_voice()
215 bp->voice = i; get_voice()
|
/linux-4.1.27/drivers/spi/ |
H A D | spi-tle62x0.c | 103 char *bp = buf; tle62x0_status_show() local
125 bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2))); tle62x0_status_show()
128 *bp++ = '\n'; tle62x0_status_show()
131 return bp - buf; tle62x0_status_show()
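
tle62x0_status_show() uses the common sysfs show() idiom: advance a cursor with sprintf() and return bp - buf as the byte count. The same idiom in standalone form (decode_fault here is a made-up two-bits-per-channel decoder):

#include <stdio.h>

/* made-up decoder: two status bits per channel */
static const char *decode_fault(unsigned int bits)
{
	switch (bits & 3) {
	case 1:
		return "open";
	case 2:
		return "short";
	case 3:
		return "fault";
	default:
		return "ok";
	}
}

/* build the status line the way a sysfs show() routine does */
static int status_show(char *buf, unsigned int fault, int channels)
{
	char *bp = buf;
	int ptr;

	for (ptr = 0; ptr < channels; ptr++)
		bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
	*bp++ = '\n';
	*bp = '\0';		/* for printing; sysfs only needs the length */
	return bp - buf;
}

int main(void)
{
	char page[128];
	int len = status_show(page, 0x9 /* ch0 open, ch1 short */, 4);

	printf("%d bytes: %s", len, page);
	return 0;
}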
|
/linux-4.1.27/arch/m68k/68360/ |
H A D | commproc.c | 292 volatile uint *bp; m360_cpm_setbrg() local
296 /* bp = (uint *)&cpmp->cp_brgc1; */ m360_cpm_setbrg()
297 bp = (volatile uint *)(&pquicc->brgc[0].l); m360_cpm_setbrg()
298 bp += brg; m360_cpm_setbrg()
299 *bp = ((BRG_UART_CLK / rate - 1) << 1) | CPM_BRG_EN; m360_cpm_setbrg()
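
m360_cpm_setbrg() programs the baud-rate generator with a divisor: clock/rate - 1 shifted into the count field, OR'd with the enable bit. The divisor math on its own (the clock and enable-bit values are assumptions, not the QUICC's real ones):

#include <stdint.h>
#include <stdio.h>

#define BRG_UART_CLK	25000000u	/* assumed input clock in Hz */
#define CPM_BRG_EN	0x00010000u	/* assumed enable bit */

/* register value: (clock / rate - 1) in the count field, enable OR'd in */
static uint32_t brg_value(unsigned int rate)
{
	return ((BRG_UART_CLK / rate - 1) << 1) | CPM_BRG_EN;
}

int main(void)
{
	printf("9600 baud -> %#x\n", brg_value(9600));
	return 0;
}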
|
/linux-4.1.27/drivers/tty/serial/ |
H A D | mpsc.c | 790 u8 *bp, *bp_p; mpsc_init_rings() local
836 bp = pi->rxb; mpsc_init_rings()
852 bp += MPSC_RXBE_SIZE; mpsc_init_rings()
860 bp = pi->txb; mpsc_init_rings()
871 bp += MPSC_TXBE_SIZE; mpsc_init_rings()
943 u8 *bp; mpsc_rx_intr() local
983 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); mpsc_rx_intr()
984 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE, mpsc_rx_intr()
988 invalidate_dcache_range((ulong)bp, mpsc_rx_intr()
989 (ulong)bp + MPSC_RXBE_SIZE); mpsc_rx_intr()
1029 if (uart_handle_sysrq_char(&pi->port, *bp)) { mpsc_rx_intr()
1030 bp++; mpsc_rx_intr()
1045 tty_insert_flip_char(port, *bp, flag); mpsc_rx_intr()
1048 tty_insert_flip_char(port, *bp++, TTY_NORMAL); mpsc_rx_intr()
1118 u8 *bp; mpsc_copy_tx_data() local
1133 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1134 *bp = pi->port.x_char; mpsc_copy_tx_data()
1143 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1144 memcpy(bp, &xmit->buf[xmit->tail], i); mpsc_copy_tx_data()
1153 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, mpsc_copy_tx_data()
1157 flush_dcache_range((ulong)bp, mpsc_copy_tx_data()
1158 (ulong)bp + MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1566 u8 *bp; mpsc_get_poll_char() local
1598 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); mpsc_get_poll_char()
1599 dma_cache_sync(pi->port.dev, (void *) bp, mpsc_get_poll_char()
1603 invalidate_dcache_range((ulong)bp, mpsc_get_poll_char()
1604 (ulong)bp + MPSC_RXBE_SIZE); mpsc_get_poll_char()
1609 poll_buf[poll_cnt] = *bp; mpsc_get_poll_char()
1613 poll_buf[poll_cnt] = *bp++; mpsc_get_poll_char()
1713 u8 *bp, *dp, add_cr = 0; mpsc_console_write() local
1730 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_console_write()
1751 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, mpsc_console_write()
1755 flush_dcache_range((ulong)bp, mpsc_console_write()
1756 (ulong)bp + MPSC_TXBE_SIZE); mpsc_console_write()
|
/linux-4.1.27/drivers/parport/ |
H A D | ieee1284_ops.c | 734 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_write_data() local
746 for (; len > 0; len--, bp++) { parport_ieee1284_epp_write_data()
748 parport_write_data (port, *bp); parport_ieee1284_epp_write_data()
778 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_read_data() local
789 for (; len > 0; len--, bp++) { parport_ieee1284_epp_read_data()
799 *bp = parport_read_data (port); parport_ieee1284_epp_read_data()
822 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_write_addr() local
834 for (; len > 0; len--, bp++) { parport_ieee1284_epp_write_addr()
836 parport_write_data (port, *bp); parport_ieee1284_epp_write_addr()
866 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_read_addr() local
877 for (; len > 0; len--, bp++) { parport_ieee1284_epp_read_addr()
887 *bp = parport_read_data (port); parport_ieee1284_epp_read_addr()
|
/linux-4.1.27/scripts/gdb/linux/ |
H A D | symbols.py | 139 for bp in gdb.breakpoints():
140 saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
|
/linux-4.1.27/arch/x86/include/uapi/asm/ |
H A D | sigcontext32.h | 59 unsigned int bp; member in struct:sigcontext_ia32
|
/linux-4.1.27/arch/x86/math-emu/ |
H A D | get_address.c | 36 offsetof(struct pt_regs, bp),
352 address += FPU_info->regs->bp + FPU_info->regs->si; FPU_get_address_16()
357 address += FPU_info->regs->bp + FPU_info->regs->di; FPU_get_address_16()
368 address += FPU_info->regs->bp; FPU_get_address_16()
|
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | processor.h | 39 struct debug_entry bp[2]; member in struct:debug_info
|
/linux-4.1.27/arch/arm/mach-mv78xx0/ |
H A D | db78x00-bp-setup.c | 2 * arch/arm/mach-mv78xx0/db78x00-bp-setup.c
|
/linux-4.1.27/sound/drivers/opl3/ |
H A D | opl3_midi.c | 165 struct best *bp; opl3_get_voice() local
182 bp = best; opl3_get_voice()
194 bp++; opl3_get_voice()
200 bp++; opl3_get_voice()
208 bp++; opl3_get_voice()
211 bp++; opl3_get_voice()
214 bp++; opl3_get_voice()
216 if (voice_time < bp->time) { opl3_get_voice()
217 bp->time = voice_time; opl3_get_voice()
218 bp->voice = i; opl3_get_voice()
|
/linux-4.1.27/drivers/scsi/ |
H A D | wd33c93.c | 2061 char *bp; wd33c93_write_info() local
2082 for (bp = buf; *bp; ) { wd33c93_write_info()
2083 while (',' == *bp || ' ' == *bp) wd33c93_write_info()
2084 ++bp; wd33c93_write_info()
2085 if (!strncmp(bp, "debug:", 6)) { wd33c93_write_info()
2086 hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; wd33c93_write_info()
2087 } else if (!strncmp(bp, "disconnect:", 11)) { wd33c93_write_info()
2088 x = simple_strtoul(bp+11, &bp, 0); wd33c93_write_info()
2092 } else if (!strncmp(bp, "period:", 7)) { wd33c93_write_info()
2093 x = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
2097 } else if (!strncmp(bp, "resync:", 7)) { wd33c93_write_info()
2098 set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); wd33c93_write_info()
2099 } else if (!strncmp(bp, "proc:", 5)) { wd33c93_write_info()
2100 hd->proc = simple_strtoul(bp+5, &bp, 0); wd33c93_write_info()
2101 } else if (!strncmp(bp, "nodma:", 6)) { wd33c93_write_info()
2102 hd->no_dma = simple_strtoul(bp+6, &bp, 0); wd33c93_write_info()
2103 } else if (!strncmp(bp, "level2:", 7)) { wd33c93_write_info()
2104 hd->level2 = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
2105 } else if (!strncmp(bp, "burst:", 6)) { wd33c93_write_info()
2107 simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA; wd33c93_write_info()
2108 } else if (!strncmp(bp, "fast:", 5)) { wd33c93_write_info()
2109 x = !!simple_strtol(bp+5, &bp, 0); wd33c93_write_info()
2113 } else if (!strncmp(bp, "nosync:", 7)) { wd33c93_write_info()
2114 x = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
|
/linux-4.1.27/drivers/rtc/ |
H A D | rtc-ds1305.c | 216 u8 *bp = buf; ds1305_set_time() local
225 *bp++ = DS1305_WRITE | DS1305_SEC; ds1305_set_time()
227 *bp++ = bin2bcd(time->tm_sec); ds1305_set_time()
228 *bp++ = bin2bcd(time->tm_min); ds1305_set_time()
229 *bp++ = hour2bcd(ds1305->hr12, time->tm_hour); ds1305_set_time()
230 *bp++ = (time->tm_wday < 7) ? (time->tm_wday + 1) : 1; ds1305_set_time()
231 *bp++ = bin2bcd(time->tm_mday); ds1305_set_time()
232 *bp++ = bin2bcd(time->tm_mon + 1); ds1305_set_time()
233 *bp++ = bin2bcd(time->tm_year - 100); ds1305_set_time()
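
ds1305_set_time() packs each time field as BCD into a burst-write buffer, one chip register per byte after a write-command byte. A sketch of the packing (command/register values assumed; struct tm fields are the standard C ones):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DS1305_WRITE	0x80	/* assumed write-command bit */
#define DS1305_SEC	0x00	/* seconds register address */

static uint8_t bin2bcd(unsigned int v)
{
	return (uint8_t)(((v / 10) << 4) | (v % 10));
}

/* fill the burst write the driver sends: command byte, then one
 * BCD-packed register per byte starting at the seconds register */
static void pack_time(uint8_t *buf, const struct tm *t)
{
	uint8_t *bp = buf;

	*bp++ = DS1305_WRITE | DS1305_SEC;
	*bp++ = bin2bcd(t->tm_sec);
	*bp++ = bin2bcd(t->tm_min);
	*bp++ = bin2bcd(t->tm_hour);			/* 24h mode only */
	*bp++ = (t->tm_wday < 7) ? t->tm_wday + 1 : 1;	/* chip uses 1..7 */
	*bp++ = bin2bcd(t->tm_mday);
	*bp++ = bin2bcd(t->tm_mon + 1);			/* tm_mon is 0-based */
	*bp++ = bin2bcd(t->tm_year - 100);		/* years since 2000 */
}

int main(void)
{
	struct tm t = { .tm_sec = 30, .tm_min = 59, .tm_hour = 23,
			.tm_wday = 2, .tm_mday = 31, .tm_mon = 11,
			.tm_year = 115 /* 2015 */ };
	uint8_t buf[8];
	int i;

	pack_time(buf, &t);
	for (i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}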
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | ar9002_phy.c | 298 int bp = 0; ar9002_hw_spur_mitigate() local
299 for (bp = 0; bp < 30; bp++) { ar9002_hw_spur_mitigate()
301 pilot_mask = pilot_mask | 0x1 << bp; ar9002_hw_spur_mitigate()
302 chan_mask = chan_mask | 0x1 << bp; ar9002_hw_spur_mitigate()
|
/linux-4.1.27/drivers/edac/ |
H A D | mce_amd_inj.c | 9 * Copyright (c) 2010-14: Borislav Petkov <bp@alien8.de>
260 MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
|
H A D | edac_stub.c | 8 * Borislav Petkov <bp@alien8.de>
|
/linux-4.1.27/arch/x86/oprofile/ |
H A D | backtrace.c | 74 head = (struct stack_frame_ia32 *) regs->bp; x86_backtrace_32()
|
/linux-4.1.27/drivers/firmware/ |
H A D | dmi_scan.c | 40 const u8 *bp = ((u8 *) dm) + dm->length; dmi_string_nosave() local
44 while (s > 0 && *bp) { dmi_string_nosave()
45 bp += strlen(bp) + 1; dmi_string_nosave()
49 if (*bp != 0) { dmi_string_nosave()
50 size_t len = strlen(bp)+1; dmi_string_nosave()
53 if (!memcmp(bp, dmi_empty_string, cmp_len)) dmi_string_nosave()
55 return bp; dmi_string_nosave()
64 const char *bp = dmi_string_nosave(dm, s); dmi_string() local
68 if (bp == dmi_empty_string) dmi_string()
71 len = strlen(bp) + 1; dmi_string()
74 strcpy(str, bp); dmi_string()
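
dmi_string_nosave() walks the packed SMBIOS string-set that follows a DMI structure: NUL-terminated strings sit back to back, index s selects the s-th one, and an empty string marks the end. The same walk over a byte blob:

#include <stdio.h>
#include <string.h>

/* return the s-th (1-based) string in a packed NUL-separated set,
 * or NULL if the set ends first */
static const char *packed_string(const char *bp, int s)
{
	s--;
	while (s > 0 && *bp) {
		bp += strlen(bp) + 1;	/* hop over one string and its NUL */
		s--;
	}
	return *bp ? bp : NULL;		/* empty string marks end of set */
}

int main(void)
{
	/* "Vendor\0Board-1\0Rev A\0\0" as it sits after a DMI header */
	const char blob[] = "Vendor\0Board-1\0Rev A\0";

	printf("%s\n", packed_string(blob, 2));	/* Board-1 */
	return 0;
}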
|
/linux-4.1.27/drivers/net/ethernet/cirrus/ |
H A D | cs89x0.c | 420 unsigned char *bp = lp->rx_dma_ptr; dma_rx() local
422 status = bp[0] + (bp[1] << 8); dma_rx()
423 length = bp[2] + (bp[3] << 8); dma_rx()
424 bp += 4; dma_rx()
427 dev->name, (unsigned long)bp, status, length); dma_rx()
439 /* AKPM: advance bp to the next frame */ dma_rx()
441 bp += (length + 3) & ~3; dma_rx()
442 if (bp >= lp->end_dma_buff) dma_rx()
443 bp -= lp->dmasize * 1024; dma_rx()
444 lp->rx_dma_ptr = bp; dma_rx()
449 if (bp + length > lp->end_dma_buff) { dma_rx()
450 int semi_cnt = lp->end_dma_buff - bp; dma_rx()
451 memcpy(skb_put(skb, semi_cnt), bp, semi_cnt); dma_rx() local
455 memcpy(skb_put(skb, length), bp, length); dma_rx() local
457 bp += (length + 3) & ~3; dma_rx()
458 if (bp >= lp->end_dma_buff) dma_rx()
459 bp -= lp->dmasize*1024; dma_rx()
460 lp->rx_dma_ptr = bp; dma_rx()
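
dma_rx() pulls a frame out of a circular DMA buffer: status and length words first, then a copy that is split in two when the frame wraps past end_dma_buff. A sketch of the wrapping copy (sizes invented for illustration):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16	/* toy size; the driver uses dmasize * 1024 */

/* copy len bytes out of a ring starting at pos, splitting the copy
 * at the wrap point the way dma_rx() does with end_dma_buff */
static int ring_copy(unsigned char *dst, const unsigned char *ring,
		     int pos, int len)
{
	if (pos + len > RING_SIZE) {
		int semi_cnt = RING_SIZE - pos;	/* bytes up to the end */

		memcpy(dst, ring + pos, semi_cnt);
		memcpy(dst + semi_cnt, ring, len - semi_cnt);
	} else {
		memcpy(dst, ring + pos, len);
	}
	return (pos + len) % RING_SIZE;		/* new read position */
}

int main(void)
{
	unsigned char ring[RING_SIZE], frame[8];
	int i, pos;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = i;
	pos = ring_copy(frame, ring, 12, 8);	/* wraps: 12..15 then 0..3 */
	printf("next pos %d, first %d, last %d\n", pos, frame[0], frame[7]);
	return 0;
}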
|
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/ |
H A D | mce_amd.c | 9 * Maintained by: Borislav Petkov <bp@alien8.de>
643 struct threshold_bank **bp; threshold_create_device() local
646 bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks, threshold_create_device()
648 if (!bp) threshold_create_device()
651 per_cpu(threshold_banks, cpu) = bp; threshold_create_device()
|