Lines Matching refs:vptr
95 static void velocity_set_power_state(struct velocity_info *vptr, char state) in velocity_set_power_state() argument
97 void *addr = vptr->mac_regs; in velocity_set_power_state()
99 if (vptr->pdev) in velocity_set_power_state()
100 pci_set_power_state(vptr->pdev, state); in velocity_set_power_state()
502 static void velocity_init_cam_filter(struct velocity_info *vptr) in velocity_init_cam_filter() argument
504 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_cam_filter()
512 memset(vptr->vCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
513 memset(vptr->mCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
514 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
515 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_init_cam_filter()
518 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { in velocity_init_cam_filter()
520 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); in velocity_init_cam_filter()
524 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
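The vCAMmask/mCAMmask arrays above are eight u8 entries covering 64 CAM slots: slot i lands in bit (i % 8) of byte (i / 8). A minimal userspace sketch of that indexing, assuming the 64-slot CAM size; cam_mask_set() is an illustrative helper, not a driver function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mark CAM slot i valid in an 8-byte mask, mirroring
 * vptr->vCAMmask[i / 8] |= 0x1 << (i % 8) above. */
static void cam_mask_set(uint8_t mask[8], unsigned int i)
{
        mask[i / 8] |= 1u << (i % 8);
}

int main(void)
{
        uint8_t vcam_mask[8];
        int b;

        memset(vcam_mask, 0, sizeof(vcam_mask));        /* as the memset()s above */
        cam_mask_set(vcam_mask, 0);     /* slot 0 -> byte 0, bit 0 */
        cam_mask_set(vcam_mask, 10);    /* slot 10 -> byte 1, bit 2 */

        for (b = 0; b < 8; b++)
                printf("mask[%d] = 0x%02x\n", b, vcam_mask[b]);
        return 0;
}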
530 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_add_vid() local
532 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
533 set_bit(vid, vptr->active_vlans); in velocity_vlan_rx_add_vid()
534 velocity_init_cam_filter(vptr); in velocity_vlan_rx_add_vid()
535 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
542 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_kill_vid() local
544 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
545 clear_bit(vid, vptr->active_vlans); in velocity_vlan_rx_kill_vid()
546 velocity_init_cam_filter(vptr); in velocity_vlan_rx_kill_vid()
547 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
551 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) in velocity_init_rx_ring_indexes() argument
553 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; in velocity_init_rx_ring_indexes()
563 static void velocity_rx_reset(struct velocity_info *vptr) in velocity_rx_reset() argument
566 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_rx_reset()
569 velocity_init_rx_ring_indexes(vptr); in velocity_rx_reset()
574 for (i = 0; i < vptr->options.numrx; ++i) in velocity_rx_reset()
575 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; in velocity_rx_reset()
577 writew(vptr->options.numrx, &regs->RBRDU); in velocity_rx_reset()
578 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_rx_reset()
580 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_rx_reset()
591 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) in velocity_get_opt_media_mode() argument
595 switch (vptr->options.spd_dpx) { in velocity_get_opt_media_mode()
615 vptr->mii_status = status; in velocity_get_opt_media_mode()
792 static void set_mii_flow_control(struct velocity_info *vptr) in set_mii_flow_control() argument
795 switch (vptr->options.flow_cntl) { in set_mii_flow_control()
797 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
798 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
802 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
803 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
807 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
808 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
812 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
813 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
826 static void mii_set_auto_on(struct velocity_info *vptr) in mii_set_auto_on() argument
828 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) in mii_set_auto_on()
829 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
831 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
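mii_set_auto_on() restarts auto-negotiation if it is already enabled and merely enables it otherwise. A userspace model of that decision on a plain u16, using the BMCR bit values from linux/mii.h; set_auto_on() here is an illustrative reduction of the register-access macros:

#include <stdint.h>
#include <stdio.h>

#define BMCR_ANRESTART  0x0200  /* restart auto-negotiation (linux/mii.h) */
#define BMCR_ANENABLE   0x1000  /* enable auto-negotiation (linux/mii.h) */

/* Model of mii_set_auto_on(): restart autoneg if already enabled,
 * otherwise just enable it, as the lines above do. */
static void set_auto_on(uint16_t *bmcr)
{
        if (*bmcr & BMCR_ANENABLE)
                *bmcr |= BMCR_ANRESTART;
        else
                *bmcr |= BMCR_ANENABLE;
}

int main(void)
{
        uint16_t bmcr = BMCR_ANENABLE;  /* autoneg already on */

        set_auto_on(&bmcr);
        printf("BMCR = 0x%04x (restart bit %s)\n", bmcr,
               (bmcr & BMCR_ANRESTART) ? "set" : "clear");
        return 0;
}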
876 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) in velocity_set_media_mode() argument
879 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_media_mode()
881 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); in velocity_set_media_mode()
882 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); in velocity_set_media_mode()
885 set_mii_flow_control(vptr); in velocity_set_media_mode()
898 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_media_mode()
899 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_media_mode()
909 …_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); in velocity_set_media_mode()
910 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_media_mode()
911 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); in velocity_set_media_mode()
914 mii_set_auto_on(vptr); in velocity_set_media_mode()
941 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
947 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
951 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); in velocity_set_media_mode()
957 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); in velocity_set_media_mode()
965 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); in velocity_set_media_mode()
978 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); in velocity_set_media_mode()
980 mii_set_auto_on(vptr); in velocity_set_media_mode()
996 static void velocity_print_link_status(struct velocity_info *vptr) in velocity_print_link_status() argument
999 if (vptr->mii_status & VELOCITY_LINK_FAIL) { in velocity_print_link_status()
1000 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); in velocity_print_link_status()
1001 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_print_link_status()
1002 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); in velocity_print_link_status()
1004 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_print_link_status()
1006 else if (vptr->mii_status & VELOCITY_SPEED_100) in velocity_print_link_status()
1011 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_print_link_status()
1016 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); in velocity_print_link_status()
1017 switch (vptr->options.spd_dpx) { in velocity_print_link_status()
1046 static void enable_flow_control_ability(struct velocity_info *vptr) in enable_flow_control_ability() argument
1049 struct mac_regs __iomem *regs = vptr->mac_regs; in enable_flow_control_ability()
1051 switch (vptr->options.flow_cntl) { in enable_flow_control_ability()
1098 static int velocity_soft_reset(struct velocity_info *vptr) in velocity_soft_reset() argument
1100 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_soft_reset()
1130 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_multi() local
1131 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_multi()
1140 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || in velocity_set_multi()
1146 int offset = MCAM_SIZE - vptr->multicast_limit; in velocity_set_multi()
1147 mac_get_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1152 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); in velocity_set_multi()
1156 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1177 static void mii_init(struct velocity_info *vptr, u32 mii_status) in mii_init() argument
1181 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { in mii_init()
1184 MII_ADVERTISE, vptr->mac_regs); in mii_init()
1185 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1187 vptr->mac_regs); in mii_init()
1190 vptr->mac_regs); in mii_init()
1191 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1197 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1203 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1204 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1206 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1210 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1217 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1223 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1224 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1226 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1234 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); in mii_init()
1238 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1243 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); in mii_init()
1246 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); in mii_init()
1256 static void setup_queue_timers(struct velocity_info *vptr) in setup_queue_timers() argument
1259 if (vptr->rev_id >= REV_ID_VT3216_A0) { in setup_queue_timers()
1263 if (vptr->mii_status & (VELOCITY_SPEED_1000 | in setup_queue_timers()
1265 txqueue_timer = vptr->options.txqueue_timer; in setup_queue_timers()
1266 rxqueue_timer = vptr->options.rxqueue_timer; in setup_queue_timers()
1269 writeb(txqueue_timer, &vptr->mac_regs->TQETMR); in setup_queue_timers()
1270 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); in setup_queue_timers()
1282 static void setup_adaptive_interrupts(struct velocity_info *vptr) in setup_adaptive_interrupts() argument
1284 struct mac_regs __iomem *regs = vptr->mac_regs; in setup_adaptive_interrupts()
1285 u16 tx_intsup = vptr->options.tx_intsup; in setup_adaptive_interrupts()
1286 u16 rx_intsup = vptr->options.rx_intsup; in setup_adaptive_interrupts()
1289 vptr->int_mask = INT_MASK_DEF; in setup_adaptive_interrupts()
1294 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | in setup_adaptive_interrupts()
1303 vptr->int_mask &= ~ISR_PRXI; in setup_adaptive_interrupts()
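setup_adaptive_interrupts() trades per-packet interrupts for coalesced ones: when a suppression threshold is configured, the matching per-packet sources are cleared from the interrupt mask. A sketch of that masking; the bit positions and INT_MASK_DEF value below are placeholders, not the driver's real ISR_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the driver's ISR_* values differ. */
#define ISR_PTXI        (1u << 4)       /* per-packet TX interrupt */
#define ISR_PRXI        (1u << 5)       /* per-packet RX interrupt */
#define INT_MASK_DEF    0xffffffffu     /* stand-in default mask */

int main(void)
{
        uint32_t int_mask = INT_MASK_DEF;
        unsigned int tx_intsup = 4, rx_intsup = 4;      /* stand-in thresholds */

        /* With suppression active, drop the per-packet interrupt
         * sources from the mask, as the lines above do; the hardware
         * then interrupts once per batch of frames instead. */
        if (tx_intsup)
                int_mask &= ~ISR_PTXI;
        if (rx_intsup)
                int_mask &= ~ISR_PRXI;

        printf("int_mask = 0x%08x\n", int_mask);
        return 0;
}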
1320 static void velocity_init_registers(struct velocity_info *vptr, in velocity_init_registers() argument
1323 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_registers()
1324 struct net_device *netdev = vptr->netdev; in velocity_init_registers()
1338 velocity_rx_reset(vptr); in velocity_init_registers()
1342 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1343 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1344 velocity_print_link_status(vptr); in velocity_init_registers()
1345 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1349 enable_flow_control_ability(vptr); in velocity_init_registers()
1363 velocity_soft_reset(vptr); in velocity_init_registers()
1366 if (!vptr->no_eeprom) { in velocity_init_registers()
1376 mac_set_rx_thresh(regs, vptr->options.rx_thresh); in velocity_init_registers()
1377 mac_set_dma_length(regs, vptr->options.DMA_length); in velocity_init_registers()
1388 velocity_init_cam_filter(vptr); in velocity_init_registers()
1400 setup_adaptive_interrupts(vptr); in velocity_init_registers()
1402 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_init_registers()
1403 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_init_registers()
1407 writew(vptr->options.numtx - 1, &regs->TDCSize); in velocity_init_registers()
1409 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_registers()
1410 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); in velocity_init_registers()
1414 init_flow_control_register(vptr); in velocity_init_registers()
1419 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1422 mii_init(vptr, mii_status); in velocity_init_registers()
1424 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1425 velocity_print_link_status(vptr); in velocity_init_registers()
1426 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1430 enable_flow_control_ability(vptr); in velocity_init_registers()
1432 mac_write_int_mask(vptr->int_mask, regs); in velocity_init_registers()
1438 static void velocity_give_many_rx_descs(struct velocity_info *vptr) in velocity_give_many_rx_descs() argument
1440 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_give_many_rx_descs()
1447 if (vptr->rx.filled < 4) in velocity_give_many_rx_descs()
1452 unusable = vptr->rx.filled & 0x0003; in velocity_give_many_rx_descs()
1453 dirty = vptr->rx.dirty - unusable; in velocity_give_many_rx_descs()
1454 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { in velocity_give_many_rx_descs()
1455 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; in velocity_give_many_rx_descs()
1456 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; in velocity_give_many_rx_descs()
1459 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); in velocity_give_many_rx_descs()
1460 vptr->rx.filled = unusable; in velocity_give_many_rx_descs()
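The hardware takes returned RX descriptors only in blocks of four, so velocity_give_many_rx_descs() rounds the filled count down with & 0xfffc, walks backwards from the dirty index flipping ownership, and carries the remainder to the next call. A self-contained model of that arithmetic, assuming a 16-entry ring and an illustrative OWNED_BY_NIC value:

#include <stdint.h>
#include <stdio.h>

#define NUM_RX          16              /* stand-in for vptr->options.numrx */
#define OWNED_BY_NIC    0x8000u         /* illustrative ownership flag */

struct rx_desc { uint16_t len; };       /* trimmed model of the real descriptor */

/* Round "filled" down to a multiple of four, hand those descriptors
 * back to the NIC walking backwards from dirty, return the remainder. */
static unsigned int give_rx_descs(struct rx_desc *ring, unsigned int dirty,
                                  unsigned int filled)
{
        unsigned int unusable = filled & 0x0003;
        int d = (int)dirty - (int)unusable;
        unsigned int avail;

        for (avail = filled & 0xfffc; avail; avail--) {
                d = (d > 0) ? d - 1 : NUM_RX - 1;
                ring[d].len |= OWNED_BY_NIC;
        }
        /* here the driver writes (filled & 0xfffc) to the RBRDU register */
        return unusable;        /* new "filled" count */
}

int main(void)
{
        struct rx_desc ring[NUM_RX] = { { 0 } };
        unsigned int filled = 7;        /* 7 refilled: 4 returned, 3 carried over */

        filled = give_rx_descs(ring, 7, filled);
        printf("carried over: %u\n", filled);   /* prints 3 */
        return 0;
}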
1470 static int velocity_init_dma_rings(struct velocity_info *vptr) in velocity_init_dma_rings() argument
1472 struct velocity_opt *opt = &vptr->options; in velocity_init_dma_rings()
1485 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + in velocity_init_dma_rings()
1488 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", in velocity_init_dma_rings()
1489 vptr->netdev->name); in velocity_init_dma_rings()
1493 vptr->rx.ring = pool; in velocity_init_dma_rings()
1494 vptr->rx.pool_dma = pool_dma; in velocity_init_dma_rings()
1499 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_dma_rings()
1500 vptr->tx.rings[i] = pool; in velocity_init_dma_rings()
1501 vptr->tx.pool_dma[i] = pool_dma; in velocity_init_dma_rings()
1509 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) in velocity_set_rxbufsize() argument
1511 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; in velocity_set_rxbufsize()
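velocity_set_rxbufsize() gives every standard-MTU configuration one fixed buffer size and jumbo MTUs the MTU plus 32 bytes of slack. A tiny sketch of the formula, assuming PKT_BUF_SZ is the header's 1540 default:

#include <stdio.h>

#define ETH_DATA_LEN    1500    /* standard Ethernet MTU */
#define PKT_BUF_SZ      1540    /* assumed driver default RX buffer size */

/* Model of velocity_set_rxbufsize(). */
static int rx_buf_sz(int mtu)
{
        return (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

int main(void)
{
        printf("mtu 1500 -> %d\n", rx_buf_sz(1500));
        printf("mtu 9000 -> %d\n", rx_buf_sz(9000));
        return 0;
}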
1524 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) in velocity_alloc_rx_buf() argument
1526 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_alloc_rx_buf()
1527 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_alloc_rx_buf()
1529 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); in velocity_alloc_rx_buf()
1539 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, in velocity_alloc_rx_buf()
1540 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_alloc_rx_buf()
1547 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; in velocity_alloc_rx_buf()
1554 static int velocity_rx_refill(struct velocity_info *vptr) in velocity_rx_refill() argument
1556 int dirty = vptr->rx.dirty, done = 0; in velocity_rx_refill()
1559 struct rx_desc *rd = vptr->rx.ring + dirty; in velocity_rx_refill()
1565 if (!vptr->rx.info[dirty].skb) { in velocity_rx_refill()
1566 if (velocity_alloc_rx_buf(vptr, dirty) < 0) in velocity_rx_refill()
1570 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; in velocity_rx_refill()
1571 } while (dirty != vptr->rx.curr); in velocity_rx_refill()
1574 vptr->rx.dirty = dirty; in velocity_rx_refill()
1575 vptr->rx.filled += done; in velocity_rx_refill()
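velocity_rx_refill() walks forward from the dirty index, (re)allocating a buffer for each empty slot, and stops when it catches up with rx.curr. A userspace model of the walk; have_skb[] stands in for the rx.info[].skb pointers, allocation is assumed to always succeed, and the ownership-bit early exit is omitted:

#include <stdio.h>

#define NUM_RX 16       /* stand-in for vptr->options.numrx */

/* Walk from dirty to curr, filling empty slots; return the fill count. */
static int rx_refill(int have_skb[NUM_RX], int *dirty_p, int curr)
{
        int dirty = *dirty_p, done = 0;

        do {
                if (!have_skb[dirty]) {
                        have_skb[dirty] = 1;    /* velocity_alloc_rx_buf() */
                        done++;
                }
                dirty = (dirty < NUM_RX - 1) ? dirty + 1 : 0;
        } while (dirty != curr);

        *dirty_p = dirty;
        return done;    /* caller adds this to rx.filled */
}

int main(void)
{
        int have_skb[NUM_RX] = { 0 };
        int dirty = 14;

        /* slots 14, 15, 0, 1 are empty; curr is 2 */
        printf("refilled %d buffers\n", rx_refill(have_skb, &dirty, 2));
        printf("dirty now %d\n", dirty);
        return 0;
}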
1588 static void velocity_free_rd_ring(struct velocity_info *vptr) in velocity_free_rd_ring() argument
1592 if (vptr->rx.info == NULL) in velocity_free_rd_ring()
1595 for (i = 0; i < vptr->options.numrx; i++) { in velocity_free_rd_ring()
1596 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); in velocity_free_rd_ring()
1597 struct rx_desc *rd = vptr->rx.ring + i; in velocity_free_rd_ring()
1603 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_free_rd_ring()
1611 kfree(vptr->rx.info); in velocity_free_rd_ring()
1612 vptr->rx.info = NULL; in velocity_free_rd_ring()
1622 static int velocity_init_rd_ring(struct velocity_info *vptr) in velocity_init_rd_ring() argument
1626 vptr->rx.info = kcalloc(vptr->options.numrx, in velocity_init_rd_ring()
1628 if (!vptr->rx.info) in velocity_init_rd_ring()
1631 velocity_init_rx_ring_indexes(vptr); in velocity_init_rd_ring()
1633 if (velocity_rx_refill(vptr) != vptr->options.numrx) { in velocity_init_rd_ring()
1635 "%s: failed to allocate RX buffer.\n", vptr->netdev->name); in velocity_init_rd_ring()
1636 velocity_free_rd_ring(vptr); in velocity_init_rd_ring()
1653 static int velocity_init_td_ring(struct velocity_info *vptr) in velocity_init_td_ring() argument
1658 for (j = 0; j < vptr->tx.numq; j++) { in velocity_init_td_ring()
1660 vptr->tx.infos[j] = kcalloc(vptr->options.numtx, in velocity_init_td_ring()
1663 if (!vptr->tx.infos[j]) { in velocity_init_td_ring()
1665 kfree(vptr->tx.infos[j]); in velocity_init_td_ring()
1669 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; in velocity_init_td_ring()
1680 static void velocity_free_dma_rings(struct velocity_info *vptr) in velocity_free_dma_rings() argument
1682 const int size = vptr->options.numrx * sizeof(struct rx_desc) + in velocity_free_dma_rings()
1683 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; in velocity_free_dma_rings()
1685 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); in velocity_free_dma_rings()
1688 static int velocity_init_rings(struct velocity_info *vptr, int mtu) in velocity_init_rings() argument
1692 velocity_set_rxbufsize(vptr, mtu); in velocity_init_rings()
1694 ret = velocity_init_dma_rings(vptr); in velocity_init_rings()
1698 ret = velocity_init_rd_ring(vptr); in velocity_init_rings()
1702 ret = velocity_init_td_ring(vptr); in velocity_init_rings()
1709 velocity_free_rd_ring(vptr); in velocity_init_rings()
1711 velocity_free_dma_rings(vptr); in velocity_init_rings()
1723 static void velocity_free_tx_buf(struct velocity_info *vptr, in velocity_free_tx_buf() argument
1742 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], in velocity_free_tx_buf()
1753 static void velocity_free_td_ring_entry(struct velocity_info *vptr, in velocity_free_td_ring_entry() argument
1756 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); in velocity_free_td_ring_entry()
1765 dma_unmap_single(vptr->dev, td_info->skb_dma[i], in velocity_free_td_ring_entry()
1782 static void velocity_free_td_ring(struct velocity_info *vptr) in velocity_free_td_ring() argument
1786 for (j = 0; j < vptr->tx.numq; j++) { in velocity_free_td_ring()
1787 if (vptr->tx.infos[j] == NULL) in velocity_free_td_ring()
1789 for (i = 0; i < vptr->options.numtx; i++) in velocity_free_td_ring()
1790 velocity_free_td_ring_entry(vptr, j, i); in velocity_free_td_ring()
1792 kfree(vptr->tx.infos[j]); in velocity_free_td_ring()
1793 vptr->tx.infos[j] = NULL; in velocity_free_td_ring()
1797 static void velocity_free_rings(struct velocity_info *vptr) in velocity_free_rings() argument
1799 velocity_free_td_ring(vptr); in velocity_free_rings()
1800 velocity_free_rd_ring(vptr); in velocity_free_rings()
1801 velocity_free_dma_rings(vptr); in velocity_free_rings()
1815 static void velocity_error(struct velocity_info *vptr, int status) in velocity_error() argument
1819 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1824 netif_stop_queue(vptr->netdev); in velocity_error()
1831 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1834 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_error()
1835 vptr->mii_status = check_connection_type(regs); in velocity_error()
1842 if (vptr->rev_id < REV_ID_VT3216_A0) { in velocity_error()
1843 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_error()
1851 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) in velocity_error()
1856 setup_queue_timers(vptr); in velocity_error()
1864 vptr->mii_status &= ~VELOCITY_LINK_FAIL; in velocity_error()
1865 netif_carrier_on(vptr->netdev); in velocity_error()
1867 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_error()
1868 netif_carrier_off(vptr->netdev); in velocity_error()
1871 velocity_print_link_status(vptr); in velocity_error()
1872 enable_flow_control_ability(vptr); in velocity_error()
1881 if (vptr->mii_status & VELOCITY_LINK_FAIL) in velocity_error()
1882 netif_stop_queue(vptr->netdev); in velocity_error()
1884 netif_wake_queue(vptr->netdev); in velocity_error()
1888 velocity_update_hw_mibs(vptr); in velocity_error()
1890 mac_rx_queue_wake(vptr->mac_regs); in velocity_error()
1901 static int velocity_tx_srv(struct velocity_info *vptr) in velocity_tx_srv() argument
1909 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_tx_srv()
1911 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { in velocity_tx_srv()
1912 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; in velocity_tx_srv()
1913 idx = (idx + 1) % vptr->options.numtx) { in velocity_tx_srv()
1918 td = &(vptr->tx.rings[qnum][idx]); in velocity_tx_srv()
1919 tdinfo = &(vptr->tx.infos[qnum][idx]); in velocity_tx_srv()
1942 velocity_free_tx_buf(vptr, tdinfo, td); in velocity_tx_srv()
1943 vptr->tx.used[qnum]--; in velocity_tx_srv()
1945 vptr->tx.tail[qnum] = idx; in velocity_tx_srv()
1947 if (AVAIL_TD(vptr, qnum) < 1) in velocity_tx_srv()
1954 if (netif_queue_stopped(vptr->netdev) && (full == 0) && in velocity_tx_srv()
1955 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { in velocity_tx_srv()
1956 netif_wake_queue(vptr->netdev); in velocity_tx_srv()
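On the completion side, velocity_tx_srv() lets the tail index chase the producer, reclaiming one descriptor per step with modular wraparound and stopping at the first descriptor still owned by the NIC. An illustrative reduction, with completed[] standing in for the ownership bit:

#include <stdio.h>

#define NUM_TX 16       /* stand-in for vptr->options.numtx */

/* Reclaim finished TX descriptors from tail forward; return the count. */
static int tx_srv(int completed[NUM_TX], int *tail_p, int *used_p)
{
        int idx, works = 0;

        for (idx = *tail_p; *used_p > 0; idx = (idx + 1) % NUM_TX) {
                if (!completed[idx])    /* still owned by the NIC: stop */
                        break;
                completed[idx] = 0;     /* velocity_free_tx_buf() */
                (*used_p)--;
                works++;
        }
        *tail_p = idx;
        return works;
}

int main(void)
{
        int completed[NUM_TX] = { 0 };
        int tail = 14, used = 3;

        completed[14] = completed[15] = 1;      /* two of three finished */
        printf("reclaimed %d, tail %d, used %d\n",
               tx_srv(completed, &tail, &used), tail, used);
        return 0;
}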
1998 struct velocity_info *vptr) in velocity_rx_copy() argument
2004 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); in velocity_rx_copy()
2025 static inline void velocity_iph_realign(struct velocity_info *vptr, in velocity_iph_realign() argument
2028 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { in velocity_iph_realign()
2042 static int velocity_receive_frame(struct velocity_info *vptr, int idx) in velocity_receive_frame() argument
2044 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_receive_frame()
2045 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_receive_frame()
2046 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_receive_frame()
2052 …VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->n… in velocity_receive_frame()
2062 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2063 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2067 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { in velocity_receive_frame()
2068 velocity_iph_realign(vptr, skb, pkt_len); in velocity_receive_frame()
2070 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_receive_frame()
2073 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2074 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2078 skb->protocol = eth_type_trans(skb, vptr->netdev); in velocity_receive_frame()
2101 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) in velocity_rx_srv() argument
2103 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_rx_srv()
2104 int rd_curr = vptr->rx.curr; in velocity_rx_srv()
2108 struct rx_desc *rd = vptr->rx.ring + rd_curr; in velocity_rx_srv()
2110 if (!vptr->rx.info[rd_curr].skb) in velocity_rx_srv()
2122 if (velocity_receive_frame(vptr, rd_curr) < 0) in velocity_rx_srv()
2136 if (rd_curr >= vptr->options.numrx) in velocity_rx_srv()
2141 vptr->rx.curr = rd_curr; in velocity_rx_srv()
2143 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) in velocity_rx_srv()
2144 velocity_give_many_rx_descs(vptr); in velocity_rx_srv()
2152 struct velocity_info *vptr = container_of(napi, in velocity_poll() local
2161 rx_done = velocity_rx_srv(vptr, budget); in velocity_poll()
2162 spin_lock_irqsave(&vptr->lock, flags); in velocity_poll()
2163 velocity_tx_srv(vptr); in velocity_poll()
2167 mac_enable_int(vptr->mac_regs); in velocity_poll()
2169 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_poll()
2187 struct velocity_info *vptr = netdev_priv(dev); in velocity_intr() local
2190 spin_lock(&vptr->lock); in velocity_intr()
2191 isr_status = mac_read_isr(vptr->mac_regs); in velocity_intr()
2195 spin_unlock(&vptr->lock); in velocity_intr()
2200 mac_write_isr(vptr->mac_regs, isr_status); in velocity_intr()
2202 if (likely(napi_schedule_prep(&vptr->napi))) { in velocity_intr()
2203 mac_disable_int(vptr->mac_regs); in velocity_intr()
2204 __napi_schedule(&vptr->napi); in velocity_intr()
2208 velocity_error(vptr, isr_status); in velocity_intr()
2210 spin_unlock(&vptr->lock); in velocity_intr()
2227 struct velocity_info *vptr = netdev_priv(dev); in velocity_open() local
2230 ret = velocity_init_rings(vptr, dev->mtu); in velocity_open()
2235 velocity_set_power_state(vptr, PCI_D0); in velocity_open()
2237 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_open()
2243 velocity_set_power_state(vptr, PCI_D3hot); in velocity_open()
2244 velocity_free_rings(vptr); in velocity_open()
2248 velocity_give_many_rx_descs(vptr); in velocity_open()
2250 mac_enable_int(vptr->mac_regs); in velocity_open()
2252 napi_enable(&vptr->napi); in velocity_open()
2253 vptr->flags |= VELOCITY_FLAGS_OPENED; in velocity_open()
2265 static void velocity_shutdown(struct velocity_info *vptr) in velocity_shutdown() argument
2267 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_shutdown()
2287 struct velocity_info *vptr = netdev_priv(dev); in velocity_change_mtu() local
2292 vptr->netdev->name); in velocity_change_mtu()
2315 tmp_vptr->pdev = vptr->pdev; in velocity_change_mtu()
2316 tmp_vptr->dev = vptr->dev; in velocity_change_mtu()
2317 tmp_vptr->options = vptr->options; in velocity_change_mtu()
2318 tmp_vptr->tx.numq = vptr->tx.numq; in velocity_change_mtu()
2324 napi_disable(&vptr->napi); in velocity_change_mtu()
2326 spin_lock_irqsave(&vptr->lock, flags); in velocity_change_mtu()
2329 velocity_shutdown(vptr); in velocity_change_mtu()
2331 rx = vptr->rx; in velocity_change_mtu()
2332 tx = vptr->tx; in velocity_change_mtu()
2334 vptr->rx = tmp_vptr->rx; in velocity_change_mtu()
2335 vptr->tx = tmp_vptr->tx; in velocity_change_mtu()
2342 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_change_mtu()
2344 velocity_give_many_rx_descs(vptr); in velocity_change_mtu()
2346 napi_enable(&vptr->napi); in velocity_change_mtu()
2348 mac_enable_int(vptr->mac_regs); in velocity_change_mtu()
2351 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_change_mtu()
2391 struct velocity_info *vptr = netdev_priv(dev); in velocity_mii_ioctl() local
2392 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_mii_ioctl()
2402 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) in velocity_mii_ioctl()
2406 spin_lock_irqsave(&vptr->lock, flags); in velocity_mii_ioctl()
2407 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); in velocity_mii_ioctl()
2408 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_mii_ioctl()
2409 check_connection_type(vptr->mac_regs); in velocity_mii_ioctl()
2430 struct velocity_info *vptr = netdev_priv(dev); in velocity_ioctl() local
2437 velocity_set_power_state(vptr, PCI_D0); in velocity_ioctl()
2450 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ioctl()
2468 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_stats() local
2474 spin_lock_irq(&vptr->lock); in velocity_get_stats()
2475 velocity_update_hw_mibs(vptr); in velocity_get_stats()
2476 spin_unlock_irq(&vptr->lock); in velocity_get_stats()
2478 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; in velocity_get_stats()
2479 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; in velocity_get_stats()
2480 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; in velocity_get_stats()
2483 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; in velocity_get_stats()
2487 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; in velocity_get_stats()
2507 struct velocity_info *vptr = netdev_priv(dev); in velocity_close() local
2509 napi_disable(&vptr->napi); in velocity_close()
2511 velocity_shutdown(vptr); in velocity_close()
2513 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) in velocity_close()
2514 velocity_get_ip(vptr); in velocity_close()
2518 velocity_free_rings(vptr); in velocity_close()
2520 vptr->flags &= (~VELOCITY_FLAGS_OPENED); in velocity_close()
2535 struct velocity_info *vptr = netdev_priv(dev); in velocity_xmit() local
2558 spin_lock_irqsave(&vptr->lock, flags); in velocity_xmit()
2560 index = vptr->tx.curr[qnum]; in velocity_xmit()
2561 td_ptr = &(vptr->tx.rings[qnum][index]); in velocity_xmit()
2562 tdinfo = &(vptr->tx.infos[qnum][index]); in velocity_xmit()
2572 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, in velocity_xmit()
2583 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, in velocity_xmit()
2615 prev = vptr->options.numtx - 1; in velocity_xmit()
2617 vptr->tx.used[qnum]++; in velocity_xmit()
2618 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; in velocity_xmit()
2620 if (AVAIL_TD(vptr, qnum) < 1) in velocity_xmit()
2623 td_ptr = &(vptr->tx.rings[qnum][prev]); in velocity_xmit()
2625 mac_tx_queue_wake(vptr->mac_regs, qnum); in velocity_xmit()
2627 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_xmit()
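The producer side in velocity_xmit() mirrors this: take the current slot, remember its predecessor with a manual wrap, advance curr modularly, and stop the queue when no descriptors remain. A sketch assuming AVAIL_TD() means numtx minus the used count:

#include <stdio.h>

#define NUM_TX 16       /* stand-in for vptr->options.numtx */
#define AVAIL_TD(used)  (NUM_TX - (used))       /* assumed meaning of AVAIL_TD() */

int main(void)
{
        int curr = 0, used = 0;

        /* Take the current slot, remember its predecessor (with wrap),
         * then advance modularly and account for the in-flight frame. */
        int index = curr;
        int prev = index - 1;

        if (prev < 0)
                prev = NUM_TX - 1;
        used++;
        curr = (index + 1) % NUM_TX;

        printf("index %d, prev %d, next curr %d, avail %d\n",
               index, prev, curr, AVAIL_TD(used));
        if (AVAIL_TD(used) < 1)
                printf("ring full: netif_stop_queue()\n");
        return 0;
}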
2658 static void velocity_init_info(struct velocity_info *vptr, in velocity_init_info() argument
2661 vptr->chip_id = info->chip_id; in velocity_init_info()
2662 vptr->tx.numq = info->txqueue; in velocity_init_info()
2663 vptr->multicast_limit = MCAM_SIZE; in velocity_init_info()
2664 spin_lock_init(&vptr->lock); in velocity_init_info()
2675 static int velocity_get_pci_info(struct velocity_info *vptr) in velocity_get_pci_info() argument
2677 struct pci_dev *pdev = vptr->pdev; in velocity_get_pci_info()
2681 vptr->ioaddr = pci_resource_start(pdev, 0); in velocity_get_pci_info()
2682 vptr->memaddr = pci_resource_start(pdev, 1); in velocity_get_pci_info()
2711 static int velocity_get_platform_info(struct velocity_info *vptr) in velocity_get_platform_info() argument
2716 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) in velocity_get_platform_info()
2717 vptr->no_eeprom = 1; in velocity_get_platform_info()
2719 ret = of_address_to_resource(vptr->dev->of_node, 0, &res); in velocity_get_platform_info()
2721 dev_err(vptr->dev, "unable to find memory address\n"); in velocity_get_platform_info()
2725 vptr->memaddr = res.start; in velocity_get_platform_info()
2728 dev_err(vptr->dev, "memory region is too small.\n"); in velocity_get_platform_info()
2742 static void velocity_print_info(struct velocity_info *vptr) in velocity_print_info() argument
2744 struct net_device *dev = vptr->netdev; in velocity_print_info()
2746 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); in velocity_print_info()
2753 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_link() local
2754 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_link()
2775 struct velocity_info *vptr; in velocity_probe() local
2794 vptr = netdev_priv(netdev); in velocity_probe()
2805 vptr->netdev = netdev; in velocity_probe()
2806 vptr->dev = dev; in velocity_probe()
2808 velocity_init_info(vptr, info); in velocity_probe()
2811 vptr->pdev = to_pci_dev(dev); in velocity_probe()
2813 ret = velocity_get_pci_info(vptr); in velocity_probe()
2817 vptr->pdev = NULL; in velocity_probe()
2818 ret = velocity_get_platform_info(vptr); in velocity_probe()
2823 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); in velocity_probe()
2829 vptr->mac_regs = regs; in velocity_probe()
2830 vptr->rev_id = readb(®s->rev_id); in velocity_probe()
2840 velocity_get_options(&vptr->options, velocity_nics, drv_string); in velocity_probe()
2846 vptr->options.flags &= info->flags; in velocity_probe()
2852 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); in velocity_probe()
2854 vptr->wol_opts = vptr->options.wol_opts; in velocity_probe()
2855 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_probe()
2857 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); in velocity_probe()
2861 netif_napi_add(netdev, &vptr->napi, velocity_poll, in velocity_probe()
2876 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_probe()
2879 velocity_print_info(vptr); in velocity_probe()
2880 dev_set_drvdata(vptr->dev, netdev); in velocity_probe()
2884 velocity_set_power_state(vptr, PCI_D3hot); in velocity_probe()
2890 netif_napi_del(&vptr->napi); in velocity_probe()
2908 struct velocity_info *vptr = netdev_priv(netdev); in velocity_remove() local
2911 netif_napi_del(&vptr->napi); in velocity_remove()
2912 iounmap(vptr->mac_regs); in velocity_remove()
3024 static int velocity_set_wol(struct velocity_info *vptr) in velocity_set_wol() argument
3026 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_wol()
3027 enum speed_opt spd_dpx = vptr->options.spd_dpx; in velocity_set_wol()
3045 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_set_wol()
3048 if (vptr->wol_opts & VELOCITY_WOL_ARP) { in velocity_set_wol()
3059 memcpy(arp->ar_tip, vptr->ip_addr, 4); in velocity_set_wol()
3079 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { in velocity_set_wol()
3080 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_wol()
3081 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_wol()
3083 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_wol()
3086 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_set_wol()
3087 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in velocity_set_wol()
3119 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_save_context() argument
3121 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_save_context()
3139 struct velocity_info *vptr = netdev_priv(netdev); in velocity_suspend() local
3142 if (!netif_running(vptr->netdev)) in velocity_suspend()
3145 netif_device_detach(vptr->netdev); in velocity_suspend()
3147 spin_lock_irqsave(&vptr->lock, flags); in velocity_suspend()
3148 if (vptr->pdev) in velocity_suspend()
3149 pci_save_state(vptr->pdev); in velocity_suspend()
3151 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { in velocity_suspend()
3152 velocity_get_ip(vptr); in velocity_suspend()
3153 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3154 velocity_shutdown(vptr); in velocity_suspend()
3155 velocity_set_wol(vptr); in velocity_suspend()
3156 if (vptr->pdev) in velocity_suspend()
3157 pci_enable_wake(vptr->pdev, PCI_D3hot, 1); in velocity_suspend()
3158 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3160 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3161 velocity_shutdown(vptr); in velocity_suspend()
3162 if (vptr->pdev) in velocity_suspend()
3163 pci_disable_device(vptr->pdev); in velocity_suspend()
3164 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3167 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_suspend()
3179 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_restore_context() argument
3181 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_restore_context()
3209 struct velocity_info *vptr = netdev_priv(netdev); in velocity_resume() local
3213 if (!netif_running(vptr->netdev)) in velocity_resume()
3216 velocity_set_power_state(vptr, PCI_D0); in velocity_resume()
3218 if (vptr->pdev) { in velocity_resume()
3219 pci_enable_wake(vptr->pdev, PCI_D0, 0); in velocity_resume()
3220 pci_restore_state(vptr->pdev); in velocity_resume()
3223 mac_wol_reset(vptr->mac_regs); in velocity_resume()
3225 spin_lock_irqsave(&vptr->lock, flags); in velocity_resume()
3226 velocity_restore_context(vptr, &vptr->context); in velocity_resume()
3227 velocity_init_registers(vptr, VELOCITY_INIT_WOL); in velocity_resume()
3228 mac_disable_int(vptr->mac_regs); in velocity_resume()
3230 velocity_tx_srv(vptr); in velocity_resume()
3232 for (i = 0; i < vptr->tx.numq; i++) { in velocity_resume()
3233 if (vptr->tx.used[i]) in velocity_resume()
3234 mac_tx_queue_wake(vptr->mac_regs, i); in velocity_resume()
3237 mac_enable_int(vptr->mac_regs); in velocity_resume()
3238 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_resume()
3239 netif_device_attach(vptr->netdev); in velocity_resume()
3280 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_up() local
3282 velocity_set_power_state(vptr, PCI_D0); in velocity_ethtool_up()
3295 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_down() local
3297 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ethtool_down()
3303 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_settings() local
3304 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_settings()
3306 status = check_connection_type(vptr->mac_regs); in velocity_get_settings()
3318 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_get_settings()
3327 switch (vptr->options.spd_dpx) { in velocity_get_settings()
3371 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_settings() local
3377 curr_status = check_connection_type(vptr->mac_regs); in velocity_set_settings()
3406 vptr->options.spd_dpx = spd_dpx; in velocity_set_settings()
3408 velocity_set_media_mode(vptr, new_status); in velocity_set_settings()
3416 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_drvinfo() local
3420 if (vptr->pdev) in velocity_get_drvinfo()
3421 strlcpy(info->bus_info, pci_name(vptr->pdev), in velocity_get_drvinfo()
3429 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_get_wol() local
3436 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_ethtool_get_wol()
3438 if (vptr->wol_opts & VELOCITY_WOL_ARP) in velocity_ethtool_get_wol()
3440 memcpy(&wol->sopass, vptr->wol_passwd, 6); in velocity_ethtool_get_wol()
3445 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_set_wol() local
3449 vptr->wol_opts = VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3459 vptr->wol_opts |= VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3460 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3463 vptr->wol_opts |= VELOCITY_WOL_UCAST; in velocity_ethtool_set_wol()
3464 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3467 vptr->wol_opts |= VELOCITY_WOL_ARP; in velocity_ethtool_set_wol()
3468 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3470 memcpy(vptr->wol_passwd, wol->sopass, 6); in velocity_ethtool_set_wol()
3530 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_coalesce() local
3532 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; in velocity_get_coalesce()
3533 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; in velocity_get_coalesce()
3535 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer); in velocity_get_coalesce()
3536 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); in velocity_get_coalesce()
3544 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_coalesce() local
3559 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; in velocity_set_coalesce()
3560 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; in velocity_set_coalesce()
3562 set_pending_timer_val(&vptr->options.rxqueue_timer, in velocity_set_coalesce()
3564 set_pending_timer_val(&vptr->options.txqueue_timer, in velocity_set_coalesce()
3568 spin_lock_irqsave(&vptr->lock, flags); in velocity_set_coalesce()
3569 mac_disable_int(vptr->mac_regs); in velocity_set_coalesce()
3570 setup_adaptive_interrupts(vptr); in velocity_set_coalesce()
3571 setup_queue_timers(vptr); in velocity_set_coalesce()
3573 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); in velocity_set_coalesce()
3574 mac_clear_isr(vptr->mac_regs); in velocity_set_coalesce()
3575 mac_enable_int(vptr->mac_regs); in velocity_set_coalesce()
3576 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_set_coalesce()
3639 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_ethtool_stats() local
3640 u32 *p = vptr->mib_counter; in velocity_get_ethtool_stats()
3643 spin_lock_irq(&vptr->lock); in velocity_get_ethtool_stats()
3644 velocity_update_hw_mibs(vptr); in velocity_get_ethtool_stats()
3645 spin_unlock_irq(&vptr->lock); in velocity_get_ethtool_stats()