Lines matching refs:vptr — identifier cross-reference for the VIA Velocity Ethernet driver (via-velocity.c). Each hit shows the source line number, the matching code, and the enclosing function; "argument" and "local" flag whether vptr is a parameter or a local variable on that line.

95 static void velocity_set_power_state(struct velocity_info *vptr, char state)  in velocity_set_power_state()  argument
97 void *addr = vptr->mac_regs; in velocity_set_power_state()
99 if (vptr->pdev) in velocity_set_power_state()
100 pci_set_power_state(vptr->pdev, state); in velocity_set_power_state()
510 static void velocity_init_cam_filter(struct velocity_info *vptr) in velocity_init_cam_filter() argument
512 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_cam_filter()
520 memset(vptr->vCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
521 memset(vptr->mCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
522 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
523 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_init_cam_filter()
526 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { in velocity_init_cam_filter()
528 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); in velocity_init_cam_filter()
532 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
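
The loop above packs one valid bit per CAM slot into an 8-byte mask. A minimal, self-contained C demonstration of the same byte/bit indexing (the slot numbers here are hypothetical; the driver assigns slots in the order it walks active_vlans):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint8_t cam_mask[8];                    /* 64 CAM slots, one valid bit each */
        int slots[] = { 0, 5, 17, 63 };         /* hypothetical occupied slots */
        size_t i;

        memset(cam_mask, 0, sizeof(cam_mask));  /* same reset as memset(vCAMmask, 0, ...) */
        for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
            cam_mask[slots[i] / 8] |= 1u << (slots[i] % 8);

        for (i = 0; i < 8; i++)
            printf("mask[%zu] = 0x%02x\n", i, cam_mask[i]);
        return 0;
    }
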
538 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_add_vid() local
540 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
541 set_bit(vid, vptr->active_vlans); in velocity_vlan_rx_add_vid()
542 velocity_init_cam_filter(vptr); in velocity_vlan_rx_add_vid()
543 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
550 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_kill_vid() local
552 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
553 clear_bit(vid, vptr->active_vlans); in velocity_vlan_rx_kill_vid()
554 velocity_init_cam_filter(vptr); in velocity_vlan_rx_kill_vid()
555 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
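
Both VLAN callbacks follow the same discipline: mutate the software bitmap under the interrupt-safe lock, then rebuild the entire hardware filter from it rather than patching a single CAM entry, so hardware state is always a pure function of active_vlans. A small userspace sketch of that rebuild-from-bitmap shape (types and helpers here are stand-ins, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_N_VID 4096
    static uint64_t active_vlans[VLAN_N_VID / 64];  /* stand-in for the kernel bitmap */

    static void set_vid(int vid)   { active_vlans[vid / 64] |=  (uint64_t)1 << (vid % 64); }
    static void clear_vid(int vid) { active_vlans[vid / 64] &= ~((uint64_t)1 << (vid % 64)); }

    /* Rebuild the (pretend) CAM filter from scratch, as velocity_init_cam_filter() does. */
    static void rebuild_filter(void)
    {
        int vid, slot = 0;
        for (vid = 0; vid < VLAN_N_VID; vid++)
            if (active_vlans[vid / 64] & ((uint64_t)1 << (vid % 64)))
                printf("CAM slot %d <- VID %d\n", slot++, vid);
    }

    int main(void)
    {
        set_vid(100);
        set_vid(200);
        rebuild_filter();   /* VIDs land in slots 0 and 1 */
        clear_vid(100);
        rebuild_filter();   /* VID 200 renumbers down to slot 0 */
        return 0;
    }

The second rebuild shows why regeneration beats patching: removing one VID renumbers every slot after it.
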
559 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) in velocity_init_rx_ring_indexes() argument
561 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; in velocity_init_rx_ring_indexes()
571 static void velocity_rx_reset(struct velocity_info *vptr) in velocity_rx_reset() argument
574 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_rx_reset()
577 velocity_init_rx_ring_indexes(vptr); in velocity_rx_reset()
582 for (i = 0; i < vptr->options.numrx; ++i) in velocity_rx_reset()
583 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; in velocity_rx_reset()
585 writew(vptr->options.numrx, &regs->RBRDU); in velocity_rx_reset()
586 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_rx_reset()
588 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_rx_reset()
599 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) in velocity_get_opt_media_mode() argument
603 switch (vptr->options.spd_dpx) { in velocity_get_opt_media_mode()
623 vptr->mii_status = status; in velocity_get_opt_media_mode()
800 static void set_mii_flow_control(struct velocity_info *vptr) in set_mii_flow_control() argument
803 switch (vptr->options.flow_cntl) { in set_mii_flow_control()
805 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
806 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
810 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
811 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
815 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
816 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
820 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
821 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
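
The four cases above map the driver's flow-control option onto the two IEEE 802.3 pause advertisement bits in MII_ADVERTISE. A compact restatement of that mapping (the enum is a hypothetical stand-in for the driver's FLOW_CNTL_* constants):

    #include <stdio.h>

    enum flow_cntl { FLOW_TX, FLOW_RX, FLOW_TX_RX, FLOW_DISABLE };

    /* Pause bits advertised to the link partner, matching the switch above:
     * CAP alone means symmetric pause; ASYM modifies the direction. */
    static void pause_bits(enum flow_cntl fc, int *cap, int *asym)
    {
        switch (fc) {
        case FLOW_TX:      *cap = 0; *asym = 1; break;  /* send pause, don't honor it */
        case FLOW_RX:      *cap = 1; *asym = 1; break;  /* honor pause from the peer  */
        case FLOW_TX_RX:   *cap = 1; *asym = 0; break;  /* symmetric, both directions */
        case FLOW_DISABLE: *cap = 0; *asym = 0; break;  /* advertise no pause at all  */
        }
    }

    int main(void)
    {
        int cap, asym;
        pause_bits(FLOW_TX, &cap, &asym);
        printf("TX-only: PAUSE_CAP=%d PAUSE_ASYM=%d\n", cap, asym);
        return 0;
    }
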
834 static void mii_set_auto_on(struct velocity_info *vptr) in mii_set_auto_on() argument
836 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) in mii_set_auto_on()
837 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
839 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
884 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) in velocity_set_media_mode() argument
887 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_media_mode()
889 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); in velocity_set_media_mode()
890 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); in velocity_set_media_mode()
893 set_mii_flow_control(vptr); in velocity_set_media_mode()
906 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_media_mode()
907 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_media_mode()
917 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); in velocity_set_media_mode()
918 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_media_mode()
919 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); in velocity_set_media_mode()
922 mii_set_auto_on(vptr); in velocity_set_media_mode()
949 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
955 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
959 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); in velocity_set_media_mode()
965 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); in velocity_set_media_mode()
973 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); in velocity_set_media_mode()
986 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); in velocity_set_media_mode()
988 mii_set_auto_on(vptr); in velocity_set_media_mode()
1004 static void velocity_print_link_status(struct velocity_info *vptr) in velocity_print_link_status() argument
1007 if (vptr->mii_status & VELOCITY_LINK_FAIL) { in velocity_print_link_status()
1008 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); in velocity_print_link_status()
1009 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_print_link_status()
1010 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); in velocity_print_link_status()
1012 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_print_link_status()
1014 else if (vptr->mii_status & VELOCITY_SPEED_100) in velocity_print_link_status()
1019 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_print_link_status()
1024 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); in velocity_print_link_status()
1025 switch (vptr->options.spd_dpx) { in velocity_print_link_status()
1054 static void enable_flow_control_ability(struct velocity_info *vptr) in enable_flow_control_ability() argument
1057 struct mac_regs __iomem *regs = vptr->mac_regs; in enable_flow_control_ability()
1059 switch (vptr->options.flow_cntl) { in enable_flow_control_ability()
1106 static int velocity_soft_reset(struct velocity_info *vptr) in velocity_soft_reset() argument
1108 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_soft_reset()
1138 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_multi() local
1139 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_multi()
1148 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || in velocity_set_multi()
1154 int offset = MCAM_SIZE - vptr->multicast_limit; in velocity_set_multi()
1155 mac_get_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1160 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); in velocity_set_multi()
1164 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1185 static void mii_init(struct velocity_info *vptr, u32 mii_status) in mii_init() argument
1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { in mii_init()
1192 MII_ADVERTISE, vptr->mac_regs); in mii_init()
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1195 vptr->mac_regs); in mii_init()
1198 vptr->mac_regs); in mii_init()
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1205 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1211 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1212 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1214 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1218 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1225 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1231 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1232 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1234 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1242 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); in mii_init()
1246 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1251 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); in mii_init()
1254 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); in mii_init()
1264 static void setup_queue_timers(struct velocity_info *vptr) in setup_queue_timers() argument
1267 if (vptr->rev_id >= REV_ID_VT3216_A0) { in setup_queue_timers()
1271 if (vptr->mii_status & (VELOCITY_SPEED_1000 | in setup_queue_timers()
1273 txqueue_timer = vptr->options.txqueue_timer; in setup_queue_timers()
1274 rxqueue_timer = vptr->options.rxqueue_timer; in setup_queue_timers()
1277 writeb(txqueue_timer, &vptr->mac_regs->TQETMR); in setup_queue_timers()
1278 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); in setup_queue_timers()
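
Read together, lines 1267-1278 say: the TX/RX queue hold-off timers exist only on VT3216_A0 and later silicon, and stay at zero unless the link runs at 100M or 1G. A condensed reassembly of the function, hedged to the identifiers visible above:

    if (vptr->rev_id >= REV_ID_VT3216_A0) {
        u8 txqueue_timer = 0;
        u8 rxqueue_timer = 0;

        /* hold-off only pays off at higher speeds; at 10M leave it at zero */
        if (vptr->mii_status & (VELOCITY_SPEED_1000 | VELOCITY_SPEED_100)) {
            txqueue_timer = vptr->options.txqueue_timer;
            rxqueue_timer = vptr->options.rxqueue_timer;
        }
        writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
        writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
    }
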
1290 static void setup_adaptive_interrupts(struct velocity_info *vptr) in setup_adaptive_interrupts() argument
1292 struct mac_regs __iomem *regs = vptr->mac_regs; in setup_adaptive_interrupts()
1293 u16 tx_intsup = vptr->options.tx_intsup; in setup_adaptive_interrupts()
1294 u16 rx_intsup = vptr->options.rx_intsup; in setup_adaptive_interrupts()
1297 vptr->int_mask = INT_MASK_DEF; in setup_adaptive_interrupts()
1302 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | in setup_adaptive_interrupts()
1311 vptr->int_mask &= ~ISR_PRXI; in setup_adaptive_interrupts()
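
The suppression logic is two-sided: a nonzero threshold is written to the chip and, at the same time, the per-packet completion interrupt bits are masked out of int_mask so only the batched interrupts remain. A sketch of the sequence, assuming the CAMCR page-select and ISRCTL_*SUPDIS constants from the driver's header (ISRCTL is banked behind CAMCR, hence the page writes):

    vptr->int_mask = INT_MASK_DEF;

    writeb(CAMCR_PS0, &regs->CAMCR);            /* select the TX threshold page */
    if (tx_intsup != 0) {
        vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                            ISR_PTX2I | ISR_PTX3I);
        writew(tx_intsup, &regs->ISRCTL);
    } else
        writew(ISRCTL_TSUPDIS, &regs->ISRCTL);  /* suppression disabled */

    writeb(CAMCR_PS1, &regs->CAMCR);            /* select the RX threshold page */
    if (rx_intsup != 0) {
        vptr->int_mask &= ~ISR_PRXI;
        writew(rx_intsup, &regs->ISRCTL);
    } else
        writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

    writeb(0, &regs->CAMCR);                    /* back to the default page */
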
1328 static void velocity_init_registers(struct velocity_info *vptr, in velocity_init_registers() argument
1331 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_registers()
1332 struct net_device *netdev = vptr->netdev; in velocity_init_registers()
1346 velocity_rx_reset(vptr); in velocity_init_registers()
1350 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1352 velocity_print_link_status(vptr); in velocity_init_registers()
1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1357 enable_flow_control_ability(vptr); in velocity_init_registers()
1371 velocity_soft_reset(vptr); in velocity_init_registers()
1374 if (!vptr->no_eeprom) { in velocity_init_registers()
1384 mac_set_rx_thresh(regs, vptr->options.rx_thresh); in velocity_init_registers()
1385 mac_set_dma_length(regs, vptr->options.DMA_length); in velocity_init_registers()
1396 velocity_init_cam_filter(vptr); in velocity_init_registers()
1408 setup_adaptive_interrupts(vptr); in velocity_init_registers()
1410 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_init_registers()
1411 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_init_registers()
1415 writew(vptr->options.numtx - 1, &regs->TDCSize); in velocity_init_registers()
1417 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_registers()
1418 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); in velocity_init_registers()
1422 init_flow_control_register(vptr); in velocity_init_registers()
1427 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1430 mii_init(vptr, mii_status); in velocity_init_registers()
1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1433 velocity_print_link_status(vptr); in velocity_init_registers()
1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1438 enable_flow_control_ability(vptr); in velocity_init_registers()
1440 mac_write_int_mask(vptr->int_mask, regs); in velocity_init_registers()
1446 static void velocity_give_many_rx_descs(struct velocity_info *vptr) in velocity_give_many_rx_descs() argument
1448 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_give_many_rx_descs()
1455 if (vptr->rx.filled < 4) in velocity_give_many_rx_descs()
1460 unusable = vptr->rx.filled & 0x0003; in velocity_give_many_rx_descs()
1461 dirty = vptr->rx.dirty - unusable; in velocity_give_many_rx_descs()
1462 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { in velocity_give_many_rx_descs()
1463 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; in velocity_give_many_rx_descs()
1464 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; in velocity_give_many_rx_descs()
1467 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); in velocity_give_many_rx_descs()
1468 vptr->rx.filled = unusable; in velocity_give_many_rx_descs()
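
The masking with 0x0003/0xfffc encodes a hardware rule: RX descriptors can only be returned to the NIC in blocks of four, so any remainder is carried over to the next call. The arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
        int filled = 11;                 /* descriptors refilled since the last handoff */
        int unusable = filled & 0x0003;  /* remainder that must wait                    */
        int handed   = filled & 0xfffc;  /* largest multiple of four                    */

        printf("hand %d descriptors back to the NIC, carry %d over\n",
               handed, unusable);
        /* the driver then does: writew(handed, &regs->RBRDU); filled = unusable; */
        return 0;
    }
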
1478 static int velocity_init_dma_rings(struct velocity_info *vptr) in velocity_init_dma_rings() argument
1480 struct velocity_opt *opt = &vptr->options; in velocity_init_dma_rings()
1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + in velocity_init_dma_rings()
1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", in velocity_init_dma_rings()
1497 vptr->netdev->name); in velocity_init_dma_rings()
1501 vptr->rx.ring = pool; in velocity_init_dma_rings()
1502 vptr->rx.pool_dma = pool_dma; in velocity_init_dma_rings()
1507 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_dma_rings()
1508 vptr->tx.rings[i] = pool; in velocity_init_dma_rings()
1509 vptr->tx.pool_dma[i] = pool_dma; in velocity_init_dma_rings()
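
Lines 1493-1509 show a single dma_alloc_coherent() backing every descriptor ring: the RX ring takes the front of the block, then one TX ring per queue, with the CPU pointer and the bus address advanced in lockstep. A condensed reassembly of the carve-up (error handling elided):

    rx_ring_size = opt->numrx * sizeof(struct rx_desc);
    tx_ring_size = opt->numtx * sizeof(struct tx_desc);

    pool = dma_alloc_coherent(vptr->dev,
                              tx_ring_size * vptr->tx.numq + rx_ring_size,
                              &pool_dma, GFP_ATOMIC);

    vptr->rx.ring = pool;                   /* RX ring first */
    vptr->rx.pool_dma = pool_dma;
    pool += rx_ring_size;
    pool_dma += rx_ring_size;

    for (i = 0; i < vptr->tx.numq; i++) {   /* then one TX ring per queue */
        vptr->tx.rings[i] = pool;
        vptr->tx.pool_dma[i] = pool_dma;
        pool += tx_ring_size;
        pool_dma += tx_ring_size;
    }
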
1517 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) in velocity_set_rxbufsize() argument
1519 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; in velocity_set_rxbufsize()
1532 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) in velocity_alloc_rx_buf() argument
1534 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_alloc_rx_buf()
1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_alloc_rx_buf()
1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); in velocity_alloc_rx_buf()
1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, in velocity_alloc_rx_buf()
1548 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_alloc_rx_buf()
1555 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; in velocity_alloc_rx_buf()
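
The +64 in the allocation at line 1537 is headroom for cache alignment: before mapping, the driver reserves just enough bytes to push skb->data up to the next 64-byte boundary. The arithmetic on its own (the buffer address is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t data = 0x1000 + 13;        /* hypothetical skb->data          */
        size_t reserve = 64 - (data & 63);   /* 1..64 bytes skipped before DMA  */

        printf("reserve %zu bytes -> data at 0x%lx\n",
               reserve, (unsigned long)(data + reserve));
        return 0;
    }

Note that an already-aligned buffer still skips a full 64 bytes, which is exactly what the extra headroom allows for.
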
1562 static int velocity_rx_refill(struct velocity_info *vptr) in velocity_rx_refill() argument
1564 int dirty = vptr->rx.dirty, done = 0; in velocity_rx_refill()
1567 struct rx_desc *rd = vptr->rx.ring + dirty; in velocity_rx_refill()
1573 if (!vptr->rx.info[dirty].skb) { in velocity_rx_refill()
1574 if (velocity_alloc_rx_buf(vptr, dirty) < 0) in velocity_rx_refill()
1578 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; in velocity_rx_refill()
1579 } while (dirty != vptr->rx.curr); in velocity_rx_refill()
1582 vptr->rx.dirty = dirty; in velocity_rx_refill()
1583 vptr->rx.filled += done; in velocity_rx_refill()
1596 static void velocity_free_rd_ring(struct velocity_info *vptr) in velocity_free_rd_ring() argument
1600 if (vptr->rx.info == NULL) in velocity_free_rd_ring()
1603 for (i = 0; i < vptr->options.numrx; i++) { in velocity_free_rd_ring()
1604 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); in velocity_free_rd_ring()
1605 struct rx_desc *rd = vptr->rx.ring + i; in velocity_free_rd_ring()
1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_free_rd_ring()
1619 kfree(vptr->rx.info); in velocity_free_rd_ring()
1620 vptr->rx.info = NULL; in velocity_free_rd_ring()
1630 static int velocity_init_rd_ring(struct velocity_info *vptr) in velocity_init_rd_ring() argument
1634 vptr->rx.info = kcalloc(vptr->options.numrx, in velocity_init_rd_ring()
1636 if (!vptr->rx.info) in velocity_init_rd_ring()
1639 velocity_init_rx_ring_indexes(vptr); in velocity_init_rd_ring()
1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) { in velocity_init_rd_ring()
1643 "%s: failed to allocate RX buffer.\n", vptr->netdev->name); in velocity_init_rd_ring()
1644 velocity_free_rd_ring(vptr); in velocity_init_rd_ring()
1661 static int velocity_init_td_ring(struct velocity_info *vptr) in velocity_init_td_ring() argument
1666 for (j = 0; j < vptr->tx.numq; j++) { in velocity_init_td_ring()
1668 vptr->tx.infos[j] = kcalloc(vptr->options.numtx, in velocity_init_td_ring()
1671 if (!vptr->tx.infos[j]) { in velocity_init_td_ring()
1673 kfree(vptr->tx.infos[j]); in velocity_init_td_ring()
1677 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; in velocity_init_td_ring()
1688 static void velocity_free_dma_rings(struct velocity_info *vptr) in velocity_free_dma_rings() argument
1690 const int size = vptr->options.numrx * sizeof(struct rx_desc) + in velocity_free_dma_rings()
1691 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; in velocity_free_dma_rings()
1693 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); in velocity_free_dma_rings()
1696 static int velocity_init_rings(struct velocity_info *vptr, int mtu) in velocity_init_rings() argument
1700 velocity_set_rxbufsize(vptr, mtu); in velocity_init_rings()
1702 ret = velocity_init_dma_rings(vptr); in velocity_init_rings()
1706 ret = velocity_init_rd_ring(vptr); in velocity_init_rings()
1710 ret = velocity_init_td_ring(vptr); in velocity_init_rings()
1717 velocity_free_rd_ring(vptr); in velocity_init_rings()
1719 velocity_free_dma_rings(vptr); in velocity_init_rings()
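
The tail of velocity_init_rings() (lines 1717-1719) is the usual inverse-order unwind: a TD-ring failure frees the RD ring, and an RD-ring failure frees the DMA pool, each step undone in the opposite order of construction. A sketch of the shape (label names here are illustrative):

    ret = velocity_init_dma_rings(vptr);
    if (ret < 0)
        goto out;
    ret = velocity_init_rd_ring(vptr);
    if (ret < 0)
        goto err_free_dma_rings;    /* undo step 1 */
    ret = velocity_init_td_ring(vptr);
    if (ret < 0)
        goto err_free_rd_ring;      /* undo step 2, then fall into step 1 */
    out:
        return ret;

    err_free_rd_ring:
        velocity_free_rd_ring(vptr);
    err_free_dma_rings:
        velocity_free_dma_rings(vptr);
        return ret;
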
1731 static void velocity_free_tx_buf(struct velocity_info *vptr, in velocity_free_tx_buf() argument
1750 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], in velocity_free_tx_buf()
1761 static void velocity_free_td_ring_entry(struct velocity_info *vptr, in velocity_free_td_ring_entry() argument
1764 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); in velocity_free_td_ring_entry()
1773 dma_unmap_single(vptr->dev, td_info->skb_dma[i], in velocity_free_td_ring_entry()
1790 static void velocity_free_td_ring(struct velocity_info *vptr) in velocity_free_td_ring() argument
1794 for (j = 0; j < vptr->tx.numq; j++) { in velocity_free_td_ring()
1795 if (vptr->tx.infos[j] == NULL) in velocity_free_td_ring()
1797 for (i = 0; i < vptr->options.numtx; i++) in velocity_free_td_ring()
1798 velocity_free_td_ring_entry(vptr, j, i); in velocity_free_td_ring()
1800 kfree(vptr->tx.infos[j]); in velocity_free_td_ring()
1801 vptr->tx.infos[j] = NULL; in velocity_free_td_ring()
1805 static void velocity_free_rings(struct velocity_info *vptr) in velocity_free_rings() argument
1807 velocity_free_td_ring(vptr); in velocity_free_rings()
1808 velocity_free_rd_ring(vptr); in velocity_free_rings()
1809 velocity_free_dma_rings(vptr); in velocity_free_rings()
1823 static void velocity_error(struct velocity_info *vptr, int status) in velocity_error() argument
1827 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1832 netif_stop_queue(vptr->netdev); in velocity_error()
1839 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1842 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_error()
1843 vptr->mii_status = check_connection_type(regs); in velocity_error()
1850 if (vptr->rev_id < REV_ID_VT3216_A0) { in velocity_error()
1851 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_error()
1859 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) in velocity_error()
1864 setup_queue_timers(vptr); in velocity_error()
1872 vptr->mii_status &= ~VELOCITY_LINK_FAIL; in velocity_error()
1873 netif_carrier_on(vptr->netdev); in velocity_error()
1875 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_error()
1876 netif_carrier_off(vptr->netdev); in velocity_error()
1879 velocity_print_link_status(vptr); in velocity_error()
1880 enable_flow_control_ability(vptr); in velocity_error()
1889 if (vptr->mii_status & VELOCITY_LINK_FAIL) in velocity_error()
1890 netif_stop_queue(vptr->netdev); in velocity_error()
1892 netif_wake_queue(vptr->netdev); in velocity_error()
1896 velocity_update_hw_mibs(vptr); in velocity_error()
1898 mac_rx_queue_wake(vptr->mac_regs); in velocity_error()
1909 static int velocity_tx_srv(struct velocity_info *vptr) in velocity_tx_srv() argument
1917 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_tx_srv()
1919 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { in velocity_tx_srv()
1920 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; in velocity_tx_srv()
1921 idx = (idx + 1) % vptr->options.numtx) { in velocity_tx_srv()
1926 td = &(vptr->tx.rings[qnum][idx]); in velocity_tx_srv()
1927 tdinfo = &(vptr->tx.infos[qnum][idx]); in velocity_tx_srv()
1950 velocity_free_tx_buf(vptr, tdinfo, td); in velocity_tx_srv()
1951 vptr->tx.used[qnum]--; in velocity_tx_srv()
1953 vptr->tx.tail[qnum] = idx; in velocity_tx_srv()
1955 if (AVAIL_TD(vptr, qnum) < 1) in velocity_tx_srv()
1962 if (netif_queue_stopped(vptr->netdev) && (full == 0) && in velocity_tx_srv()
1963 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { in velocity_tx_srv()
1964 netif_wake_queue(vptr->netdev); in velocity_tx_srv()
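
The reclaim loop walks each queue from its tail, stops at the first descriptor still owned by the NIC, and wakes the netdev queue only once every TX ring has room and the link is up. The ring walk in isolation (array contents are hypothetical):

    #include <stdio.h>

    #define NUMTX 8

    int main(void)
    {
        int owned_by_nic[NUMTX] = { 0, 0, 0, 1, 1, 0, 0, 0 };  /* hypothetical */
        int tail = 1, used = 4;

        while (used > 0 && !owned_by_nic[tail]) {  /* completed descriptors only */
            printf("reclaim descriptor %d\n", tail);
            tail = (tail + 1) % NUMTX;
            used--;
        }
        printf("tail now %d, %d still in flight\n", tail, used);
        return 0;
    }
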
2006 struct velocity_info *vptr) in velocity_rx_copy() argument
2012 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); in velocity_rx_copy()
2033 static inline void velocity_iph_realign(struct velocity_info *vptr, in velocity_iph_realign() argument
2036 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { in velocity_iph_realign()
2050 static int velocity_receive_frame(struct velocity_info *vptr, int idx) in velocity_receive_frame() argument
2052 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_receive_frame()
2053 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_receive_frame()
2054 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_receive_frame()
2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); in velocity_receive_frame()
2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2070 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2076 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { in velocity_receive_frame()
2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { in velocity_receive_frame()
2086 velocity_iph_realign(vptr, skb, pkt_len); in velocity_receive_frame()
2088 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_receive_frame()
2091 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2092 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2096 skb->protocol = eth_type_trans(skb, vptr->netdev); in velocity_receive_frame()
2119 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) in velocity_rx_srv() argument
2121 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_rx_srv()
2122 int rd_curr = vptr->rx.curr; in velocity_rx_srv()
2126 struct rx_desc *rd = vptr->rx.ring + rd_curr; in velocity_rx_srv()
2128 if (!vptr->rx.info[rd_curr].skb) in velocity_rx_srv()
2140 if (velocity_receive_frame(vptr, rd_curr) < 0) in velocity_rx_srv()
2154 if (rd_curr >= vptr->options.numrx) in velocity_rx_srv()
2159 vptr->rx.curr = rd_curr; in velocity_rx_srv()
2161 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) in velocity_rx_srv()
2162 velocity_give_many_rx_descs(vptr); in velocity_rx_srv()
2170 struct velocity_info *vptr = container_of(napi, in velocity_poll() local
2179 rx_done = velocity_rx_srv(vptr, budget); in velocity_poll()
2180 spin_lock_irqsave(&vptr->lock, flags); in velocity_poll()
2181 velocity_tx_srv(vptr); in velocity_poll()
2185 mac_enable_int(vptr->mac_regs); in velocity_poll()
2187 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_poll()
2205 struct velocity_info *vptr = netdev_priv(dev); in velocity_intr() local
2208 spin_lock(&vptr->lock); in velocity_intr()
2209 isr_status = mac_read_isr(vptr->mac_regs); in velocity_intr()
2213 spin_unlock(&vptr->lock); in velocity_intr()
2218 mac_write_isr(vptr->mac_regs, isr_status); in velocity_intr()
2220 if (likely(napi_schedule_prep(&vptr->napi))) { in velocity_intr()
2221 mac_disable_int(vptr->mac_regs); in velocity_intr()
2222 __napi_schedule(&vptr->napi); in velocity_intr()
2226 velocity_error(vptr, isr_status); in velocity_intr()
2228 spin_unlock(&vptr->lock); in velocity_intr()
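
This is the standard NAPI handshake: read the interrupt status, return IRQ_NONE on zero (the line may be shared), acknowledge by writing the status back, then mask chip interrupts and schedule the poll, which re-enables them once the budget is not exhausted (see mac_enable_int() in velocity_poll() above). A skeleton of that shape, not the full handler:

    static irqreturn_t sketch_intr(int irq, void *dev_instance)
    {
        struct net_device *dev = dev_instance;
        struct velocity_info *vptr = netdev_priv(dev);
        u32 isr_status;

        spin_lock(&vptr->lock);
        isr_status = mac_read_isr(vptr->mac_regs);
        if (isr_status == 0) {                      /* not our interrupt */
            spin_unlock(&vptr->lock);
            return IRQ_NONE;
        }
        mac_write_isr(vptr->mac_regs, isr_status);  /* ack what we saw */

        if (likely(napi_schedule_prep(&vptr->napi))) {
            mac_disable_int(vptr->mac_regs);        /* poll re-enables */
            __napi_schedule(&vptr->napi);
        }
        spin_unlock(&vptr->lock);
        return IRQ_HANDLED;
    }
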
2245 struct velocity_info *vptr = netdev_priv(dev); in velocity_open() local
2248 ret = velocity_init_rings(vptr, dev->mtu); in velocity_open()
2253 velocity_set_power_state(vptr, PCI_D0); in velocity_open()
2255 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_open()
2261 velocity_set_power_state(vptr, PCI_D3hot); in velocity_open()
2262 velocity_free_rings(vptr); in velocity_open()
2266 velocity_give_many_rx_descs(vptr); in velocity_open()
2268 mac_enable_int(vptr->mac_regs); in velocity_open()
2270 napi_enable(&vptr->napi); in velocity_open()
2271 vptr->flags |= VELOCITY_FLAGS_OPENED; in velocity_open()
2283 static void velocity_shutdown(struct velocity_info *vptr) in velocity_shutdown() argument
2285 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_shutdown()
2305 struct velocity_info *vptr = netdev_priv(dev); in velocity_change_mtu() local
2310 vptr->netdev->name); in velocity_change_mtu()
2333 tmp_vptr->pdev = vptr->pdev; in velocity_change_mtu()
2334 tmp_vptr->dev = vptr->dev; in velocity_change_mtu()
2335 tmp_vptr->options = vptr->options; in velocity_change_mtu()
2336 tmp_vptr->tx.numq = vptr->tx.numq; in velocity_change_mtu()
2342 napi_disable(&vptr->napi); in velocity_change_mtu()
2344 spin_lock_irqsave(&vptr->lock, flags); in velocity_change_mtu()
2347 velocity_shutdown(vptr); in velocity_change_mtu()
2349 rx = vptr->rx; in velocity_change_mtu()
2350 tx = vptr->tx; in velocity_change_mtu()
2352 vptr->rx = tmp_vptr->rx; in velocity_change_mtu()
2353 vptr->tx = tmp_vptr->tx; in velocity_change_mtu()
2360 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_change_mtu()
2362 velocity_give_many_rx_descs(vptr); in velocity_change_mtu()
2364 napi_enable(&vptr->napi); in velocity_change_mtu()
2366 mac_enable_int(vptr->mac_regs); in velocity_change_mtu()
2369 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_change_mtu()
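
The MTU change avoids tearing down the live device in place: it builds a complete set of rings for the new MTU on a scratch velocity_info (tmp_vptr), then swaps the rx/tx bookkeeping under the lock, so the old rings are freed through the scratch copy and an allocation failure leaves the running device untouched. The swap at the heart of it:

    rx = vptr->rx;              /* save the old rings             */
    tx = vptr->tx;

    vptr->rx = tmp_vptr->rx;    /* adopt the new-MTU rings        */
    vptr->tx = tmp_vptr->tx;

    tmp_vptr->rx = rx;          /* scratch copy now owns the old  */
    tmp_vptr->tx = tx;          /* rings and frees them on exit   */
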
2409 struct velocity_info *vptr = netdev_priv(dev); in velocity_mii_ioctl() local
2410 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_mii_ioctl()
2420 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) in velocity_mii_ioctl()
2424 spin_lock_irqsave(&vptr->lock, flags); in velocity_mii_ioctl()
2425 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); in velocity_mii_ioctl()
2426 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_mii_ioctl()
2427 check_connection_type(vptr->mac_regs); in velocity_mii_ioctl()
2448 struct velocity_info *vptr = netdev_priv(dev); in velocity_ioctl() local
2455 velocity_set_power_state(vptr, PCI_D0); in velocity_ioctl()
2468 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ioctl()
2486 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_stats() local
2492 spin_lock_irq(&vptr->lock); in velocity_get_stats()
2493 velocity_update_hw_mibs(vptr); in velocity_get_stats()
2494 spin_unlock_irq(&vptr->lock); in velocity_get_stats()
2496 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; in velocity_get_stats()
2497 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; in velocity_get_stats()
2498 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; in velocity_get_stats()
2501 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; in velocity_get_stats()
2505 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; in velocity_get_stats()
2525 struct velocity_info *vptr = netdev_priv(dev); in velocity_close() local
2527 napi_disable(&vptr->napi); in velocity_close()
2529 velocity_shutdown(vptr); in velocity_close()
2531 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) in velocity_close()
2532 velocity_get_ip(vptr); in velocity_close()
2536 velocity_free_rings(vptr); in velocity_close()
2538 vptr->flags &= (~VELOCITY_FLAGS_OPENED); in velocity_close()
2553 struct velocity_info *vptr = netdev_priv(dev); in velocity_xmit() local
2576 spin_lock_irqsave(&vptr->lock, flags); in velocity_xmit()
2578 index = vptr->tx.curr[qnum]; in velocity_xmit()
2579 td_ptr = &(vptr->tx.rings[qnum][index]); in velocity_xmit()
2580 tdinfo = &(vptr->tx.infos[qnum][index]); in velocity_xmit()
2590 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, in velocity_xmit()
2601 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, in velocity_xmit()
2633 prev = vptr->options.numtx - 1; in velocity_xmit()
2635 vptr->tx.used[qnum]++; in velocity_xmit()
2636 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; in velocity_xmit()
2638 if (AVAIL_TD(vptr, qnum) < 1) in velocity_xmit()
2641 td_ptr = &(vptr->tx.rings[qnum][prev]); in velocity_xmit()
2643 mac_tx_queue_wake(vptr->mac_regs, qnum); in velocity_xmit()
2645 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_xmit()
2676 static void velocity_init_info(struct velocity_info *vptr, in velocity_init_info() argument
2679 vptr->chip_id = info->chip_id; in velocity_init_info()
2680 vptr->tx.numq = info->txqueue; in velocity_init_info()
2681 vptr->multicast_limit = MCAM_SIZE; in velocity_init_info()
2682 spin_lock_init(&vptr->lock); in velocity_init_info()
2693 static int velocity_get_pci_info(struct velocity_info *vptr) in velocity_get_pci_info() argument
2695 struct pci_dev *pdev = vptr->pdev; in velocity_get_pci_info()
2699 vptr->ioaddr = pci_resource_start(pdev, 0); in velocity_get_pci_info()
2700 vptr->memaddr = pci_resource_start(pdev, 1); in velocity_get_pci_info()
2729 static int velocity_get_platform_info(struct velocity_info *vptr) in velocity_get_platform_info() argument
2734 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) in velocity_get_platform_info()
2735 vptr->no_eeprom = 1; in velocity_get_platform_info()
2737 ret = of_address_to_resource(vptr->dev->of_node, 0, &res); in velocity_get_platform_info()
2739 dev_err(vptr->dev, "unable to find memory address\n"); in velocity_get_platform_info()
2743 vptr->memaddr = res.start; in velocity_get_platform_info()
2746 dev_err(vptr->dev, "memory region is too small.\n"); in velocity_get_platform_info()
2760 static void velocity_print_info(struct velocity_info *vptr) in velocity_print_info() argument
2762 struct net_device *dev = vptr->netdev; in velocity_print_info()
2764 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); in velocity_print_info()
2771 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_link() local
2772 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_link()
2793 struct velocity_info *vptr; in velocity_probe() local
2812 vptr = netdev_priv(netdev); in velocity_probe()
2823 vptr->netdev = netdev; in velocity_probe()
2824 vptr->dev = dev; in velocity_probe()
2826 velocity_init_info(vptr, info); in velocity_probe()
2829 vptr->pdev = to_pci_dev(dev); in velocity_probe()
2831 ret = velocity_get_pci_info(vptr); in velocity_probe()
2835 vptr->pdev = NULL; in velocity_probe()
2836 ret = velocity_get_platform_info(vptr); in velocity_probe()
2841 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); in velocity_probe()
2847 vptr->mac_regs = regs; in velocity_probe()
2848 vptr->rev_id = readb(&regs->rev_id); in velocity_probe()
2858 velocity_get_options(&vptr->options, velocity_nics, drv_string); in velocity_probe()
2864 vptr->options.flags &= info->flags; in velocity_probe()
2870 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); in velocity_probe()
2872 vptr->wol_opts = vptr->options.wol_opts; in velocity_probe()
2873 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_probe()
2875 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); in velocity_probe()
2879 netif_napi_add(netdev, &vptr->napi, velocity_poll, in velocity_probe()
2894 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_probe()
2897 velocity_print_info(vptr); in velocity_probe()
2898 dev_set_drvdata(vptr->dev, netdev); in velocity_probe()
2902 velocity_set_power_state(vptr, PCI_D3hot); in velocity_probe()
2908 netif_napi_del(&vptr->napi); in velocity_probe()
2926 struct velocity_info *vptr = netdev_priv(netdev); in velocity_remove() local
2929 netif_napi_del(&vptr->napi); in velocity_remove()
2930 iounmap(vptr->mac_regs); in velocity_remove()
3042 static int velocity_set_wol(struct velocity_info *vptr) in velocity_set_wol() argument
3044 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_wol()
3045 enum speed_opt spd_dpx = vptr->options.spd_dpx; in velocity_set_wol()
3063 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_set_wol()
3066 if (vptr->wol_opts & VELOCITY_WOL_ARP) { in velocity_set_wol()
3077 memcpy(arp->ar_tip, vptr->ip_addr, 4); in velocity_set_wol()
3097 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { in velocity_set_wol()
3098 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_wol()
3099 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_wol()
3101 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_wol()
3104 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_set_wol()
3105 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in velocity_set_wol()
3137 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_save_context() argument
3139 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_save_context()
3157 struct velocity_info *vptr = netdev_priv(netdev); in velocity_suspend() local
3160 if (!netif_running(vptr->netdev)) in velocity_suspend()
3163 netif_device_detach(vptr->netdev); in velocity_suspend()
3165 spin_lock_irqsave(&vptr->lock, flags); in velocity_suspend()
3166 if (vptr->pdev) in velocity_suspend()
3167 pci_save_state(vptr->pdev); in velocity_suspend()
3169 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { in velocity_suspend()
3170 velocity_get_ip(vptr); in velocity_suspend()
3171 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3172 velocity_shutdown(vptr); in velocity_suspend()
3173 velocity_set_wol(vptr); in velocity_suspend()
3174 if (vptr->pdev) in velocity_suspend()
3175 pci_enable_wake(vptr->pdev, PCI_D3hot, 1); in velocity_suspend()
3176 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3178 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3179 velocity_shutdown(vptr); in velocity_suspend()
3180 if (vptr->pdev) in velocity_suspend()
3181 pci_disable_device(vptr->pdev); in velocity_suspend()
3182 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3185 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_suspend()
3197 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_restore_context() argument
3199 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_restore_context()
3227 struct velocity_info *vptr = netdev_priv(netdev); in velocity_resume() local
3231 if (!netif_running(vptr->netdev)) in velocity_resume()
3234 velocity_set_power_state(vptr, PCI_D0); in velocity_resume()
3236 if (vptr->pdev) { in velocity_resume()
3237 pci_enable_wake(vptr->pdev, PCI_D0, 0); in velocity_resume()
3238 pci_restore_state(vptr->pdev); in velocity_resume()
3241 mac_wol_reset(vptr->mac_regs); in velocity_resume()
3243 spin_lock_irqsave(&vptr->lock, flags); in velocity_resume()
3244 velocity_restore_context(vptr, &vptr->context); in velocity_resume()
3245 velocity_init_registers(vptr, VELOCITY_INIT_WOL); in velocity_resume()
3246 mac_disable_int(vptr->mac_regs); in velocity_resume()
3248 velocity_tx_srv(vptr); in velocity_resume()
3250 for (i = 0; i < vptr->tx.numq; i++) { in velocity_resume()
3251 if (vptr->tx.used[i]) in velocity_resume()
3252 mac_tx_queue_wake(vptr->mac_regs, i); in velocity_resume()
3255 mac_enable_int(vptr->mac_regs); in velocity_resume()
3256 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_resume()
3257 netif_device_attach(vptr->netdev); in velocity_resume()
3298 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_up() local
3300 velocity_set_power_state(vptr, PCI_D0); in velocity_ethtool_up()
3313 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_down() local
3315 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ethtool_down()
3321 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_settings() local
3322 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_settings()
3324 status = check_connection_type(vptr->mac_regs); in velocity_get_settings()
3336 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_get_settings()
3345 switch (vptr->options.spd_dpx) { in velocity_get_settings()
3389 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_settings() local
3395 curr_status = check_connection_type(vptr->mac_regs); in velocity_set_settings()
3424 vptr->options.spd_dpx = spd_dpx; in velocity_set_settings()
3426 velocity_set_media_mode(vptr, new_status); in velocity_set_settings()
3434 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_drvinfo() local
3438 if (vptr->pdev) in velocity_get_drvinfo()
3439 strlcpy(info->bus_info, pci_name(vptr->pdev), in velocity_get_drvinfo()
3447 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_get_wol() local
3454 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_ethtool_get_wol()
3456 if (vptr->wol_opts & VELOCITY_WOL_ARP) in velocity_ethtool_get_wol()
3458 memcpy(&wol->sopass, vptr->wol_passwd, 6); in velocity_ethtool_get_wol()
3463 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_set_wol() local
3467 vptr->wol_opts = VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3477 vptr->wol_opts |= VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3478 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3481 vptr->wol_opts |= VELOCITY_WOL_UCAST; in velocity_ethtool_set_wol()
3482 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3485 vptr->wol_opts |= VELOCITY_WOL_ARP; in velocity_ethtool_set_wol()
3486 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3488 memcpy(vptr->wol_passwd, wol->sopass, 6); in velocity_ethtool_set_wol()
3548 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_coalesce() local
3550 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; in velocity_get_coalesce()
3551 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; in velocity_get_coalesce()
3553 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer); in velocity_get_coalesce()
3554 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); in velocity_get_coalesce()
3562 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_coalesce() local
3577 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; in velocity_set_coalesce()
3578 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; in velocity_set_coalesce()
3580 set_pending_timer_val(&vptr->options.rxqueue_timer, in velocity_set_coalesce()
3582 set_pending_timer_val(&vptr->options.txqueue_timer, in velocity_set_coalesce()
3586 spin_lock_irqsave(&vptr->lock, flags); in velocity_set_coalesce()
3587 mac_disable_int(vptr->mac_regs); in velocity_set_coalesce()
3588 setup_adaptive_interrupts(vptr); in velocity_set_coalesce()
3589 setup_queue_timers(vptr); in velocity_set_coalesce()
3591 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); in velocity_set_coalesce()
3592 mac_clear_isr(vptr->mac_regs); in velocity_set_coalesce()
3593 mac_enable_int(vptr->mac_regs); in velocity_set_coalesce()
3594 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_set_coalesce()
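
Applying new coalescing settings is a quiesce/reprogram/re-enable sequence under the lock, with the ISR cleared so no stale status fires under the new mask. The sequence from lines 3586-3594, annotated:

    spin_lock_irqsave(&vptr->lock, flags);
    mac_disable_int(vptr->mac_regs);        /* quiesce the chip           */
    setup_adaptive_interrupts(vptr);        /* new suppression thresholds */
    setup_queue_timers(vptr);               /* new hold-off timers        */
    mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
    mac_clear_isr(vptr->mac_regs);          /* drop anything stale        */
    mac_enable_int(vptr->mac_regs);
    spin_unlock_irqrestore(&vptr->lock, flags);
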
3657 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_ethtool_stats() local
3658 u32 *p = vptr->mib_counter; in velocity_get_ethtool_stats()
3661 spin_lock_irq(&vptr->lock); in velocity_get_ethtool_stats()
3662 velocity_update_hw_mibs(vptr); in velocity_get_ethtool_stats()
3663 spin_unlock_irq(&vptr->lock); in velocity_get_ethtool_stats()