Lines matching refs: priv (identifier cross-reference; the matches below are from the Altera TSE Ethernet driver, altera_tse_main.c)

94 static inline u32 tse_tx_avail(struct altera_tse_private *priv)  in tse_tx_avail()  argument
96 return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; in tse_tx_avail()
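The availability math in tse_tx_avail() works because tx_prod and tx_cons are free-running u32 counters that are only reduced modulo the ring size at indexing time; one slot is kept unused so a full ring and an empty ring are distinguishable, and defined unsigned wraparound keeps the subtraction correct across 2^32. A minimal runnable model of the same arithmetic (standalone, names hypothetical):

#include <assert.h>
#include <stdint.h>

/* Free-running indices, never wrapped to the ring size except when
 * used as an array index; stand-ins for priv->tx_prod/tx_cons.
 */
struct ring {
	uint32_t prod;
	uint32_t cons;
	uint32_t size;	/* number of slots, e.g. priv->tx_ring_size */
};

/* Same arithmetic as tse_tx_avail(): one slot stays unused so that
 * avail == 0 means full and avail == size - 1 means empty.
 */
static uint32_t ring_avail(const struct ring *r)
{
	return r->cons + r->size - r->prod - 1;
}

int main(void)
{
	struct ring r = { .prod = 0, .cons = 0, .size = 8 };

	assert(ring_avail(&r) == 7);	/* empty: size - 1 free */
	r.prod = 7;			/* ring now full */
	assert(ring_avail(&r) == 0);
	return 0;
}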
103 struct net_device *ndev = bus->priv; in altera_tse_mdio_read()
104 struct altera_tse_private *priv = netdev_priv(ndev); in altera_tse_mdio_read() local
107 csrwr32((mii_id & 0x1f), priv->mac_dev, in altera_tse_mdio_read()
111 return csrrd32(priv->mac_dev, in altera_tse_mdio_read()
118 struct net_device *ndev = bus->priv; in altera_tse_mdio_write()
119 struct altera_tse_private *priv = netdev_priv(ndev); in altera_tse_mdio_write() local
122 csrwr32((mii_id & 0x1f), priv->mac_dev, in altera_tse_mdio_write()
126 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4); in altera_tse_mdio_write()
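The MDIO accessors are a two-step pattern: latch the 5-bit PHY address in one CSR, then index a window of 32 per-register CSRs at mdio_phy1 in 4-byte steps (regnum * 4). A compilable sketch of that pattern against a fake register file; the offsets are hypothetical, the real ones come from tse_csroffs():

#include <stdint.h>
#include <stdio.h>

/* Fake memory-mapped CSR space standing in for priv->mac_dev. */
static uint32_t csr_space[0x100];

static void csrwr32(uint32_t v, uint32_t *base, unsigned off) { base[off / 4] = v; }
static uint32_t csrrd32(uint32_t *base, unsigned off) { return base[off / 4]; }

/* Hypothetical offsets; placeholders for tse_csroffs(mdio_phy1_addr)
 * and tse_csroffs(mdio_phy1).
 */
#define MDIO_PHY1_ADDR	0x3c
#define MDIO_PHY1	0x140

static uint16_t mdio_read(uint32_t *mac_dev, int mii_id, int regnum)
{
	/* Step 1: latch the PHY address (5 bits on an MDIO bus). */
	csrwr32(mii_id & 0x1f, mac_dev, MDIO_PHY1_ADDR);
	/* Step 2: each of the 32 MDIO registers is one 32-bit CSR. */
	return csrrd32(mac_dev, MDIO_PHY1 + regnum * 4) & 0xffff;
}

int main(void)
{
	csr_space[(MDIO_PHY1 + 2 * 4) / 4] = 0x0141;	/* pretend PHY ID */
	printf("reg2 = 0x%04x\n", mdio_read(csr_space, 1, 2));
	return 0;
}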
132 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_mdio_create() local
139 for_each_child_of_node(priv->device->of_node, child_node) { in altera_tse_mdio_create()
172 mdio->priv = dev; in altera_tse_mdio_create()
173 mdio->parent = priv->device; in altera_tse_mdio_create()
182 if (netif_msg_drv(priv)) in altera_tse_mdio_create()
185 priv->mdio = mdio; in altera_tse_mdio_create()
197 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_mdio_destroy() local
199 if (priv->mdio == NULL) in altera_tse_mdio_destroy()
202 if (netif_msg_drv(priv)) in altera_tse_mdio_destroy()
204 priv->mdio->id); in altera_tse_mdio_destroy()
206 mdiobus_unregister(priv->mdio); in altera_tse_mdio_destroy()
207 kfree(priv->mdio->irq); in altera_tse_mdio_destroy()
208 mdiobus_free(priv->mdio); in altera_tse_mdio_destroy()
209 priv->mdio = NULL; in altera_tse_mdio_destroy()
212 static int tse_init_rx_buffer(struct altera_tse_private *priv, in tse_init_rx_buffer() argument
215 rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); in tse_init_rx_buffer()
219 rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, in tse_init_rx_buffer()
223 if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { in tse_init_rx_buffer()
224 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); in tse_init_rx_buffer()
233 static void tse_free_rx_buffer(struct altera_tse_private *priv, in tse_free_rx_buffer() argument
241 dma_unmap_single(priv->device, dma_addr, in tse_free_rx_buffer()
252 static void tse_free_tx_buffer(struct altera_tse_private *priv, in tse_free_tx_buffer() argument
257 dma_unmap_page(priv->device, buffer->dma_addr, in tse_free_tx_buffer()
260 dma_unmap_single(priv->device, buffer->dma_addr, in tse_free_tx_buffer()
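tse_init_rx_buffer() checks every dma_map_single() with dma_mapping_error() before the address reaches hardware, and tse_free_tx_buffer() reverses a mapping with the variant that matches how it was created: dma_unmap_page() for mapped skb fragments, dma_unmap_single() for linear data. A condensed kernel-style sketch of that pairing, not a standalone program; the mapped_as_page flag is my assumption about how the buffer struct records the variant, since those field lines don't appear above:

/* Map an skb's linear data for device access; check before use. */
static int map_tx_linear(struct device *dev, struct tse_buffer *b,
			 struct sk_buff *skb, unsigned int len)
{
	b->dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b->dma_addr))
		return -ENOMEM;	/* never hand an unchecked handle to hw */
	b->len = len;
	b->mapped_as_page = false;	/* assumed flag: remember variant */
	return 0;
}

/* Undo with the variant that matches how the buffer was mapped. */
static void unmap_tx(struct device *dev, struct tse_buffer *b)
{
	if (b->mapped_as_page)
		dma_unmap_page(dev, b->dma_addr, b->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, b->dma_addr, b->len, DMA_TO_DEVICE);
}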
270 static int alloc_init_skbufs(struct altera_tse_private *priv) in alloc_init_skbufs() argument
272 unsigned int rx_descs = priv->rx_ring_size; in alloc_init_skbufs()
273 unsigned int tx_descs = priv->tx_ring_size; in alloc_init_skbufs()
278 priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer), in alloc_init_skbufs()
280 if (!priv->rx_ring) in alloc_init_skbufs()
284 priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer), in alloc_init_skbufs()
286 if (!priv->tx_ring) in alloc_init_skbufs()
289 priv->tx_cons = 0; in alloc_init_skbufs()
290 priv->tx_prod = 0; in alloc_init_skbufs()
294 ret = tse_init_rx_buffer(priv, &priv->rx_ring[i], in alloc_init_skbufs()
295 priv->rx_dma_buf_sz); in alloc_init_skbufs()
300 priv->rx_cons = 0; in alloc_init_skbufs()
301 priv->rx_prod = 0; in alloc_init_skbufs()
306 tse_free_rx_buffer(priv, &priv->rx_ring[i]); in alloc_init_skbufs()
307 kfree(priv->tx_ring); in alloc_init_skbufs()
309 kfree(priv->rx_ring); in alloc_init_skbufs()
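alloc_init_skbufs() follows the standard goto-unwind shape: each failure path releases exactly what the earlier steps allocated, in reverse order, and a partially initialized rx ring frees only the buffers that were set up. A kernel-style sketch of that shape using the same field names; the function and label names are mine:

static int alloc_rings(struct altera_tse_private *priv)
{
	int i, ret = -ENOMEM;

	priv->rx_ring = kcalloc(priv->rx_ring_size,
				sizeof(struct tse_buffer), GFP_KERNEL);
	if (!priv->rx_ring)
		return -ENOMEM;

	priv->tx_ring = kcalloc(priv->tx_ring_size,
				sizeof(struct tse_buffer), GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_free_rx_ring;

	for (i = 0; i < priv->rx_ring_size; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_free_rx_buffers;
	}
	return 0;

err_free_rx_buffers:
	while (i--)			/* free only buffers 0..i-1 */
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_free_rx_ring:
	kfree(priv->rx_ring);
	return ret;
}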
316 struct altera_tse_private *priv = netdev_priv(dev); in free_skbufs() local
317 unsigned int rx_descs = priv->rx_ring_size; in free_skbufs()
318 unsigned int tx_descs = priv->tx_ring_size; in free_skbufs()
323 tse_free_rx_buffer(priv, &priv->rx_ring[i]); in free_skbufs()
325 tse_free_tx_buffer(priv, &priv->tx_ring[i]); in free_skbufs()
328 kfree(priv->tx_ring); in free_skbufs()
333 static inline void tse_rx_refill(struct altera_tse_private *priv) in tse_rx_refill() argument
335 unsigned int rxsize = priv->rx_ring_size; in tse_rx_refill()
339 for (; priv->rx_cons - priv->rx_prod > 0; in tse_rx_refill()
340 priv->rx_prod++) { in tse_rx_refill()
341 entry = priv->rx_prod % rxsize; in tse_rx_refill()
342 if (likely(priv->rx_ring[entry].skb == NULL)) { in tse_rx_refill()
343 ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry], in tse_rx_refill()
344 priv->rx_dma_buf_sz); in tse_rx_refill()
347 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]); in tse_rx_refill()
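tse_rx_refill() reuses the free-running counter trick: rx_cons - rx_prod counts slots the receive path has emptied, and only slots whose skb pointer was cleared by the consumer get a fresh buffer before being re-posted through add_rx_desc(). A runnable model of the loop shape (allocator stubbed, names hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 8

static void *slots[RING_SIZE];		/* stands in for rx_ring[i].skb */
static uint32_t rx_prod, rx_cons;	/* free-running, like the driver's */

static void *alloc_buf(void) { static int b[64]; return b; }	/* stub */

/* Refill every slot the consumer emptied; modulo only at indexing. */
static void refill(void)
{
	for (; rx_cons - rx_prod > 0; rx_prod++) {
		uint32_t entry = rx_prod % RING_SIZE;

		if (slots[entry] == NULL)
			slots[entry] = alloc_buf();
	}
}

int main(void)
{
	rx_cons = 3;		/* the consumer took 3 buffers... */
	refill();		/* ...so the producer replaces 3 */
	assert(rx_prod == rx_cons);
	return 0;
}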
369 static int tse_rx(struct altera_tse_private *priv, int limit) in tse_rx() argument
374 unsigned int entry = priv->rx_cons % priv->rx_ring_size; in tse_rx()
385 ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) { in tse_rx()
390 netdev_err(priv->dev, in tse_rx()
401 next_entry = (++priv->rx_cons) % priv->rx_ring_size; in tse_rx()
403 skb = priv->rx_ring[entry].skb; in tse_rx()
405 netdev_err(priv->dev, in tse_rx()
408 priv->dev->stats.rx_dropped++; in tse_rx()
411 priv->rx_ring[entry].skb = NULL; in tse_rx()
416 dma_sync_single_for_cpu(priv->device, in tse_rx()
417 priv->rx_ring[entry].dma_addr, in tse_rx()
418 priv->rx_ring[entry].len, in tse_rx()
421 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, in tse_rx()
422 priv->rx_ring[entry].len, DMA_FROM_DEVICE); in tse_rx()
424 if (netif_msg_pktdata(priv)) { in tse_rx()
425 netdev_info(priv->dev, "frame received %d bytes\n", in tse_rx()
431 tse_rx_vlan(priv->dev, skb); in tse_rx()
433 skb->protocol = eth_type_trans(skb, priv->dev); in tse_rx()
436 napi_gro_receive(&priv->napi, skb); in tse_rx()
438 priv->dev->stats.rx_packets++; in tse_rx()
439 priv->dev->stats.rx_bytes += pktlength; in tse_rx()
443 tse_rx_refill(priv); in tse_rx()
451 static int tse_tx_complete(struct altera_tse_private *priv) in tse_tx_complete() argument
453 unsigned int txsize = priv->tx_ring_size; in tse_tx_complete()
459 spin_lock(&priv->tx_lock); in tse_tx_complete()
461 ready = priv->dmaops->tx_completions(priv); in tse_tx_complete()
464 while (ready && (priv->tx_cons != priv->tx_prod)) { in tse_tx_complete()
465 entry = priv->tx_cons % txsize; in tse_tx_complete()
466 tx_buff = &priv->tx_ring[entry]; in tse_tx_complete()
468 if (netif_msg_tx_done(priv)) in tse_tx_complete()
469 netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n", in tse_tx_complete()
470 __func__, priv->tx_prod, priv->tx_cons); in tse_tx_complete()
473 priv->dev->stats.tx_packets++; in tse_tx_complete()
475 tse_free_tx_buffer(priv, tx_buff); in tse_tx_complete()
476 priv->tx_cons++; in tse_tx_complete()
482 if (unlikely(netif_queue_stopped(priv->dev) && in tse_tx_complete()
483 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { in tse_tx_complete()
484 netif_tx_lock(priv->dev); in tse_tx_complete()
485 if (netif_queue_stopped(priv->dev) && in tse_tx_complete()
486 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { in tse_tx_complete()
487 if (netif_msg_tx_done(priv)) in tse_tx_complete()
488 netdev_dbg(priv->dev, "%s: restart transmit\n", in tse_tx_complete()
490 netif_wake_queue(priv->dev); in tse_tx_complete()
492 netif_tx_unlock(priv->dev); in tse_tx_complete()
495 spin_unlock(&priv->tx_lock); in tse_tx_complete()
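Lines 482-492 are a double-checked wake: the cheap test runs first, and the same condition is re-tested under netif_tx_lock() so that a concurrent tse_start_xmit() which is about to stop the queue cannot leave it woken spuriously. A condensed kernel-style sketch of the idiom, pulled out into its own helper:

/* Reclaim-side wake: only wake the queue if it is still worth waking
 * once we hold the lock the xmit path also takes.
 */
static void maybe_wake_queue(struct altera_tse_private *priv)
{
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		/* Re-test: the condition may have changed while racing. */
		if (netif_queue_stopped(priv->dev) &&
		    tse_tx_avail(priv) > TSE_TX_THRESH(priv))
			netif_wake_queue(priv->dev);
		netif_tx_unlock(priv->dev);
	}
}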
503 struct altera_tse_private *priv = in tse_poll() local
508 tse_tx_complete(priv); in tse_poll()
510 rxcomplete = tse_rx(priv, budget); in tse_poll()
516 netdev_dbg(priv->dev, in tse_poll()
520 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); in tse_poll()
521 priv->dmaops->enable_rxirq(priv); in tse_poll()
522 priv->dmaops->enable_txirq(priv); in tse_poll()
523 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); in tse_poll()
533 struct altera_tse_private *priv; in altera_isr() local
539 priv = netdev_priv(dev); in altera_isr()
541 spin_lock(&priv->rxdma_irq_lock); in altera_isr()
543 priv->dmaops->clear_rxirq(priv); in altera_isr()
544 priv->dmaops->clear_txirq(priv); in altera_isr()
545 spin_unlock(&priv->rxdma_irq_lock); in altera_isr()
547 if (likely(napi_schedule_prep(&priv->napi))) { in altera_isr()
548 spin_lock(&priv->rxdma_irq_lock); in altera_isr()
549 priv->dmaops->disable_rxirq(priv); in altera_isr()
550 priv->dmaops->disable_txirq(priv); in altera_isr()
551 spin_unlock(&priv->rxdma_irq_lock); in altera_isr()
552 __napi_schedule(&priv->napi); in altera_isr()
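altera_isr() and tse_poll() together form the usual NAPI interrupt-mitigation handshake: the ISR acks and then masks the DMA interrupts under rxdma_irq_lock and schedules the poll; the poll re-enables both interrupt sources only when it finished under budget. A condensed kernel-style sketch of the two halves (the real ISR takes the lock twice; merged here for brevity):

/* ISR half: ack, mask, hand off to softirq context. */
static irqreturn_t isr(int irq, void *dev_id)
{
	struct altera_tse_private *priv = netdev_priv(dev_id);

	spin_lock(&priv->rxdma_irq_lock);
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	if (likely(napi_schedule_prep(&priv->napi))) {
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		__napi_schedule(&priv->napi);
	}
	spin_unlock(&priv->rxdma_irq_lock);
	return IRQ_HANDLED;
}

/* Poll half: when work stayed under budget, complete and unmask. */
static int poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
		container_of(napi, struct altera_tse_private, napi);
	unsigned long flags;
	int done;

	tse_tx_complete(priv);
	done = tse_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);
		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return done;
}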
568 struct altera_tse_private *priv = netdev_priv(dev); in tse_start_xmit() local
569 unsigned int txsize = priv->tx_ring_size; in tse_start_xmit()
577 spin_lock_bh(&priv->tx_lock); in tse_start_xmit()
579 if (unlikely(tse_tx_avail(priv) < nfrags + 1)) { in tse_start_xmit()
583 netdev_err(priv->dev, in tse_start_xmit()
592 entry = priv->tx_prod % txsize; in tse_start_xmit()
593 buffer = &priv->tx_ring[entry]; in tse_start_xmit()
595 dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, in tse_start_xmit()
597 if (dma_mapping_error(priv->device, dma_addr)) { in tse_start_xmit()
598 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); in tse_start_xmit()
608 dma_sync_single_for_device(priv->device, buffer->dma_addr, in tse_start_xmit()
611 priv->dmaops->tx_buffer(priv, buffer); in tse_start_xmit()
615 priv->tx_prod++; in tse_start_xmit()
618 if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) { in tse_start_xmit()
619 if (netif_msg_hw(priv)) in tse_start_xmit()
620 netdev_dbg(priv->dev, "%s: stop transmitted packets\n", in tse_start_xmit()
626 spin_unlock_bh(&priv->tx_lock); in tse_start_xmit()
639 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_adjust_link() local
640 struct phy_device *phydev = priv->phydev; in altera_tse_adjust_link()
644 spin_lock(&priv->mac_cfg_lock); in altera_tse_adjust_link()
647 u32 cfg_reg = ioread32(&priv->mac_dev->command_config); in altera_tse_adjust_link()
650 if (phydev->duplex != priv->oldduplex) { in altera_tse_adjust_link()
657 netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n", in altera_tse_adjust_link()
660 priv->oldduplex = phydev->duplex; in altera_tse_adjust_link()
664 if (phydev->speed != priv->oldspeed) { in altera_tse_adjust_link()
680 if (netif_msg_link(priv)) in altera_tse_adjust_link()
685 priv->oldspeed = phydev->speed; in altera_tse_adjust_link()
687 iowrite32(cfg_reg, &priv->mac_dev->command_config); in altera_tse_adjust_link()
689 if (!priv->oldlink) { in altera_tse_adjust_link()
691 priv->oldlink = 1; in altera_tse_adjust_link()
693 } else if (priv->oldlink) { in altera_tse_adjust_link()
695 priv->oldlink = 0; in altera_tse_adjust_link()
696 priv->oldspeed = 0; in altera_tse_adjust_link()
697 priv->oldduplex = -1; in altera_tse_adjust_link()
700 if (new_state && netif_msg_link(priv)) in altera_tse_adjust_link()
703 spin_unlock(&priv->mac_cfg_lock); in altera_tse_adjust_link()
707 struct altera_tse_private *priv = netdev_priv(dev); in connect_local_phy() local
711 if (priv->phy_addr != POLL_PHY) { in connect_local_phy()
713 priv->mdio->id, priv->phy_addr); in connect_local_phy()
718 priv->phy_iface); in connect_local_phy()
724 phydev = phy_find_first(priv->mdio); in connect_local_phy()
731 priv->phy_iface); in connect_local_phy()
742 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_phy_get_addr_mdio_create() local
743 struct device_node *np = priv->device->of_node; in altera_tse_phy_get_addr_mdio_create()
746 priv->phy_iface = of_get_phy_mode(np); in altera_tse_phy_get_addr_mdio_create()
749 if (!priv->phy_iface) in altera_tse_phy_get_addr_mdio_create()
756 if (of_property_read_u32(priv->device->of_node, "phy-addr", in altera_tse_phy_get_addr_mdio_create()
757 &priv->phy_addr)) { in altera_tse_phy_get_addr_mdio_create()
758 priv->phy_addr = POLL_PHY; in altera_tse_phy_get_addr_mdio_create()
761 if (!((priv->phy_addr == POLL_PHY) || in altera_tse_phy_get_addr_mdio_create()
762 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { in altera_tse_phy_get_addr_mdio_create()
764 priv->phy_addr); in altera_tse_phy_get_addr_mdio_create()
782 struct altera_tse_private *priv = netdev_priv(dev); in init_phy() local
789 if (!priv->phy_iface) in init_phy()
792 priv->oldlink = 0; in init_phy()
793 priv->oldspeed = 0; in init_phy()
794 priv->oldduplex = -1; in init_phy()
796 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); in init_phy()
800 if (of_phy_is_fixed_link(priv->device->of_node)) { in init_phy()
801 rc = of_phy_register_fixed_link(priv->device->of_node); in init_phy()
810 phynode = of_node_get(priv->device->of_node); in init_phy()
816 0, priv->phy_iface); in init_phy()
819 if (!priv->mdio) { in init_phy()
828 &altera_tse_adjust_link, 0, priv->phy_iface); in init_phy()
840 if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || in init_phy()
841 (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) in init_phy()
860 priv->phydev = phydev; in init_phy()
864 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) in tse_update_mac_addr() argument
873 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); in tse_update_mac_addr()
874 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); in tse_update_mac_addr()
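tse_update_mac_addr() splits the six-byte station address across two CSRs: four octets in mac_addr_0 and two in the low half of mac_addr_1. The shift lines themselves don't reference priv and so aren't listed above; the packing below (lowest octet in the lowest byte) is an assumption consistent with that register pair:

#include <assert.h>
#include <stdint.h>

/* Pack addr[0..3] into one 32-bit word and addr[4..5] into the low
 * 16 bits of a second word; byte order assumed, not shown above.
 */
static void pack_mac(const uint8_t *addr, uint32_t *msb, uint32_t *lsb)
{
	*msb = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[1] << 8) | addr[0];
	*lsb = (((uint32_t)addr[5] << 8) | addr[4]) & 0xffff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	uint32_t msb, lsb;

	pack_mac(mac, &msb, &lsb);
	assert(msb == 0x105e0002);	/* would go to mac_addr_0 */
	assert(lsb == 0x3020);		/* would go to mac_addr_1 */
	return 0;
}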
883 static int reset_mac(struct altera_tse_private *priv) in reset_mac() argument
888 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); in reset_mac()
891 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); in reset_mac()
895 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), in reset_mac()
902 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); in reset_mac()
904 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); in reset_mac()
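reset_mac() is the usual poll-for-self-clearing-bit pattern: set the reset bit, then spin a bounded number of times waiting for hardware to clear it, and report a timeout if it never does. A kernel-style sketch of the shape; the constant names follow the driver's header but treat them as placeholders here:

/* Bounded poll of a self-clearing reset bit. */
static int soft_reset(struct altera_tse_private *priv)
{
	u32 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	int counter = 0;

	dat |= MAC_CMDCFG_SW_RESET;	/* hardware clears this when done */
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev,
				     tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			return 0;	/* reset completed */
		udelay(1);
	}
	return -ETIMEDOUT;	/* the MAC never came back */
}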
912 static int init_mac(struct altera_tse_private *priv) in init_mac() argument
918 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, in init_mac()
919 priv->mac_dev, tse_csroffs(rx_section_empty)); in init_mac()
921 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, in init_mac()
924 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev, in init_mac()
927 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, in init_mac()
931 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, in init_mac()
932 priv->mac_dev, tse_csroffs(tx_section_empty)); in init_mac()
934 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, in init_mac()
937 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, in init_mac()
940 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, in init_mac()
944 tse_update_mac_addr(priv, priv->dev->dev_addr); in init_mac()
947 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; in init_mac()
948 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); in init_mac()
950 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, in init_mac()
956 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), in init_mac()
959 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), in init_mac()
964 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); in init_mac()
979 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); in init_mac()
981 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, in init_mac()
984 if (netif_msg_hw(priv)) in init_mac()
985 dev_dbg(priv->device, in init_mac()
993 static void tse_set_mac(struct altera_tse_private *priv, bool enable) in tse_set_mac() argument
995 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); in tse_set_mac()
1002 csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); in tse_set_mac()
1009 struct altera_tse_private *priv = netdev_priv(dev); in tse_change_mtu() local
1010 unsigned int max_mtu = priv->max_mtu; in tse_change_mtu()
1031 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_set_mcfilter() local
1037 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); in altera_tse_set_mcfilter()
1053 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); in altera_tse_set_mcfilter()
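altera_tse_set_mcfilter() clears all 64 hash-table CSRs and then sets one word per multicast address, indexed by a 6-bit hash. The hash computation itself doesn't reference priv and isn't listed above; the XOR-reduction scheme below, one hash bit per MAC octet, is a runnable sketch of the scheme this MAC family documents, not a verbatim copy:

#include <assert.h>
#include <stdint.h>

/* 6-bit hash: bit i is the XOR-reduction of the eight bits of MAC
 * octet i. The value 0..63 selects one of the 64 hash_table CSRs.
 */
static unsigned int mc_hash(const uint8_t *addr)
{
	unsigned int hash = 0;
	int octet, bit;

	for (octet = 5; octet >= 0; octet--) {
		unsigned int xor_bit = 0;

		for (bit = 0; bit < 8; bit++)
			xor_bit ^= (addr[octet] >> bit) & 1;
		hash = (hash << 1) | xor_bit;
	}
	return hash;	/* accept by writing 1 to hash_table + hash * 4 */
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	assert(mc_hash(mc) < 64);
	return 0;
}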
1060 struct altera_tse_private *priv = netdev_priv(dev); in altera_tse_set_mcfilterall() local
1065 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); in altera_tse_set_mcfilterall()
1072 struct altera_tse_private *priv = netdev_priv(dev); in tse_set_rx_mode_hashfilter() local
1074 spin_lock(&priv->mac_cfg_lock); in tse_set_rx_mode_hashfilter()
1077 tse_set_bit(priv->mac_dev, tse_csroffs(command_config), in tse_set_rx_mode_hashfilter()
1085 spin_unlock(&priv->mac_cfg_lock); in tse_set_rx_mode_hashfilter()
1092 struct altera_tse_private *priv = netdev_priv(dev); in tse_set_rx_mode() local
1094 spin_lock(&priv->mac_cfg_lock); in tse_set_rx_mode()
1098 tse_set_bit(priv->mac_dev, tse_csroffs(command_config), in tse_set_rx_mode()
1101 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), in tse_set_rx_mode()
1104 spin_unlock(&priv->mac_cfg_lock); in tse_set_rx_mode()
1111 struct altera_tse_private *priv = netdev_priv(dev); in tse_open() local
1117 ret = priv->dmaops->init_dma(priv); in tse_open()
1123 if (netif_msg_ifup(priv)) in tse_open()
1127 if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) in tse_open()
1128 netdev_warn(dev, "TSE revision %x\n", priv->revision); in tse_open()
1130 spin_lock(&priv->mac_cfg_lock); in tse_open()
1131 ret = reset_mac(priv); in tse_open()
1139 ret = init_mac(priv); in tse_open()
1140 spin_unlock(&priv->mac_cfg_lock); in tse_open()
1146 priv->dmaops->reset_dma(priv); in tse_open()
1149 priv->rx_ring_size = dma_rx_num; in tse_open()
1150 priv->tx_ring_size = dma_tx_num; in tse_open()
1151 ret = alloc_init_skbufs(priv); in tse_open()
1159 ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED, in tse_open()
1163 priv->rx_irq); in tse_open()
1168 ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED, in tse_open()
1172 priv->tx_irq); in tse_open()
1177 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); in tse_open()
1178 priv->dmaops->enable_rxirq(priv); in tse_open()
1179 priv->dmaops->enable_txirq(priv); in tse_open()
1182 for (i = 0; i < priv->rx_ring_size; i++) in tse_open()
1183 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]); in tse_open()
1185 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); in tse_open()
1187 if (priv->phydev) in tse_open()
1188 phy_start(priv->phydev); in tse_open()
1190 napi_enable(&priv->napi); in tse_open()
1193 priv->dmaops->start_rxdma(priv); in tse_open()
1196 spin_lock(&priv->mac_cfg_lock); in tse_open()
1197 tse_set_mac(priv, true); in tse_open()
1198 spin_unlock(&priv->mac_cfg_lock); in tse_open()
1203 free_irq(priv->rx_irq, dev); in tse_open()
1215 struct altera_tse_private *priv = netdev_priv(dev); in tse_shutdown() local
1220 if (priv->phydev) in tse_shutdown()
1221 phy_stop(priv->phydev); in tse_shutdown()
1224 napi_disable(&priv->napi); in tse_shutdown()
1227 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); in tse_shutdown()
1228 priv->dmaops->disable_rxirq(priv); in tse_shutdown()
1229 priv->dmaops->disable_txirq(priv); in tse_shutdown()
1230 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); in tse_shutdown()
1233 free_irq(priv->rx_irq, dev); in tse_shutdown()
1234 free_irq(priv->tx_irq, dev); in tse_shutdown()
1237 spin_lock(&priv->mac_cfg_lock); in tse_shutdown()
1238 spin_lock(&priv->tx_lock); in tse_shutdown()
1240 ret = reset_mac(priv); in tse_shutdown()
1247 priv->dmaops->reset_dma(priv); in tse_shutdown()
1250 spin_unlock(&priv->tx_lock); in tse_shutdown()
1251 spin_unlock(&priv->mac_cfg_lock); in tse_shutdown()
1253 priv->dmaops->uninit_dma(priv); in tse_shutdown()
1305 struct altera_tse_private *priv; in altera_tse_probe() local
1318 priv = netdev_priv(ndev); in altera_tse_probe()
1319 priv->device = &pdev->dev; in altera_tse_probe()
1320 priv->dev = ndev; in altera_tse_probe()
1321 priv->msg_enable = netif_msg_init(debug, default_msg_level); in altera_tse_probe()
1326 priv->dmaops = (struct altera_dmaops *)of_id->data; in altera_tse_probe()
1329 if (priv->dmaops && in altera_tse_probe()
1330 priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { in altera_tse_probe()
1337 priv->tx_dma_desc = descmap; in altera_tse_probe()
1340 priv->txdescmem = resource_size(dma_res)/2; in altera_tse_probe()
1342 priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; in altera_tse_probe()
1344 priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + in altera_tse_probe()
1345 priv->txdescmem)); in altera_tse_probe()
1346 priv->rxdescmem = resource_size(dma_res)/2; in altera_tse_probe()
1347 priv->rxdescmem_busaddr = dma_res->start; in altera_tse_probe()
1348 priv->rxdescmem_busaddr += priv->txdescmem; in altera_tse_probe()
1350 if (upper_32_bits(priv->rxdescmem_busaddr)) { in altera_tse_probe()
1351 dev_dbg(priv->device, in altera_tse_probe()
1355 if (upper_32_bits(priv->txdescmem_busaddr)) { in altera_tse_probe()
1356 dev_dbg(priv->device, in altera_tse_probe()
1360 } else if (priv->dmaops && in altera_tse_probe()
1361 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { in altera_tse_probe()
1363 &priv->rx_dma_resp); in altera_tse_probe()
1368 &priv->tx_dma_desc); in altera_tse_probe()
1372 priv->txdescmem = resource_size(dma_res); in altera_tse_probe()
1373 priv->txdescmem_busaddr = dma_res->start; in altera_tse_probe()
1376 &priv->rx_dma_desc); in altera_tse_probe()
1380 priv->rxdescmem = resource_size(dma_res); in altera_tse_probe()
1381 priv->rxdescmem_busaddr = dma_res->start; in altera_tse_probe()
1387 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) in altera_tse_probe()
1388 dma_set_coherent_mask(priv->device, in altera_tse_probe()
1389 DMA_BIT_MASK(priv->dmaops->dmamask)); in altera_tse_probe()
1390 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) in altera_tse_probe()
1391 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); in altera_tse_probe()
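This mask negotiation tries the DMA variant's native addressing width first and falls back to 32-bit, keeping the streaming and coherent masks in step. The same logic can be written with the combined helper; a minimal sketch, with a hypothetical error label:

/* Prefer the variant's native mask, fall back to 32-bit addressing;
 * dma_set_mask_and_coherent() returns 0 on success.
 */
if (dma_set_mask_and_coherent(priv->device,
			      DMA_BIT_MASK(priv->dmaops->dmamask)) &&
    dma_set_mask_and_coherent(priv->device, DMA_BIT_MASK(32)))
	goto err_free_netdev;	/* hypothetical error path */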
1397 (void __iomem **)&priv->mac_dev); in altera_tse_probe()
1403 &priv->rx_dma_csr); in altera_tse_probe()
1410 &priv->tx_dma_csr); in altera_tse_probe()
1416 priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); in altera_tse_probe()
1417 if (priv->rx_irq == -ENXIO) { in altera_tse_probe()
1424 priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq"); in altera_tse_probe()
1425 if (priv->tx_irq == -ENXIO) { in altera_tse_probe()
1433 &priv->rx_fifo_depth)) { in altera_tse_probe()
1440 &priv->tx_fifo_depth)) { in altera_tse_probe()
1447 priv->hash_filter = in altera_tse_probe()
1454 priv->hash_filter = 0; in altera_tse_probe()
1457 priv->added_unicast = in altera_tse_probe()
1462 priv->max_mtu = ETH_DATA_LEN; in altera_tse_probe()
1469 &priv->max_mtu); in altera_tse_probe()
1474 priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; in altera_tse_probe()
1497 if (priv->hash_filter) in altera_tse_probe()
1514 netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT); in altera_tse_probe()
1516 spin_lock_init(&priv->mac_cfg_lock); in altera_tse_probe()
1517 spin_lock_init(&priv->tx_lock); in altera_tse_probe()
1518 spin_lock_init(&priv->rxdma_irq_lock); in altera_tse_probe()
1529 priv->revision = ioread32(&priv->mac_dev->megacore_revision); in altera_tse_probe()
1531 if (netif_msg_probe(priv)) in altera_tse_probe()
1533 (priv->revision >> 8) & 0xff, in altera_tse_probe()
1534 priv->revision & 0xff, in altera_tse_probe()
1535 (unsigned long) control_port->start, priv->rx_irq, in altera_tse_probe()
1536 priv->tx_irq); in altera_tse_probe()
1548 netif_napi_del(&priv->napi); in altera_tse_probe()
1560 struct altera_tse_private *priv = netdev_priv(ndev); in altera_tse_remove() local
1562 if (priv->phydev) in altera_tse_remove()
1563 phy_disconnect(priv->phydev); in altera_tse_remove()