Lines matching refs:ksp
(All references to the struct ks8695_priv *ksp private-data pointer in the ks8695 ethernet driver. Each entry shows the source line number, the matching line, and the enclosing function; "argument" marks lines where ksp is a function parameter.)
199 ks8695_readreg(struct ks8695_priv *ksp, int reg) in ks8695_readreg() argument
201 return readl(ksp->io_regs + reg); in ks8695_readreg()
211 ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value) in ks8695_writereg() argument
213 writel(value, ksp->io_regs + reg); in ks8695_writereg()
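
Note: ks8695_readreg()/ks8695_writereg() are thin MMIO wrappers around the mapped register window. A minimal, self-contained sketch of that pattern, with the private struct reduced to the one field the helpers use (not verbatim from the driver):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Reduced private struct: only the mapped register window is shown. */
	struct ks8695_priv_sketch {
		void __iomem *io_regs;
	};

	static inline u32 ks_readreg(struct ks8695_priv_sketch *ksp, int reg)
	{
		return readl(ksp->io_regs + reg);	/* 32-bit MMIO read */
	}

	static inline void ks_writereg(struct ks8695_priv_sketch *ksp, int reg, u32 value)
	{
		writel(value, ksp->io_regs + reg);	/* 32-bit MMIO write */
	}
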
226 ks8695_port_type(struct ks8695_priv *ksp) in ks8695_port_type() argument
228 switch (ksp->dtype) { in ks8695_port_type()
248 ks8695_update_mac(struct ks8695_priv *ksp) in ks8695_update_mac() argument
251 struct net_device *ndev = ksp->ndev; in ks8695_update_mac()
258 ks8695_writereg(ksp, KS8695_MAL, maclow); in ks8695_update_mac()
259 ks8695_writereg(ksp, KS8695_MAH, machigh); in ks8695_update_mac()
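
The lines of ks8695_update_mac() that build maclow/machigh do not reference ksp and are therefore not listed. A hedged reconstruction of the packing (low four octets of dev_addr into MAL, high two into MAH; the byte order is assumed, not quoted from the source):

	/* Assumed packing of the six-byte station address into two registers. */
	u32 maclow, machigh;

	maclow  = (ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
		  (ndev->dev_addr[4] <<  8) |  ndev->dev_addr[5];
	machigh = (ndev->dev_addr[0] <<  8) |  ndev->dev_addr[1];

	ks8695_writereg(ksp, KS8695_MAL, maclow);
	ks8695_writereg(ksp, KS8695_MAH, machigh);
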
273 ks8695_refill_rxbuffers(struct ks8695_priv *ksp) in ks8695_refill_rxbuffers() argument
279 if (!ksp->rx_buffers[buff_n].skb) { in ks8695_refill_rxbuffers()
281 netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE); in ks8695_refill_rxbuffers()
284 ksp->rx_buffers[buff_n].skb = skb; in ks8695_refill_rxbuffers()
292 mapping = dma_map_single(ksp->dev, skb->data, in ks8695_refill_rxbuffers()
295 if (unlikely(dma_mapping_error(ksp->dev, mapping))) { in ks8695_refill_rxbuffers()
298 ksp->rx_buffers[buff_n].skb = NULL; in ks8695_refill_rxbuffers()
301 ksp->rx_buffers[buff_n].dma_ptr = mapping; in ks8695_refill_rxbuffers()
302 ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE; in ks8695_refill_rxbuffers()
305 ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping); in ks8695_refill_rxbuffers()
306 ksp->rx_ring[buff_n].length = in ks8695_refill_rxbuffers()
312 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); in ks8695_refill_rxbuffers()
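
The refill loop above follows the standard descriptor-ownership protocol: allocate and DMA-map a buffer, fill the descriptor, and only then hand it to the MAC by setting RDES_OWN. A condensed sketch of one loop iteration (the error path and the length encoding are assumptions, not quotes):

	struct sk_buff *skb = netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
	dma_addr_t mapping;

	if (!skb)
		break;			/* out of memory: try again on the next refill */

	mapping = dma_map_single(ksp->dev, skb->data,
				 MAX_RXBUF_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
		dev_kfree_skb_irq(skb);		/* give the buffer back; cleanup path assumed */
		break;
	}

	ksp->rx_buffers[buff_n].skb     = skb;
	ksp->rx_buffers[buff_n].dma_ptr = mapping;
	ksp->rx_buffers[buff_n].length  = MAX_RXBUF_SIZE;

	ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
	ksp->rx_ring[buff_n].length   = cpu_to_le32(MAX_RXBUF_SIZE);	/* encoding assumed */

	/* Ownership goes to hardware last, so the DMA engine never sees a
	 * half-initialised descriptor. */
	ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
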
331 ks8695_init_partial_multicast(struct ks8695_priv *ksp, in ks8695_init_partial_multicast() argument
347 ks8695_writereg(ksp, KS8695_AAL_(i), low); in ks8695_init_partial_multicast()
348 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); in ks8695_init_partial_multicast()
354 ks8695_writereg(ksp, KS8695_AAL_(i), 0); in ks8695_init_partial_multicast()
355 ks8695_writereg(ksp, KS8695_AAH_(i), 0); in ks8695_init_partial_multicast()
374 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_tx_irq() local
378 if (ksp->tx_buffers[buff_n].skb && in ks8695_tx_irq()
379 !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) { in ks8695_tx_irq()
384 ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length; in ks8695_tx_irq()
387 ksp->tx_ring[buff_n].data_ptr = 0; in ks8695_tx_irq()
390 dma_unmap_single(ksp->dev, in ks8695_tx_irq()
391 ksp->tx_buffers[buff_n].dma_ptr, in ks8695_tx_irq()
392 ksp->tx_buffers[buff_n].length, in ks8695_tx_irq()
394 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); in ks8695_tx_irq()
395 ksp->tx_buffers[buff_n].skb = NULL; in ks8695_tx_irq()
396 ksp->tx_ring_used--; in ks8695_tx_irq()
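
ks8695_tx_irq() reclaims descriptors whose TDES_OWN bit the MAC has cleared: account the frame, unhook the descriptor, break the DMA mapping, then free the skb with the IRQ-safe variant. A condensed sketch of that loop (loop bounds, stats counters and the queue wake-up are assumptions; ndev is ksp->ndev):

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {	/* bounds assumed */
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			ndev->stats.tx_packets++;			/* assumed */
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			ksp->tx_ring[buff_n].data_ptr = 0;

			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}
	netif_wake_queue(ndev);		/* assumed: let the stack queue frames again */
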
419 static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp) in ks8695_get_rx_enable_bit() argument
421 return ksp->rx_irq; in ks8695_get_rx_enable_bit()
436 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_rx_irq() local
438 spin_lock(&ksp->rx_lock); in ks8695_rx_irq()
440 if (napi_schedule_prep(&ksp->napi)) { in ks8695_rx_irq()
442 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); in ks8695_rx_irq()
446 __napi_schedule(&ksp->napi); in ks8695_rx_irq()
449 spin_unlock(&ksp->rx_lock); in ks8695_rx_irq()
458 static int ks8695_rx(struct ks8695_priv *ksp, int budget) in ks8695_rx() argument
460 struct net_device *ndev = ksp->ndev; in ks8695_rx()
467 buff_n = ksp->next_rx_desc_read; in ks8695_rx()
469 && ksp->rx_buffers[buff_n].skb in ks8695_rx()
470 && (!(ksp->rx_ring[buff_n].status & in ks8695_rx()
473 flags = le32_to_cpu(ksp->rx_ring[buff_n].status); in ks8695_rx()
507 skb = ksp->rx_buffers[buff_n].skb; in ks8695_rx()
510 ksp->rx_buffers[buff_n].skb = NULL; in ks8695_rx()
511 ksp->rx_ring[buff_n].data_ptr = 0; in ks8695_rx()
514 dma_unmap_single(ksp->dev, in ks8695_rx()
515 ksp->rx_buffers[buff_n].dma_ptr, in ks8695_rx()
516 ksp->rx_buffers[buff_n].length, in ks8695_rx()
534 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); in ks8695_rx()
541 ksp->next_rx_desc_read = buff_n; in ks8695_rx()
544 ks8695_refill_rxbuffers(ksp); in ks8695_rx()
547 ks8695_writereg(ksp, KS8695_DRSC, 0); in ks8695_rx()
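
Much of the descriptor-status decode and the skb hand-off inside ks8695_rx() does not touch ksp, so it is absent above. The per-packet path, roughly (the flag/field names RDES_ES and RDES_FLEN are assumptions):

	u32 flags;
	int pktlen;
	struct sk_buff *skb;

	flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
	skb = ksp->rx_buffers[buff_n].skb;

	if (flags & RDES_ES) {			/* error-summary bit: name assumed */
		ndev->stats.rx_errors++;	/* descriptor is re-armed below unchanged */
	} else {
		pktlen = flags & RDES_FLEN;	/* frame-length field: name assumed */

		ksp->rx_buffers[buff_n].skb = NULL;
		ksp->rx_ring[buff_n].data_ptr = 0;

		dma_unmap_single(ksp->dev,
				 ksp->rx_buffers[buff_n].dma_ptr,
				 ksp->rx_buffers[buff_n].length,
				 DMA_FROM_DEVICE);

		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
	}

	/* Either way the slot is handed back to hardware afterwards. */
	ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
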
563 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); in ks8695_poll() local
567 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); in ks8695_poll()
569 work_done = ks8695_rx(ksp, budget); in ks8695_poll()
573 spin_lock_irqsave(&ksp->rx_lock, flags); in ks8695_poll()
577 spin_unlock_irqrestore(&ksp->rx_lock, flags); in ks8695_poll()
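
ks8695_rx_irq() and ks8695_poll() implement the usual NAPI handshake: the hard IRQ masks its own source and schedules the poller; the poller processes up to budget packets and, if it finishes early, completes NAPI and unmasks the source under rx_lock. A sketch of the poll side (the interrupt-enable register write is SoC-specific and only hinted at in a comment):

	static int ks8695_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
		int work_done;

		work_done = ks8695_rx(ksp, budget);

		if (work_done < budget) {
			unsigned long flags;

			spin_lock_irqsave(&ksp->rx_lock, flags);
			napi_complete(napi);
			/* Re-enable the RX interrupt source,
			 * 1 << ks8695_get_rx_enable_bit(ksp); the exact
			 * register write is omitted here. */
			spin_unlock_irqrestore(&ksp->rx_lock, flags);
		}
		return work_done;
	}
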
594 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_link_irq() local
597 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_link_irq()
600 if (netif_msg_link(ksp)) in ks8695_link_irq()
601 dev_info(ksp->dev, in ks8695_link_irq()
608 if (netif_msg_link(ksp)) in ks8695_link_irq()
609 dev_info(ksp->dev, "%s: Link is now down.\n", in ks8695_link_irq()
627 ks8695_reset(struct ks8695_priv *ksp) in ks8695_reset() argument
631 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST); in ks8695_reset()
633 if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST)) in ks8695_reset()
639 dev_crit(ksp->dev, in ks8695_reset()
650 ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB); in ks8695_reset()
652 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC); in ks8695_reset()
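
ks8695_reset() sets the TX self-reset bit and then polls for it to clear within a bounded wait before programming the default DMA control values; most of the polling loop does not mention ksp, hence the gap above. A sketch of the wait (the timeout variable and its bound are assumptions):

	int reset_timeout = 1000;			/* bound in ms, value assumed */

	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout > 0) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;				/* reset bit has self-cleared */
		msleep(1);
		reset_timeout--;
	}

	if (reset_timeout <= 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* continue anyway and program the defaults below */
	}
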
664 ks8695_shutdown(struct ks8695_priv *ksp) in ks8695_shutdown() argument
670 ctrl = ks8695_readreg(ksp, KS8695_DTXC); in ks8695_shutdown()
671 ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE); in ks8695_shutdown()
674 ctrl = ks8695_readreg(ksp, KS8695_DRXC); in ks8695_shutdown()
675 ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE); in ks8695_shutdown()
678 free_irq(ksp->rx_irq, ksp->ndev); in ks8695_shutdown()
679 free_irq(ksp->tx_irq, ksp->ndev); in ks8695_shutdown()
680 if (ksp->link_irq != -1) in ks8695_shutdown()
681 free_irq(ksp->link_irq, ksp->ndev); in ks8695_shutdown()
685 if (ksp->tx_buffers[buff_n].skb) { in ks8695_shutdown()
687 ksp->tx_ring[buff_n].owner = 0; in ks8695_shutdown()
688 ksp->tx_ring[buff_n].status = 0; in ks8695_shutdown()
689 ksp->tx_ring[buff_n].data_ptr = 0; in ks8695_shutdown()
692 dma_unmap_single(ksp->dev, in ks8695_shutdown()
693 ksp->tx_buffers[buff_n].dma_ptr, in ks8695_shutdown()
694 ksp->tx_buffers[buff_n].length, in ks8695_shutdown()
696 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); in ks8695_shutdown()
697 ksp->tx_buffers[buff_n].skb = NULL; in ks8695_shutdown()
703 if (ksp->rx_buffers[buff_n].skb) { in ks8695_shutdown()
705 ksp->rx_ring[buff_n].status = 0; in ks8695_shutdown()
706 ksp->rx_ring[buff_n].data_ptr = 0; in ks8695_shutdown()
709 dma_unmap_single(ksp->dev, in ks8695_shutdown()
710 ksp->rx_buffers[buff_n].dma_ptr, in ks8695_shutdown()
711 ksp->rx_buffers[buff_n].length, in ks8695_shutdown()
713 dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb); in ks8695_shutdown()
714 ksp->rx_buffers[buff_n].skb = NULL; in ks8695_shutdown()
754 ks8695_init_net(struct ks8695_priv *ksp) in ks8695_init_net() argument
759 ks8695_refill_rxbuffers(ksp); in ks8695_init_net()
762 ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma); in ks8695_init_net()
763 ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma); in ks8695_init_net()
766 ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name, in ks8695_init_net()
767 ks8695_rx_irq, ksp->ndev); in ks8695_init_net()
770 ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name, in ks8695_init_net()
771 ks8695_tx_irq, ksp->ndev); in ks8695_init_net()
774 if (ksp->link_irq != -1) { in ks8695_init_net()
775 ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name, in ks8695_init_net()
776 ks8695_link_irq, ksp->ndev); in ks8695_init_net()
782 ksp->next_rx_desc_read = 0; in ks8695_init_net()
783 ksp->tx_ring_next_slot = 0; in ks8695_init_net()
784 ksp->tx_ring_used = 0; in ks8695_init_net()
787 ctrl = ks8695_readreg(ksp, KS8695_DTXC); in ks8695_init_net()
789 ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE); in ks8695_init_net()
792 ctrl = ks8695_readreg(ksp, KS8695_DRXC); in ks8695_init_net()
794 ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE); in ks8695_init_net()
796 ks8695_writereg(ksp, KS8695_DRSC, 0); in ks8695_init_net()
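
ks8695_init_net() brings the interface up in a fixed order: refill the RX buffers, program the ring base registers, request the IRQs, reset the ring indices, then enable TX and RX and kick the receive DMA. The tail of that sequence, condensed (the comment on DRSC is an assumption based on the same write appearing at the end of ks8695_rx()):

	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);	/* enable transmitter */

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);	/* enable receiver */

	/* A write to DRSC appears to (re)start the receive DMA scan. */
	ks8695_writereg(ksp, KS8695_DRSC, 0);
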
810 ks8695_release_device(struct ks8695_priv *ksp) in ks8695_release_device() argument
813 iounmap(ksp->io_regs); in ks8695_release_device()
814 if (ksp->phyiface_regs) in ks8695_release_device()
815 iounmap(ksp->phyiface_regs); in ks8695_release_device()
818 release_resource(ksp->regs_req); in ks8695_release_device()
819 kfree(ksp->regs_req); in ks8695_release_device()
820 if (ksp->phyiface_req) { in ks8695_release_device()
821 release_resource(ksp->phyiface_req); in ks8695_release_device()
822 kfree(ksp->phyiface_req); in ks8695_release_device()
826 dma_free_coherent(ksp->dev, RING_DMA_SIZE, in ks8695_release_device()
827 ksp->ring_base, ksp->ring_base_dma); in ks8695_release_device()
839 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_get_msglevel() local
841 return ksp->msg_enable; in ks8695_get_msglevel()
852 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_set_msglevel() local
854 ksp->msg_enable = value; in ks8695_set_msglevel()
865 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_wan_get_settings() local
879 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_get_settings()
920 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_wan_set_settings() local
942 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_set_settings()
957 writel(ctrl, ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_set_settings()
959 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_set_settings()
970 writel(ctrl, ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_set_settings()
983 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_wan_nwayreset() local
986 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_nwayreset()
990 ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_nwayreset()
1006 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_wan_get_pause() local
1009 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); in ks8695_wan_get_pause()
1015 ctrl = ks8695_readreg(ksp, KS8695_DRXC); in ks8695_wan_get_pause()
1019 ctrl = ks8695_readreg(ksp, KS8695_DTXC); in ks8695_wan_get_pause()
1064 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_set_mac() local
1072 ks8695_update_mac(ksp); in ks8695_set_mac()
1074 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n", in ks8695_set_mac()
1090 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_set_multicast() local
1093 ctrl = ks8695_readreg(ksp, KS8695_DRXC); in ks8695_set_multicast()
1114 ks8695_init_partial_multicast(ksp, ndev); in ks8695_set_multicast()
1117 ks8695_writereg(ksp, KS8695_DRXC, ctrl); in ks8695_set_multicast()
1129 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_timeout() local
1132 ks8695_shutdown(ksp); in ks8695_timeout()
1134 ks8695_reset(ksp); in ks8695_timeout()
1136 ks8695_update_mac(ksp); in ks8695_timeout()
1141 ks8695_init_net(ksp); in ks8695_timeout()
1162 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_start_xmit() local
1166 spin_lock_irq(&ksp->txq_lock); in ks8695_start_xmit()
1168 if (ksp->tx_ring_used == MAX_TX_DESC) { in ks8695_start_xmit()
1170 spin_unlock_irq(&ksp->txq_lock); in ks8695_start_xmit()
1174 buff_n = ksp->tx_ring_next_slot; in ks8695_start_xmit()
1176 BUG_ON(ksp->tx_buffers[buff_n].skb); in ks8695_start_xmit()
1178 dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE); in ks8695_start_xmit()
1179 if (unlikely(dma_mapping_error(ksp->dev, dmap))) { in ks8695_start_xmit()
1181 spin_unlock_irq(&ksp->txq_lock); in ks8695_start_xmit()
1182 dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\ in ks8695_start_xmit()
1187 ksp->tx_buffers[buff_n].dma_ptr = dmap; in ks8695_start_xmit()
1189 ksp->tx_buffers[buff_n].skb = skb; in ks8695_start_xmit()
1190 ksp->tx_buffers[buff_n].length = skb->len; in ks8695_start_xmit()
1193 ksp->tx_ring[buff_n].data_ptr = in ks8695_start_xmit()
1194 cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr); in ks8695_start_xmit()
1195 ksp->tx_ring[buff_n].status = in ks8695_start_xmit()
1202 ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN); in ks8695_start_xmit()
1204 if (++ksp->tx_ring_used == MAX_TX_DESC) in ks8695_start_xmit()
1208 ks8695_writereg(ksp, KS8695_DTSC, 0); in ks8695_start_xmit()
1211 ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK; in ks8695_start_xmit()
1213 spin_unlock_irq(&ksp->txq_lock); in ks8695_start_xmit()
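
ks8695_start_xmit() shows the matching transmit-side ownership protocol: the descriptor is fully written before TDES_OWN is set, and only then is the TX DMA engine kicked via DTSC. The descriptor fill, condensed (the status flag names and the queue-stop call are assumptions):

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	ksp->tx_buffers[buff_n].skb     = skb;
	ksp->tx_buffers[buff_n].length  = skb->len;

	ksp->tx_ring[buff_n].data_ptr = cpu_to_le32(dmap);
	ksp->tx_ring[buff_n].status   = cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
						    skb->len);	/* flag names and length field assumed */

	/* Ownership transfer last, then kick the TX DMA engine. */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);		/* assumed: stop the stack while the ring is full */

	ks8695_writereg(ksp, KS8695_DTSC, 0);
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
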
1227 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_stop() local
1230 napi_disable(&ksp->napi); in ks8695_stop()
1232 ks8695_shutdown(ksp); in ks8695_stop()
1248 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_open() local
1251 ks8695_reset(ksp); in ks8695_open()
1253 ks8695_update_mac(ksp); in ks8695_open()
1255 ret = ks8695_init_net(ksp); in ks8695_open()
1257 ks8695_shutdown(ksp); in ks8695_open()
1261 napi_enable(&ksp->napi); in ks8695_open()
1277 ks8695_init_switch(struct ks8695_priv *ksp) in ks8695_init_switch() argument
1291 writel(ctrl, ksp->phyiface_regs + KS8695_SEC0); in ks8695_init_switch()
1294 writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1); in ks8695_init_switch()
1305 ks8695_init_wan_phy(struct ks8695_priv *ksp) in ks8695_init_wan_phy() argument
1319 writel(ctrl, ksp->phyiface_regs + KS8695_WMC); in ks8695_init_wan_phy()
1321 writel(0, ksp->phyiface_regs + KS8695_WPPM); in ks8695_init_wan_phy()
1322 writel(0, ksp->phyiface_regs + KS8695_PPS); in ks8695_init_wan_phy()
1351 struct ks8695_priv *ksp; in ks8695_probe() local
1369 ksp = netdev_priv(ndev); in ks8695_probe()
1371 ksp->dev = &pdev->dev; in ks8695_probe()
1372 ksp->ndev = ndev; in ks8695_probe()
1373 ksp->msg_enable = NETIF_MSG_LINK; in ks8695_probe()
1384 dev_err(ksp->dev, "insufficient resources\n"); in ks8695_probe()
1389 ksp->regs_req = request_mem_region(regs_res->start, in ks8695_probe()
1393 if (!ksp->regs_req) { in ks8695_probe()
1394 dev_err(ksp->dev, "cannot claim register space\n"); in ks8695_probe()
1399 ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res)); in ks8695_probe()
1401 if (!ksp->io_regs) { in ks8695_probe()
1402 dev_err(ksp->dev, "failed to ioremap registers\n"); in ks8695_probe()
1408 ksp->phyiface_req = in ks8695_probe()
1413 if (!ksp->phyiface_req) { in ks8695_probe()
1414 dev_err(ksp->dev, in ks8695_probe()
1420 ksp->phyiface_regs = ioremap(phyiface_res->start, in ks8695_probe()
1423 if (!ksp->phyiface_regs) { in ks8695_probe()
1424 dev_err(ksp->dev, in ks8695_probe()
1431 ksp->rx_irq = rxirq_res->start; in ks8695_probe()
1432 ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX"; in ks8695_probe()
1433 ksp->tx_irq = txirq_res->start; in ks8695_probe()
1434 ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX"; in ks8695_probe()
1435 ksp->link_irq = (linkirq_res ? linkirq_res->start : -1); in ks8695_probe()
1436 ksp->link_irq_name = (linkirq_res && linkirq_res->name) ? in ks8695_probe()
1443 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); in ks8695_probe()
1448 machigh = ks8695_readreg(ksp, KS8695_MAH); in ks8695_probe()
1449 maclow = ks8695_readreg(ksp, KS8695_MAL); in ks8695_probe()
1459 dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please " in ks8695_probe()
1465 ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE, in ks8695_probe()
1466 &ksp->ring_base_dma, GFP_KERNEL); in ks8695_probe()
1467 if (!ksp->ring_base) { in ks8695_probe()
1473 ksp->tx_ring = ksp->ring_base; in ks8695_probe()
1474 ksp->tx_ring_dma = ksp->ring_base_dma; in ks8695_probe()
1477 spin_lock_init(&ksp->txq_lock); in ks8695_probe()
1478 spin_lock_init(&ksp->rx_lock); in ks8695_probe()
1481 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; in ks8695_probe()
1482 ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE; in ks8695_probe()
1485 memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE); in ks8695_probe()
1486 memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE); in ks8695_probe()
1490 ksp->tx_ring[buff_n].next_desc = in ks8695_probe()
1491 cpu_to_le32(ksp->tx_ring_dma + in ks8695_probe()
1497 ksp->rx_ring[buff_n].next_desc = in ks8695_probe()
1498 cpu_to_le32(ksp->rx_ring_dma + in ks8695_probe()
1504 if (ksp->phyiface_regs && ksp->link_irq == -1) { in ks8695_probe()
1505 ks8695_init_switch(ksp); in ks8695_probe()
1506 ksp->dtype = KS8695_DTYPE_LAN; in ks8695_probe()
1508 } else if (ksp->phyiface_regs && ksp->link_irq != -1) { in ks8695_probe()
1509 ks8695_init_wan_phy(ksp); in ks8695_probe()
1510 ksp->dtype = KS8695_DTYPE_WAN; in ks8695_probe()
1514 ksp->dtype = KS8695_DTYPE_HPNA; in ks8695_probe()
1523 dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n", in ks8695_probe()
1524 ks8695_port_type(ksp), ndev->dev_addr); in ks8695_probe()
1527 dev_err(ksp->dev, "ks8695net: failed to register netdev.\n"); in ks8695_probe()
1536 ks8695_release_device(ksp); in ks8695_probe()
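
In ks8695_probe() a single coherent allocation backs both rings: the TX ring occupies the front, the RX ring starts TX_RING_DMA_SIZE bytes in, and each descriptor's next_desc is chained to the following slot modulo the ring size. The layout and chaining, condensed (the descriptor struct names are assumptions):

	ksp->tx_ring     = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;
	ksp->rx_ring     = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n)
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *	/* name assumed */
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n)
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *	/* name assumed */
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
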
1553 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_drv_suspend() local
1555 ksp->in_suspend = 1; in ks8695_drv_suspend()
1559 ks8695_shutdown(ksp); in ks8695_drv_suspend()
1576 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_drv_resume() local
1579 ks8695_reset(ksp); in ks8695_drv_resume()
1580 ks8695_init_net(ksp); in ks8695_drv_resume()
1585 ksp->in_suspend = 0; in ks8695_drv_resume()
1600 struct ks8695_priv *ksp = netdev_priv(ndev); in ks8695_drv_remove() local
1602 netif_napi_del(&ksp->napi); in ks8695_drv_remove()
1605 ks8695_release_device(ksp); in ks8695_drv_remove()