Lines matching refs: mp (PowerMac MACE ethernet driver, mace.c)
91 static inline void mace_clean_rings(struct mace_data *mp);
113 struct mace_data *mp; in mace_probe() local
155 mp = netdev_priv(dev); in mace_probe()
156 mp->mdev = mdev; in mace_probe()
160 mp->mace = ioremap(dev->base_addr, 0x1000); in mace_probe()
161 if (mp->mace == NULL) { in mace_probe()
172 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | in mace_probe()
173 in_8(&mp->mace->chipid_lo); in mace_probe()
176 mp = netdev_priv(dev); in mace_probe()
177 mp->maccc = ENXMT | ENRCV; in mace_probe()
179 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); in mace_probe()
180 if (mp->tx_dma == NULL) { in mace_probe()
185 mp->tx_dma_intr = macio_irq(mdev, 1); in mace_probe()
187 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); in mace_probe()
188 if (mp->rx_dma == NULL) { in mace_probe()
193 mp->rx_dma_intr = macio_irq(mdev, 2); in mace_probe()
195 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); in mace_probe()
196 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; in mace_probe()
198 memset((char *) mp->tx_cmds, 0, in mace_probe()
200 init_timer(&mp->tx_timeout); in mace_probe()
201 spin_lock_init(&mp->lock); in mace_probe()
202 mp->timeout_active = 0; in mace_probe()
205 mp->port_aaui = port_aaui; in mace_probe()
209 mp->port_aaui = 1; in mace_probe()
212 mp->port_aaui = 1; in mace_probe()
214 mp->port_aaui = 0; in mace_probe()
231 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); in mace_probe()
233 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); in mace_probe()
236 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); in mace_probe()
238 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); in mace_probe()
250 mp->chipid >> 8, mp->chipid & 0xff); in mace_probe()
261 iounmap(mp->rx_dma); in mace_probe()
263 iounmap(mp->tx_dma); in mace_probe()
265 iounmap(mp->mace); in mace_probe()
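
mace_probe() maps three MMIO windows (the MACE register block at base_addr, then the TX and RX DBDMA channels from macio resources 1 and 2) and requests one IRQ per DMA channel; lines 261-265 are the matching failure unwind. A minimal sketch of that acquire-and-unwind shape, with illustrative label names (the real ones sit on unmatched lines):

    /* Sketch of mace_probe()'s resource unwind; labels are illustrative. */
    mp->mace = ioremap(dev->base_addr, 0x1000);          /* line 160 */
    if (mp->mace == NULL)
            goto err_free;
    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL)
            goto err_unmap_mace;
    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL)
            goto err_unmap_tx;
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0,
                     "MACE-txdma", dev);                 /* line 231 */
    if (rc)
            goto err_unmap_rx;
    /* ... rx DMA irq (line 236), register the netdev ... */
    return 0;

    err_unmap_rx:
            iounmap(mp->rx_dma);                         /* line 261 */
    err_unmap_tx:
            iounmap(mp->tx_dma);                         /* line 263 */
    err_unmap_mace:
            iounmap(mp->mace);                           /* line 265 */
    err_free:
            free_netdev(dev);                            /* assumed */
            return rc;
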
277 struct mace_data *mp; in mace_remove() local
283 mp = netdev_priv(dev); in mace_remove()
288 free_irq(mp->tx_dma_intr, dev); in mace_remove()
289 free_irq(mp->rx_dma_intr, dev); in mace_remove()
291 iounmap(mp->rx_dma); in mace_remove()
292 iounmap(mp->tx_dma); in mace_remove()
293 iounmap(mp->mace); in mace_remove()
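
mace_remove() releases everything in reverse order of probe: the DMA IRQ handlers are freed before the register windows are unmapped, so no late interrupt can dereference a stale mapping. The unmatched lines presumably unregister the netdev and free the chip's own IRQ first; assuming that, the teardown reads:

    unregister_netdev(dev);           /* assumed: stop the stack calling in */
    free_irq(mp->tx_dma_intr, dev);   /* line 288: no more txdma interrupts */
    free_irq(mp->rx_dma_intr, dev);   /* line 289 */
    iounmap(mp->rx_dma);              /* lines 291-293: MMIO goes away last */
    iounmap(mp->tx_dma);
    iounmap(mp->mace);
    free_netdev(dev);                 /* assumed: releases mace_data too */
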
319 struct mace_data *mp = netdev_priv(dev); in mace_reset() local
320 volatile struct mace __iomem *mb = mp->mace; in mace_reset()
352 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_reset()
363 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_reset()
366 if (mp->port_aaui) in mace_reset()
374 struct mace_data *mp = netdev_priv(dev); in __mace_set_address() local
375 volatile struct mace __iomem *mb = mp->mace; in __mace_set_address()
380 if (mp->chipid == BROKEN_ADDRCHG_REV) in __mace_set_address()
389 if (mp->chipid != BROKEN_ADDRCHG_REV) in __mace_set_address()
395 struct mace_data *mp = netdev_priv(dev); in mace_set_address() local
396 volatile struct mace __iomem *mb = mp->mace; in mace_set_address()
399 spin_lock_irqsave(&mp->lock, flags); in mace_set_address()
404 out_8(&mb->maccc, mp->maccc); in mace_set_address()
406 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_address()
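
Both the reset path (lines 352/363) and the address setters (lines 380/389) bracket their register programming with the same chipid test: on the revision with the broken address-change logic, the ADDRCHG handshake must be skipped. The bodies between the matched lines are not shown; a sketch of the workaround pattern, assuming the MACE datasheet register names iac and padr:

    /* Hedged reconstruction of __mace_set_address()'s body: only the two
     * chipid tests are in the match, the register writes are assumptions. */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, PHYADDR);            /* no ADDRCHG handshake */
    else {
            out_8(&mb->iac, ADDRCHG | PHYADDR);  /* request the change */
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                    ;                            /* wait for the chip */
    }
    for (i = 0; i < ETH_ALEN; ++i)
            out_8(&mb->padr, addr[i]);           /* load station address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);                  /* back to normal mode */

mace_set_address() is just the locked wrapper: it takes mp->lock with interrupts off, calls the helper, and rewrites maccc from the cached mp->maccc (line 404) before unlocking.
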
410 static inline void mace_clean_rings(struct mace_data *mp) in mace_clean_rings() argument
416 if (mp->rx_bufs[i] != NULL) { in mace_clean_rings()
417 dev_kfree_skb(mp->rx_bufs[i]); in mace_clean_rings()
418 mp->rx_bufs[i] = NULL; in mace_clean_rings()
421 for (i = mp->tx_empty; i != mp->tx_fill; ) { in mace_clean_rings()
422 dev_kfree_skb(mp->tx_bufs[i]); in mace_clean_rings()
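
The TX half of mace_clean_rings() walks from tx_empty to tx_fill, freeing each in-flight skb; the index increment with wraparound sits on an unmatched line after 422. A runnable user-space sketch of that traversal (ring size chosen for illustration):

    #include <stdio.h>

    #define N_TX_RING 4   /* illustrative; the driver has its own macro */

    /* Visit every slot between empty and fill, wrapping at the end of
     * the ring -- the shape of the loop at line 421. */
    static void clean_tx_ring(int tx_empty, int tx_fill)
    {
            for (int i = tx_empty; i != tx_fill; ) {
                    printf("freeing tx_bufs[%d]\n", i);
                    if (++i >= N_TX_RING)
                            i = 0;               /* wrap around */
            }
    }

    int main(void)
    {
            clean_tx_ring(2, 1);                 /* frees slots 2, 3, 0 */
            return 0;
    }
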
430 struct mace_data *mp = netdev_priv(dev); in mace_open() local
431 volatile struct mace __iomem *mb = mp->mace; in mace_open()
432 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_open()
433 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_open()
443 mace_clean_rings(mp); in mace_open()
444 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); in mace_open()
445 cp = mp->rx_cmds; in mace_open()
454 mp->rx_bufs[i] = skb; in mace_open()
461 mp->rx_bufs[i] = NULL; in mace_open()
463 mp->rx_fill = i; in mace_open()
464 mp->rx_empty = 0; in mace_open()
469 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); in mace_open()
473 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); in mace_open()
477 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; in mace_open()
479 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); in mace_open()
483 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); in mace_open()
484 mp->tx_fill = 0; in mace_open()
485 mp->tx_empty = 0; in mace_open()
486 mp->tx_fullup = 0; in mace_open()
487 mp->tx_active = 0; in mace_open()
488 mp->tx_bad_runt = 0; in mace_open()
491 out_8(&mb->maccc, mp->maccc); in mace_open()
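
mace_open() zeroes the command rings, posts one fresh skb per RX descriptor, and closes each ring with a trailing branch command whose dependent field points back at the first descriptor (the cmd_dep writes at lines 469 and 479), so the DBDMA channel loops over the ring instead of running off the end. A fragment sketching that, using the dbdma_cmd layout from asm/dbdma.h and the virt_to_bus() addressing this era of the driver used; RX_BUFLEN and the exact slot counts are assumptions:

    /* One RX descriptor per buffer, then a branch back to the head. */
    for (i = 0; i < N_RX_RING; ++i) {
            cp = mp->rx_cmds + i;
            cp->req_count = cpu_to_le16(RX_BUFLEN);
            cp->command   = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
            cp->phy_addr  = cpu_to_le32(virt_to_bus(mp->rx_bufs[i]->data));
    }
    cp = mp->rx_cmds + N_RX_RING;                /* the extra slot */
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
    cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));   /* line 469 */
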
500 struct mace_data *mp = netdev_priv(dev); in mace_close() local
501 volatile struct mace __iomem *mb = mp->mace; in mace_close()
502 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_close()
503 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_close()
513 mace_clean_rings(mp); in mace_close()
520 struct mace_data *mp = netdev_priv(dev); in mace_set_timeout() local
522 if (mp->timeout_active) in mace_set_timeout()
523 del_timer(&mp->tx_timeout); in mace_set_timeout()
524 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; in mace_set_timeout()
525 mp->tx_timeout.function = mace_tx_timeout; in mace_set_timeout()
526 mp->tx_timeout.data = (unsigned long) dev; in mace_set_timeout()
527 add_timer(&mp->tx_timeout); in mace_set_timeout()
528 mp->timeout_active = 1; in mace_set_timeout()
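
mace_set_timeout() is the classic pre-4.15 kernel timer idiom: init_timer() once at probe (line 200), then expires, function (an unsigned long callback) and data filled in by hand before add_timer(), with timeout_active guarding against double-arming. Later kernels replaced this with timer_setup() plus mod_timer(); a sketch of the modern equivalent, assuming the same field and function names:

    /* Modern replacement for lines 200 and 522-528 (sketch).  The handler
     * recovers its mace_data with from_timer() instead of a cast "data". */
    timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);      /* at probe */
    ...
    mod_timer(&mp->tx_timeout, jiffies + TX_TIMEOUT);      /* (re)arm */

mod_timer() drops any pending instance itself, which is why the timeout_active/del_timer() dance disappears.
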
533 struct mace_data *mp = netdev_priv(dev); in mace_xmit_start() local
534 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_xmit_start()
540 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
541 fill = mp->tx_fill; in mace_xmit_start()
545 if (next == mp->tx_empty) { in mace_xmit_start()
547 mp->tx_fullup = 1; in mace_xmit_start()
548 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
551 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
559 mp->tx_bufs[fill] = skb; in mace_xmit_start()
560 cp = mp->tx_cmds + NCMDS_TX * fill; in mace_xmit_start()
564 np = mp->tx_cmds + NCMDS_TX * next; in mace_xmit_start()
568 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
569 mp->tx_fill = next; in mace_xmit_start()
570 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { in mace_xmit_start()
574 ++mp->tx_active; in mace_xmit_start()
579 if (next == mp->tx_empty) in mace_xmit_start()
581 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
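
The full-ring test in mace_xmit_start() (lines 541-551) is the one-slot-open convention: advancing the fill index may never make it equal the empty index, so one descriptor always stays unused and "full" and "empty" remain distinguishable. Sketch of the producer side, with the stop-queue call and return value assumed from the unmatched lines around 546:

    fill = mp->tx_fill;                     /* line 541 */
    next = fill + 1;
    if (next >= N_TX_RING)
            next = 0;                       /* wrap */
    if (next == mp->tx_empty) {             /* line 545: would collide */
            netif_stop_queue(dev);          /* assumed */
            mp->tx_fullup = 1;              /* line 547: remember why */
            spin_unlock_irqrestore(&mp->lock, flags);
            return NETDEV_TX_BUSY;          /* assumed */
    }
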
588 struct mace_data *mp = netdev_priv(dev); in mace_set_multicast() local
589 volatile struct mace __iomem *mb = mp->mace; in mace_set_multicast()
594 spin_lock_irqsave(&mp->lock, flags); in mace_set_multicast()
595 mp->maccc &= ~PROM; in mace_set_multicast()
597 mp->maccc |= PROM; in mace_set_multicast()
621 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_set_multicast()
630 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_set_multicast()
634 out_8(&mb->maccc, mp->maccc); in mace_set_multicast()
635 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_multicast()
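
mace_set_multicast() never writes maccc piecemeal: it edits the cached copy in mp->maccc under the lock (clear PROM, re-set it for promiscuous mode) and pushes the whole byte to the chip once at line 634, so the interrupt and timeout paths always see a consistent cached value. Sketch, with the IFF_PROMISC test assumed from the unmatched line 596:

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;                      /* line 595: default off */
    if (dev->flags & IFF_PROMISC)            /* assumed test */
            mp->maccc |= PROM;               /* line 597 */
    else {
            /* lines 598-633: build and load the multicast hash filter,
             * again gated on BROKEN_ADDRCHG_REV (lines 621/630) */
    }
    out_8(&mb->maccc, mp->maccc);            /* line 634: one write */
    spin_unlock_irqrestore(&mp->lock, flags);
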
638 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) in mace_handle_misc_intrs() argument
640 volatile struct mace __iomem *mb = mp->mace; in mace_handle_misc_intrs()
662 struct mace_data *mp = netdev_priv(dev); in mace_interrupt() local
663 volatile struct mace __iomem *mb = mp->mace; in mace_interrupt()
664 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_interrupt()
671 spin_lock_irqsave(&mp->lock, flags); in mace_interrupt()
674 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
676 i = mp->tx_empty; in mace_interrupt()
678 del_timer(&mp->tx_timeout); in mace_interrupt()
679 mp->timeout_active = 0; in mace_interrupt()
687 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
688 if (mp->tx_bad_runt) { in mace_interrupt()
690 mp->tx_bad_runt = 0; in mace_interrupt()
726 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
737 mp->tx_bad_runt = 1; in mace_interrupt()
754 if (i == mp->tx_fill) { in mace_interrupt()
767 dev->stats.tx_bytes += mp->tx_bufs[i]->len; in mace_interrupt()
770 dev_kfree_skb_irq(mp->tx_bufs[i]); in mace_interrupt()
771 --mp->tx_active; in mace_interrupt()
780 if (i != mp->tx_empty) { in mace_interrupt()
781 mp->tx_fullup = 0; in mace_interrupt()
784 mp->tx_empty = i; in mace_interrupt()
785 i += mp->tx_active; in mace_interrupt()
788 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { in mace_interrupt()
791 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
794 ++mp->tx_active; in mace_interrupt()
797 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE); in mace_interrupt()
801 spin_unlock_irqrestore(&mp->lock, flags); in mace_interrupt()
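
The TX side of mace_interrupt() is a consumer reap: starting at tx_empty, it retires one descriptor per frame the chip reports complete, frees each skb with the IRQ-safe variant (line 770), clears tx_fullup once space exists again (line 781), and re-kicks the DMA for frames still queued (lines 788-797). A runnable user-space sketch of the index bookkeeping:

    #include <stdio.h>

    #define N_TX_RING 4   /* illustrative ring size */

    /* Retire up to `done` completed slots, advancing empty toward fill --
     * the index structure of lines 676-785.  Returns the new empty index. */
    static int reap_tx(int empty, int fill, int done)
    {
            int i = empty;
            while (i != fill && done-- > 0) {
                    printf("retire tx_bufs[%d]\n", i);  /* dev_kfree_skb_irq() */
                    if (++i >= N_TX_RING)
                            i = 0;
            }
            return i;
    }

    int main(void)
    {
            int empty = reap_tx(3, 1, 2);       /* retires 3 and 0 */
            printf("tx_empty is now %d\n", empty);
            return 0;
    }
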
808 struct mace_data *mp = netdev_priv(dev); in mace_tx_timeout() local
809 volatile struct mace __iomem *mb = mp->mace; in mace_tx_timeout()
810 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_tx_timeout()
811 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_tx_timeout()
816 spin_lock_irqsave(&mp->lock, flags); in mace_tx_timeout()
817 mp->timeout_active = 0; in mace_tx_timeout()
818 if (mp->tx_active == 0 && !mp->tx_bad_runt) in mace_tx_timeout()
822 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); in mace_tx_timeout()
824 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; in mace_tx_timeout()
840 i = mp->tx_empty; in mace_tx_timeout()
841 mp->tx_active = 0; in mace_tx_timeout()
843 if (mp->tx_bad_runt) { in mace_tx_timeout()
844 mp->tx_bad_runt = 0; in mace_tx_timeout()
845 } else if (i != mp->tx_fill) { in mace_tx_timeout()
846 dev_kfree_skb(mp->tx_bufs[i]); in mace_tx_timeout()
849 mp->tx_empty = i; in mace_tx_timeout()
851 mp->tx_fullup = 0; in mace_tx_timeout()
853 if (i != mp->tx_fill) { in mace_tx_timeout()
854 cp = mp->tx_cmds + NCMDS_TX * i; in mace_tx_timeout()
859 ++mp->tx_active; in mace_tx_timeout()
865 out_8(&mb->maccc, mp->maccc); in mace_tx_timeout()
868 spin_unlock_irqrestore(&mp->lock, flags); in mace_tx_timeout()
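
mace_tx_timeout() recovers under the same lock: it bails out early if nothing is actually stuck (line 818), resets the chip and both DMA channels on unmatched lines, then sacrifices the frame at tx_empty so the ring cannot wedge on it again, before restoring maccc and resubmitting whatever remains. The shape, with unmatched steps elided and the wrap/label details assumed:

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;                 /* line 817 */
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
            goto out;                       /* assumed label: false alarm */
    /* ... poll misc interrupts, reset chip + TX/RX DMA (lines 819-841) */
    mp->tx_active = 0;                      /* line 841 */
    i = mp->tx_empty;
    if (mp->tx_bad_runt) {
            mp->tx_bad_runt = 0;            /* line 844 */
    } else if (i != mp->tx_fill) {
            dev_kfree_skb(mp->tx_bufs[i]);  /* line 846: drop stuck frame */
            if (++i >= N_TX_RING)
                    i = 0;                  /* assumed wrap, as elsewhere */
    }
    mp->tx_empty = i;                       /* line 849 */
    mp->tx_fullup = 0;                      /* line 851 */
    /* lines 853-865: resubmit from the new tx_empty, restore maccc */
    out:
            spin_unlock_irqrestore(&mp->lock, flags);
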
879 struct mace_data *mp = netdev_priv(dev); in mace_rxdma_intr() local
880 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_rxdma_intr()
889 spin_lock_irqsave(&mp->lock, flags); in mace_rxdma_intr()
890 for (i = mp->rx_empty; i != mp->rx_fill; ) { in mace_rxdma_intr()
891 cp = mp->rx_cmds + i; in mace_rxdma_intr()
897 np = mp->rx_cmds + next; in mace_rxdma_intr()
898 if (next != mp->rx_fill && in mace_rxdma_intr()
908 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
935 mp->rx_bufs[i] = NULL; in mace_rxdma_intr()
947 mp->rx_empty = i; in mace_rxdma_intr()
949 i = mp->rx_fill; in mace_rxdma_intr()
954 if (next == mp->rx_empty) in mace_rxdma_intr()
956 cp = mp->rx_cmds + i; in mace_rxdma_intr()
957 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
962 mp->rx_bufs[i] = skb; in mace_rxdma_intr()
979 if (i != mp->rx_fill) { in mace_rxdma_intr()
981 mp->rx_fill = i; in mace_rxdma_intr()
983 spin_unlock_irqrestore(&mp->lock, flags); in mace_rxdma_intr()
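
The refill half of mace_rxdma_intr() (lines 949-981) runs after the reap: starting at rx_fill, it stops one slot short of rx_empty (line 954, the same one-slot-open rule as TX), replaces any consumed buffer with a fresh skb, and only advances rx_fill once the descriptor is ready. Sketch, with RX_BUFLEN, the allocation call, and the descriptor writes assumed:

    i = mp->rx_fill;                        /* line 949 */
    for (;;) {
            next = i + 1;
            if (next >= N_RX_RING)
                    next = 0;               /* wrap */
            if (next == mp->rx_empty)       /* line 954: keep one slot open */
                    break;
            cp = mp->rx_cmds + i;           /* line 956 */
            skb = mp->rx_bufs[i];           /* line 957 */
            if (skb == NULL) {              /* slot was handed to the stack */
                    skb = alloc_skb(RX_BUFLEN + 2, GFP_ATOMIC);  /* assumed */
                    if (skb != NULL) {
                            skb_reserve(skb, 2);   /* align the IP header */
                            mp->rx_bufs[i] = skb;  /* line 962 */
                    }
            }
            /* ... point cp->phy_addr at skb->data, mark the command live */
            i = next;
    }
    if (i != mp->rx_fill)                   /* line 979 */
            mp->rx_fill = i;                /* line 981 */
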