Lines Matching refs:bp

228 struct bmac_data *bp = netdev_priv(dev); in bmac_enable_and_reset_chip() local
229 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_enable_and_reset_chip()
230 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_enable_and_reset_chip()
237 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); in bmac_enable_and_reset_chip()
312 struct bmac_data *bp = netdev_priv(dev); in bmac_init_registers() local
329 if (!bp->is_bmac_plus) { in bmac_init_registers()
371 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; in bmac_init_registers()
372 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ in bmac_init_registers()
373 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ in bmac_init_registers()
374 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ in bmac_init_registers()
375 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ in bmac_init_registers()
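
The four bmwrite() calls above bank a 64-bit multicast hash mask into four 16-bit registers, with the word order reversed against the register names (word 0, bits 15-0, goes to BHASH3; word 3, bits 63-48, goes to BHASH0). A minimal standalone sketch of that mapping, assuming only what the comments above state:

    #include <stdint.h>

    /* Split a 64-bit hash mask into the four 16-bit words the driver
     * writes out: word[0] = bits 15-0 -> BHASH3, ...,
     * word[3] = bits 63-48 -> BHASH0. */
    static void split_hash_mask(uint64_t hash, uint16_t word[4])
    {
        for (int i = 0; i < 4; i++)
            word[i] = (uint16_t)(hash >> (16 * i));
    }
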
405 struct bmac_data *bp = netdev_priv(dev); in bmac_start_chip() local
406 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_start_chip()
425 struct bmac_data *bp = netdev_priv(dev); in bmac_init_phy() local
435 if (bp->is_bmac_plus) { in bmac_init_phy()
459 struct bmac_data *bp = netdev_priv(dev); in bmac_suspend() local
466 spin_lock_irqsave(&bp->lock, flags); in bmac_suspend()
467 if (bp->timeout_active) { in bmac_suspend()
468 del_timer(&bp->tx_timeout); in bmac_suspend()
469 bp->timeout_active = 0; in bmac_suspend()
472 disable_irq(bp->tx_dma_intr); in bmac_suspend()
473 disable_irq(bp->rx_dma_intr); in bmac_suspend()
474 bp->sleeping = 1; in bmac_suspend()
475 spin_unlock_irqrestore(&bp->lock, flags); in bmac_suspend()
476 if (bp->opened) { in bmac_suspend()
477 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_suspend()
478 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_suspend()
490 if (bp->rx_bufs[i] != NULL) { in bmac_suspend()
491 dev_kfree_skb(bp->rx_bufs[i]); in bmac_suspend()
492 bp->rx_bufs[i] = NULL; in bmac_suspend()
496 if (bp->tx_bufs[i] != NULL) { in bmac_suspend()
497 dev_kfree_skb(bp->tx_bufs[i]); in bmac_suspend()
498 bp->tx_bufs[i] = NULL; in bmac_suspend()
502 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_suspend()
509 struct bmac_data *bp = netdev_priv(dev); in bmac_resume() local
512 if (bp->opened) in bmac_resume()
516 enable_irq(bp->tx_dma_intr); in bmac_resume()
517 enable_irq(bp->rx_dma_intr); in bmac_resume()
526 struct bmac_data *bp = netdev_priv(dev); in bmac_set_address() local
533 spin_lock_irqsave(&bp->lock, flags); in bmac_set_address()
544 spin_unlock_irqrestore(&bp->lock, flags); in bmac_set_address()
551 struct bmac_data *bp = netdev_priv(dev); in bmac_set_timeout() local
554 spin_lock_irqsave(&bp->lock, flags); in bmac_set_timeout()
555 if (bp->timeout_active) in bmac_set_timeout()
556 del_timer(&bp->tx_timeout); in bmac_set_timeout()
557 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; in bmac_set_timeout()
558 bp->tx_timeout.function = bmac_tx_timeout; in bmac_set_timeout()
559 bp->tx_timeout.data = (unsigned long) dev; in bmac_set_timeout()
560 add_timer(&bp->tx_timeout); in bmac_set_timeout()
561 bp->timeout_active = 1; in bmac_set_timeout()
562 spin_unlock_irqrestore(&bp->lock, flags); in bmac_set_timeout()
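
bmac_set_timeout() above arms the watchdog with the legacy kernel timer API: a .function pointer taking an unsigned long cookie, a .data field, and add_timer(), all done under the driver spinlock. Kernels since 4.15 would use timer_setup()/from_timer() instead. A sketch of the legacy pattern as it appears in the listing, with my_tx_timeout standing in for the real handler:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/netdevice.h>

    static void my_tx_timeout(unsigned long data)
    {
        struct net_device *dev = (struct net_device *)data;
        /* ... reset the MAC and restart the DMA rings ... */
    }

    static void arm_tx_timer(struct timer_list *t, struct net_device *dev,
                             unsigned long timeout)
    {
        t->expires = jiffies + timeout;   /* one-shot deadline */
        t->function = my_tx_timeout;
        t->data = (unsigned long)dev;     /* cookie handed to the callback */
        add_timer(t);
    }
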
589 bmac_init_tx_ring(struct bmac_data *bp) in bmac_init_tx_ring() argument
591 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_init_tx_ring()
593 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); in bmac_init_tx_ring()
595 bp->tx_empty = 0; in bmac_init_tx_ring()
596 bp->tx_fill = 0; in bmac_init_tx_ring()
597 bp->tx_fullup = 0; in bmac_init_tx_ring()
600 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], in bmac_init_tx_ring()
601 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); in bmac_init_tx_ring()
606 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); in bmac_init_tx_ring()
612 struct bmac_data *bp = netdev_priv(dev); in bmac_init_rx_ring() local
613 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_init_rx_ring()
618 memset((char *)bp->rx_cmds, 0, in bmac_init_rx_ring()
621 if ((skb = bp->rx_bufs[i]) == NULL) { in bmac_init_rx_ring()
622 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); in bmac_init_rx_ring()
626 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); in bmac_init_rx_ring()
629 bp->rx_empty = 0; in bmac_init_rx_ring()
630 bp->rx_fill = i; in bmac_init_rx_ring()
633 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], in bmac_init_rx_ring()
634 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); in bmac_init_rx_ring()
638 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); in bmac_init_rx_ring()
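
The rx ring is populated with netdev_alloc_skb(dev, RX_BUFLEN + 2); the matching skb_reserve(skb, 2) is visible at line 728 in the refill path below. The two extra bytes shift the 14-byte Ethernet header so the IP header that follows lands on a 4-byte boundary. A sketch of the idiom, with rx_alloc as a hypothetical helper name:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *rx_alloc(struct net_device *dev, unsigned int buflen)
    {
        struct sk_buff *skb = netdev_alloc_skb(dev, buflen + 2);

        if (skb)
            skb_reserve(skb, 2);  /* align IP header after 14-byte MAC header */
        return skb;
    }
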
646 struct bmac_data *bp = netdev_priv(dev); in bmac_transmit_packet() local
647 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_transmit_packet()
653 i = bp->tx_fill + 1; in bmac_transmit_packet()
656 if (i == bp->tx_empty) { in bmac_transmit_packet()
658 bp->tx_fullup = 1; in bmac_transmit_packet()
663 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); in bmac_transmit_packet()
665 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); in bmac_transmit_packet()
667 bp->tx_bufs[bp->tx_fill] = skb; in bmac_transmit_packet()
668 bp->tx_fill = i; in bmac_transmit_packet()
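
bmac_transmit_packet() advances tx_fill and treats the ring as full when the incremented index would land on tx_empty, deliberately leaving one slot unused so the full and empty states stay distinguishable. A userspace model of that producer rule (N_RING and struct ring are illustrative, not the driver's types):

    #include <stdbool.h>

    #define N_RING 32              /* illustrative size, not N_TX_RING */

    struct ring { int fill, empty; };

    static bool ring_push(struct ring *r)
    {
        int next = r->fill + 1;
        if (next >= N_RING)
            next = 0;              /* wrap */
        if (next == r->empty)
            return false;          /* full: the driver sets tx_fullup */
        /* ... build the DBDMA descriptor at slot r->fill ... */
        r->fill = next;
        return true;
    }
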
682 struct bmac_data *bp = netdev_priv(dev); in bmac_rxdma_intr() local
683 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_rxdma_intr()
691 spin_lock_irqsave(&bp->lock, flags); in bmac_rxdma_intr()
698 i = bp->rx_empty; in bmac_rxdma_intr()
701 cp = &bp->rx_cmds[i]; in bmac_rxdma_intr()
712 skb = bp->rx_bufs[i]; in bmac_rxdma_intr()
713 bp->rx_bufs[i] = NULL; in bmac_rxdma_intr()
725 if ((skb = bp->rx_bufs[i]) == NULL) { in bmac_rxdma_intr()
726 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); in bmac_rxdma_intr()
728 skb_reserve(bp->rx_bufs[i], 2); in bmac_rxdma_intr()
730 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); in bmac_rxdma_intr()
738 bp->rx_fill = last; in bmac_rxdma_intr()
739 bp->rx_empty = i; in bmac_rxdma_intr()
743 spin_unlock_irqrestore(&bp->lock, flags); in bmac_rxdma_intr()
756 struct bmac_data *bp = netdev_priv(dev); in bmac_txdma_intr() local
761 spin_lock_irqsave(&bp->lock, flags); in bmac_txdma_intr()
771 cp = &bp->tx_cmds[bp->tx_empty]; in bmac_txdma_intr()
780 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) in bmac_txdma_intr()
784 if (bp->tx_bufs[bp->tx_empty]) { in bmac_txdma_intr()
786 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); in bmac_txdma_intr()
788 bp->tx_bufs[bp->tx_empty] = NULL; in bmac_txdma_intr()
789 bp->tx_fullup = 0; in bmac_txdma_intr()
791 if (++bp->tx_empty >= N_TX_RING) in bmac_txdma_intr()
792 bp->tx_empty = 0; in bmac_txdma_intr()
793 if (bp->tx_empty == bp->tx_fill) in bmac_txdma_intr()
797 spin_unlock_irqrestore(&bp->lock, flags); in bmac_txdma_intr()
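
bmac_txdma_intr() is the consumer side: it walks from tx_empty toward tx_fill, freeing each completed buffer, and stops early when it reaches the descriptor the DMA engine is still working on (the cmdptr comparison at line 780). A companion sketch reusing struct ring and N_RING from the producer model above, with done() standing in for the hardware progress check:

    static void ring_reap(struct ring *r, bool (*done)(int slot))
    {
        while (r->empty != r->fill) {
            if (!done(r->empty))
                break;             /* DMA engine not past this slot yet */
            /* ... dev_kfree_skb_irq() the buffer at r->empty ... */
            if (++r->empty >= N_RING)
                r->empty = 0;
        }
    }
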
867 bmac_addhash(struct bmac_data *bp, unsigned char *addr) in bmac_addhash() argument
875 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ in bmac_addhash()
878 bp->hash_use_count[crc/16] |= mask; in bmac_addhash()
882 bmac_removehash(struct bmac_data *bp, unsigned char *addr) in bmac_removehash() argument
890 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ in bmac_removehash()
891 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ in bmac_removehash()
894 bp->hash_table_mask[crc/16] &= mask; in bmac_removehash()
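
bmac_addhash()/bmac_removehash() reference-count each of the 64 hash buckets so that multiple multicast addresses mapping to the same bucket share a single filter bit: the bit is meant to be set on the 0-to-1 transition and cleared only on the last removal. A userspace model of that intent (a cleaned-up version; the listed driver's index and mask arithmetic differs slightly from this sketch):

    #include <stdint.h>

    struct hash_filter {
        uint16_t mask[4];   /* mirrors hash_table_mask, 64 bits total */
        uint8_t  use[64];   /* mirrors hash_use_count, one per bucket */
    };

    static void hash_add(struct hash_filter *f, unsigned int crc6)
    {
        if (f->use[crc6]++)
            return;                              /* bit already set */
        f->mask[crc6 / 16] |= 1u << (crc6 % 16);
    }

    static void hash_remove(struct hash_filter *f, unsigned int crc6)
    {
        if (f->use[crc6] == 0 || --f->use[crc6])
            return;                              /* unused, or still shared */
        f->mask[crc6 / 16] &= ~(1u << (crc6 % 16));
    }
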
934 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) in bmac_update_hash_table_mask() argument
936 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ in bmac_update_hash_table_mask()
937 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ in bmac_update_hash_table_mask()
938 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ in bmac_update_hash_table_mask()
939 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ in bmac_update_hash_table_mask()
945 struct bmac_data *bp, unsigned char *addr)
948 bmac_addhash(bp, addr);
950 bmac_update_hash_table_mask(dev, bp);
957 struct bmac_data *bp, unsigned char *addr)
959 bmac_removehash(bp, addr);
961 bmac_update_hash_table_mask(dev, bp);
975 struct bmac_data *bp = netdev_priv(dev); in bmac_set_multicast() local
980 if (bp->sleeping) in bmac_set_multicast()
986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; in bmac_set_multicast()
987 bmac_update_hash_table_mask(dev, bp); in bmac_set_multicast()
997 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; in bmac_set_multicast()
998 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; in bmac_set_multicast()
1004 bmac_addhash(bp, ha->addr); in bmac_set_multicast()
1005 bmac_update_hash_table_mask(dev, bp); in bmac_set_multicast()
1201 struct bmac_data *bp = netdev_priv(dev); in bmac_reset_and_enable() local
1206 spin_lock_irqsave(&bp->lock, flags); in bmac_reset_and_enable()
1208 bmac_init_tx_ring(bp); in bmac_reset_and_enable()
1213 bp->sleeping = 0; in bmac_reset_and_enable()
1227 spin_unlock_irqrestore(&bp->lock, flags); in bmac_reset_and_enable()
1247 struct bmac_data *bp; in bmac_probe() local
1273 bp = netdev_priv(dev); in bmac_probe()
1277 bp->mdev = mdev; in bmac_probe()
1278 spin_lock_init(&bp->lock); in bmac_probe()
1310 bp->is_bmac_plus = is_bmac_plus; in bmac_probe()
1311 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); in bmac_probe()
1312 if (!bp->tx_dma) in bmac_probe()
1314 bp->tx_dma_intr = macio_irq(mdev, 1); in bmac_probe()
1315 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); in bmac_probe()
1316 if (!bp->rx_dma) in bmac_probe()
1318 bp->rx_dma_intr = macio_irq(mdev, 2); in bmac_probe()
1320 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); in bmac_probe()
1321 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; in bmac_probe()
1323 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); in bmac_probe()
1324 skb_queue_head_init(bp->queue); in bmac_probe()
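
bmac_probe() carves both DBDMA command rings and the sk_buff queue head out of the single netdev_priv() allocation, aligning the first ring with DBDMA_ALIGN and leaving one extra slot per ring for the branch-back NOP descriptor set up in the ring-init functions above. A sketch of the layout, assuming DBDMA_ALIGN rounds up to a 16-byte boundary (the real macro is defined in the driver and not shown in this listing):

    #include <stdint.h>

    #define ALIGN16(p) ((void *)(((uintptr_t)(p) + 15) & ~(uintptr_t)15))

    struct dbdma_cmd { uint8_t raw[16]; };  /* stand-in: real cmd is 16 bytes */

    /* priv_end is bp + 1; layout: tx_cmds | rx_cmds | queue head */
    static void carve_rings(void *priv_end, int n_tx, int n_rx,
                            struct dbdma_cmd **tx, struct dbdma_cmd **rx,
                            void **queue)
    {
        *tx = ALIGN16(priv_end);
        *rx = *tx + n_tx + 1;                   /* +1 for branch-back NOP */
        *queue = (void *)(*rx + n_rx + 1);
    }
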
1326 init_timer(&bp->tx_timeout); in bmac_probe()
1333 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); in bmac_probe()
1335 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); in bmac_probe()
1338 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); in bmac_probe()
1340 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); in bmac_probe()
1348 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_probe()
1363 free_irq(bp->rx_dma_intr, dev); in bmac_probe()
1365 free_irq(bp->tx_dma_intr, dev); in bmac_probe()
1369 iounmap(bp->rx_dma); in bmac_probe()
1371 iounmap(bp->tx_dma); in bmac_probe()
1377 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_probe()
1385 struct bmac_data *bp = netdev_priv(dev); in bmac_open() local
1388 bp->opened = 1; in bmac_open()
1396 struct bmac_data *bp = netdev_priv(dev); in bmac_close() local
1397 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_close()
1398 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_close()
1402 bp->sleeping = 1; in bmac_close()
1420 if (bp->rx_bufs[i] != NULL) { in bmac_close()
1421 dev_kfree_skb(bp->rx_bufs[i]); in bmac_close()
1422 bp->rx_bufs[i] = NULL; in bmac_close()
1427 if (bp->tx_bufs[i] != NULL) { in bmac_close()
1428 dev_kfree_skb(bp->tx_bufs[i]); in bmac_close()
1429 bp->tx_bufs[i] = NULL; in bmac_close()
1434 bp->opened = 0; in bmac_close()
1436 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_close()
1444 struct bmac_data *bp = netdev_priv(dev); in bmac_start() local
1449 if (bp->sleeping) in bmac_start()
1452 spin_lock_irqsave(&bp->lock, flags); in bmac_start()
1454 i = bp->tx_fill + 1; in bmac_start()
1457 if (i == bp->tx_empty) in bmac_start()
1459 skb = skb_dequeue(bp->queue); in bmac_start()
1464 spin_unlock_irqrestore(&bp->lock, flags); in bmac_start()
1470 struct bmac_data *bp = netdev_priv(dev); in bmac_output() local
1471 skb_queue_tail(bp->queue, skb); in bmac_output()
1479 struct bmac_data *bp = netdev_priv(dev); in bmac_tx_timeout() local
1480 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_tx_timeout()
1481 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_tx_timeout()
1488 spin_lock_irqsave(&bp->lock, flags); in bmac_tx_timeout()
1489 bp->timeout_active = 0; in bmac_tx_timeout()
1494 cp = &bp->tx_cmds[bp->tx_empty]; in bmac_tx_timeout()
1517 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); in bmac_tx_timeout()
1518 i = bp->tx_empty; in bmac_tx_timeout()
1520 if (i != bp->tx_fill) { in bmac_tx_timeout()
1521 dev_kfree_skb(bp->tx_bufs[i]); in bmac_tx_timeout()
1522 bp->tx_bufs[i] = NULL; in bmac_tx_timeout()
1524 bp->tx_empty = i; in bmac_tx_timeout()
1526 bp->tx_fullup = 0; in bmac_tx_timeout()
1528 if (i != bp->tx_fill) { in bmac_tx_timeout()
1529 cp = &bp->tx_cmds[i]; in bmac_tx_timeout()
1544 spin_unlock_irqrestore(&bp->lock, flags); in bmac_tx_timeout()
1605 struct bmac_data *bp = netdev_priv(dev); in bmac_remove() local
1610 free_irq(bp->tx_dma_intr, dev); in bmac_remove()
1611 free_irq(bp->rx_dma_intr, dev); in bmac_remove()
1614 iounmap(bp->tx_dma); in bmac_remove()
1615 iounmap(bp->rx_dma); in bmac_remove()