Lines matching refs:dpriv (references to struct dscc4_dev_priv *dpriv in the dscc4 driver)
186 #define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET) argument
391 static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv, in scc_patchl() argument
397 state = dpriv->scc_regs[offset >> 2]; in scc_patchl()
400 dpriv->scc_regs[offset >> 2] = state; in scc_patchl()
401 writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset); in scc_patchl()
404 static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv, in scc_writel() argument
411 dpriv->scc_regs[offset >> 2] = bits; in scc_writel()
412 writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset); in scc_writel()
415 static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset) in scc_readl() argument
417 return dpriv->scc_regs[offset >> 2]; in scc_readl()
420 static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev) in scc_readl_star() argument
423 readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); in scc_readl_star()
424 return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); in scc_readl_star()
427 static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv, in dscc4_do_tx() argument
430 dpriv->ltda = dpriv->tx_fd_dma + in dscc4_do_tx()
431 ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD); in dscc4_do_tx()
432 writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); in dscc4_do_tx()
434 readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); in dscc4_do_tx()
437 static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv, in dscc4_rx_update() argument
440 dpriv->lrda = dpriv->rx_fd_dma + in dscc4_rx_update()
441 ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD); in dscc4_rx_update()
442 writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); in dscc4_rx_update()
445 static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv) in dscc4_tx_done() argument
447 return dpriv->tx_current == dpriv->tx_dirty; in dscc4_tx_done()
450 static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv, in dscc4_tx_quiescent() argument
453 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; in dscc4_tx_quiescent()
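dscc4_do_tx(), dscc4_rx_update(), dscc4_tx_done() and dscc4_tx_quiescent() all rely on the same convention: the *_current and *_dirty fields are free-running counters, a descriptor slot is counter % RING_SIZE, and the value handed to the chip (ltda/lrda) is the ring's DMA base plus slot * sizeof(descriptor). A standalone sketch of that arithmetic, under invented names (ring_t, ring_last_dma, ring_done):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 32u

    struct desc { uint32_t state, length, data, next; };

    struct ring_t {
        uint32_t current;   /* next slot to fill (producer side) */
        uint32_t dirty;     /* next slot to reclaim (consumer side) */
        uint64_t fd_dma;    /* bus address of the descriptor array */
    };

    /* bus address of the most recently queued descriptor, as in dscc4_do_tx() */
    static uint64_t ring_last_dma(const struct ring_t *r)
    {
        return r->fd_dma + ((r->current - 1) % RING_SIZE) * sizeof(struct desc);
    }

    /* "nothing left to send", as in dscc4_tx_done() */
    static int ring_done(const struct ring_t *r)
    {
        return r->current == r->dirty;
    }

    int main(void)
    {
        struct ring_t r = { .current = 5, .dirty = 3, .fd_dma = 0x10000 };

        printf("last desc at 0x%llx, done=%d\n",
               (unsigned long long)ring_last_dma(&r), ring_done(&r));
        return 0;
    }
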
456 static int state_check(u32 state, struct dscc4_dev_priv *dpriv, in state_check() argument
462 if (SOURCE_ID(state) != dpriv->dev_id) { in state_check()
477 struct dscc4_dev_priv *dpriv, in dscc4_tx_print() argument
481 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); in dscc4_tx_print()
484 static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) in dscc4_release_ring() argument
486 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_release_ring()
487 struct TxFD *tx_fd = dpriv->tx_fd; in dscc4_release_ring()
488 struct RxFD *rx_fd = dpriv->rx_fd; in dscc4_release_ring()
492 pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); in dscc4_release_ring()
493 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); in dscc4_release_ring()
495 skbuff = dpriv->tx_skbuff; in dscc4_release_ring()
506 skbuff = dpriv->rx_skbuff; in dscc4_release_ring()
518 static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, in try_get_rx_skb() argument
521 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; in try_get_rx_skb()
522 struct RxFD *rx_fd = dpriv->rx_fd + dirty; in try_get_rx_skb()
528 dpriv->rx_skbuff[dirty] = skb; in try_get_rx_skb()
531 rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, in try_get_rx_skb()
543 static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, in dscc4_wait_ack_cec() argument
549 if (!(scc_readl_star(dpriv, dev) & SccBusy)) { in dscc4_wait_ack_cec()
589 static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) in dscc4_xpr_ack() argument
591 int cur = dpriv->iqtx_current%IRQ_RING_SIZE; in dscc4_xpr_ack()
595 if (!(dpriv->flags & (NeedIDR | NeedIDT)) || in dscc4_xpr_ack()
596 (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) in dscc4_xpr_ack()
606 static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
610 spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
612 writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
613 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
614 readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
615 writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
616 writel(Action, dpriv->base_addr + GCMDR);
617 spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
623 static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
628 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
629 scc_writel(0x00050000, dpriv, dev, CCR2);
633 while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
638 writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
645 static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, in dscc4_rx_skb() argument
648 struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; in dscc4_rx_skb()
649 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_rx_skb()
653 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; in dscc4_rx_skb()
680 while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) { in dscc4_rx_skb()
681 if (try_get_rx_skb(dpriv, dev) < 0) in dscc4_rx_skb()
683 dpriv->rx_dirty++; in dscc4_rx_skb()
685 dscc4_rx_update(dpriv, dev); in dscc4_rx_skb()
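The tail of dscc4_rx_skb() tops the receive ring back up: rx_current counts descriptors whose frame has been handed to the stack, rx_dirty counts descriptors re-armed with a fresh skb, and in the normal case the ring is full again once (rx_dirty - rx_current) equals RX_RING_SIZE, which the modulo test above detects as zero. A simplified sketch of that refill loop; try_refill_one() is an invented stand-in for try_get_rx_skb(), and allocation failures simply end the loop, as in the driver:

    #include <stdio.h>

    #define RX_RING_SIZE 32u

    static unsigned int rx_current, rx_dirty;

    static int try_refill_one(void)
    {
        return 0;   /* pretend buffer allocation always succeeds */
    }

    static void refill_ring(void)
    {
        while ((rx_dirty - rx_current) % RX_RING_SIZE) {
            if (try_refill_one() < 0)
                break;          /* out of memory: retry on the next frame */
            rx_dirty++;
        }
    }

    int main(void)
    {
        rx_dirty = RX_RING_SIZE;    /* ring starts out fully populated */
        rx_current = 3;             /* three frames consumed by the stack */
        refill_ring();
        printf("dirty=%u current=%u\n", rx_dirty, rx_current);
        return 0;
    }
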
711 struct dscc4_dev_priv *dpriv; in dscc4_init_one() local
788 dpriv = priv->root + i; in dscc4_init_one()
789 dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev, in dscc4_init_one()
790 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma); in dscc4_init_one()
791 if (!dpriv->iqtx) in dscc4_init_one()
793 writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); in dscc4_init_one()
796 dpriv = priv->root + i; in dscc4_init_one()
797 dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev, in dscc4_init_one()
798 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma); in dscc4_init_one()
799 if (!dpriv->iqrx) in dscc4_init_one()
801 writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); in dscc4_init_one()
821 dpriv = priv->root + i; in dscc4_init_one()
823 dpriv->iqrx, dpriv->iqrx_dma); in dscc4_init_one()
828 dpriv = priv->root + i; in dscc4_init_one()
830 dpriv->iqtx, dpriv->iqtx_dma); in dscc4_init_one()
853 static void dscc4_init_registers(struct dscc4_dev_priv *dpriv, in dscc4_init_registers() argument
857 scc_writel(0x00000000, dpriv, dev, CCR0); in dscc4_init_registers()
859 scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR); in dscc4_init_registers()
867 scc_writel(0x02408000, dpriv, dev, CCR1); in dscc4_init_registers()
870 scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2); in dscc4_init_registers()
875 static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) in dscc4_set_quartz() argument
882 dpriv->pci_priv->xtal_hz = hz; in dscc4_set_quartz()
920 struct dscc4_dev_priv *dpriv = root + i; in dscc4_found1() local
921 struct net_device *d = dscc4_to_dev(dpriv); in dscc4_found1()
930 dpriv->dev_id = i; in dscc4_found1()
931 dpriv->pci_priv = ppriv; in dscc4_found1()
932 dpriv->base_addr = ioaddr; in dscc4_found1()
933 spin_lock_init(&dpriv->lock); in dscc4_found1()
938 dscc4_init_registers(dpriv, d); in dscc4_found1()
939 dpriv->parity = PARITY_CRC16_PR0_CCITT; in dscc4_found1()
940 dpriv->encoding = ENCODING_NRZ; in dscc4_found1()
949 dscc4_release_ring(dpriv); in dscc4_found1()
980 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_timer() local
985 dpriv->timer.expires = jiffies + TX_TIMEOUT; in dscc4_timer()
986 add_timer(&dpriv->timer); in dscc4_timer()
994 static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv) in dscc4_loopback_check() argument
996 sync_serial_settings *settings = &dpriv->settings; in dscc4_loopback_check()
999 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_loopback_check()
1048 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_open() local
1052 if ((dscc4_loopback_check(dpriv) < 0)) in dscc4_open()
1058 ppriv = dpriv->pci_priv; in dscc4_open()
1069 if (dpriv->flags & FakeReset) { in dscc4_open()
1070 dpriv->flags &= ~FakeReset; in dscc4_open()
1071 scc_patchl(0, PowerUp, dpriv, dev, CCR0); in dscc4_open()
1072 scc_patchl(0, 0x00050000, dpriv, dev, CCR2); in dscc4_open()
1073 scc_writel(EventsMask, dpriv, dev, IMR); in dscc4_open()
1079 dpriv->flags = NeedIDR | NeedIDT; in dscc4_open()
1081 scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0); in dscc4_open()
1090 if (scc_readl_star(dpriv, dev) & SccBusy) { in dscc4_open()
1097 scc_writel(EventsMask, dpriv, dev, IMR); in dscc4_open()
1100 scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR); in dscc4_open()
1102 if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0) in dscc4_open()
1112 if ((ret = dscc4_xpr_ack(dpriv)) < 0) { in dscc4_open()
1118 dscc4_tx_print(dev, dpriv, "Open"); in dscc4_open()
1123 init_timer(&dpriv->timer); in dscc4_open()
1124 dpriv->timer.expires = jiffies + 10*HZ; in dscc4_open()
1125 dpriv->timer.data = (unsigned long)dev; in dscc4_open()
1126 dpriv->timer.function = dscc4_timer; in dscc4_open()
1127 add_timer(&dpriv->timer); in dscc4_open()
1133 scc_writel(0xffffffff, dpriv, dev, IMR); in dscc4_open()
1134 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); in dscc4_open()
1142 static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_tx_poll() argument
1151 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_start_xmit() local
1152 struct dscc4_pci_priv *ppriv = dpriv->pci_priv; in dscc4_start_xmit()
1156 next = dpriv->tx_current%TX_RING_SIZE; in dscc4_start_xmit()
1157 dpriv->tx_skbuff[next] = skb; in dscc4_start_xmit()
1158 tx_fd = dpriv->tx_fd + next; in dscc4_start_xmit()
1167 spin_lock(&dpriv->lock); in dscc4_start_xmit()
1168 while (dscc4_tx_poll(dpriv, dev)); in dscc4_start_xmit()
1169 spin_unlock(&dpriv->lock); in dscc4_start_xmit()
1173 dscc4_tx_print(dev, dpriv, "Xmit"); in dscc4_start_xmit()
1175 if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)) in dscc4_start_xmit()
1178 if (dscc4_tx_quiescent(dpriv, dev)) in dscc4_start_xmit()
1179 dscc4_do_tx(dpriv, dev); in dscc4_start_xmit()
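dscc4_start_xmit() is the producer side of the same ring: the skb is remembered in tx_skbuff[] so the Tx interrupt can free it later, the descriptor at tx_current % TX_RING_SIZE is filled, and the queue is stopped once the two free-running counters are a whole ring apart. A reduced sketch of that bookkeeping; queue_stopped stands in for netif_stop_queue(), and descriptor setup is elided:

    #include <stdio.h>

    #define TX_RING_SIZE 32u

    static void *tx_skbuff[TX_RING_SIZE];   /* skb pointers, indexed by slot */
    static unsigned int tx_current, tx_dirty;
    static int queue_stopped;

    static void xmit_one(void *skb)
    {
        unsigned int slot = tx_current % TX_RING_SIZE;

        tx_skbuff[slot] = skb;      /* the Tx IRQ handler frees it later */
        /* ... fill the descriptor at this slot ... */

        if (!((++tx_current - tx_dirty) % TX_RING_SIZE))
            queue_stopped = 1;      /* ring full */
    }

    int main(void)
    {
        int dummy;
        unsigned int i;

        for (i = 0; i < TX_RING_SIZE; i++)
            xmit_one(&dummy);
        printf("queued %u frames, stopped=%d\n", tx_current, queue_stopped);
        return 0;
    }
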
1186 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_close() local
1188 del_timer_sync(&dpriv->timer); in dscc4_close()
1191 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); in dscc4_close()
1192 scc_patchl(0x00050000, 0, dpriv, dev, CCR2); in dscc4_close()
1193 scc_writel(0xffffffff, dpriv, dev, IMR); in dscc4_close()
1195 dpriv->flags |= FakeReset; in dscc4_close()
1258 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_set_clock() local
1267 xtal = dpriv->pci_priv->xtal_hz; in dscc4_set_clock()
1270 if (dscc4_check_clock_ability(dpriv->dev_id) < 0) in dscc4_set_clock()
1303 scc_writel(brr, dpriv, dev, BRR); in dscc4_set_clock()
1312 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_ioctl() local
1313 const size_t size = sizeof(dpriv->settings); in dscc4_ioctl()
1329 if (copy_to_user(line, &dpriv->settings, size)) in dscc4_ioctl()
1337 if (dpriv->flags & FakeReset) { in dscc4_ioctl()
1341 if (copy_from_user(&dpriv->settings, line, size)) in dscc4_ioctl()
1343 ret = dscc4_set_iface(dpriv, dev); in dscc4_ioctl()
1368 static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv, in dscc4_clock_setting() argument
1371 sync_serial_settings *settings = &dpriv->settings; in dscc4_clock_setting()
1376 state = scc_readl(dpriv, CCR0); in dscc4_clock_setting()
1390 scc_writel(state, dpriv, dev, CCR0); in dscc4_clock_setting()
1396 static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, in dscc4_encoding_setting() argument
1409 i = dscc4_match(encoding, dpriv->encoding); in dscc4_encoding_setting()
1411 scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0); in dscc4_encoding_setting()
1417 static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv, in dscc4_loopback_setting() argument
1420 sync_serial_settings *settings = &dpriv->settings; in dscc4_loopback_setting()
1423 state = scc_readl(dpriv, CCR1); in dscc4_loopback_setting()
1431 scc_writel(state, dpriv, dev, CCR1); in dscc4_loopback_setting()
1435 static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, in dscc4_crc_setting() argument
1446 i = dscc4_match(crc, dpriv->parity); in dscc4_crc_setting()
1448 scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1); in dscc4_crc_setting()
1454 static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_set_iface() argument
1468 if ((ret = p->action(dpriv, dev)) < 0) in dscc4_set_iface()
1533 struct dscc4_dev_priv *dpriv) in dscc4_tx_irq() argument
1535 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_tx_irq()
1540 cur = dpriv->iqtx_current%IRQ_RING_SIZE; in dscc4_tx_irq()
1541 state = le32_to_cpu(dpriv->iqtx[cur]); in dscc4_tx_irq()
1549 if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE) in dscc4_tx_irq()
1552 if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) && in dscc4_tx_irq()
1553 !dscc4_tx_done(dpriv)) in dscc4_tx_irq()
1554 dscc4_do_tx(dpriv, dev); in dscc4_tx_irq()
1558 dpriv->iqtx[cur] = 0; in dscc4_tx_irq()
1559 dpriv->iqtx_current++; in dscc4_tx_irq()
1561 if (state_check(state, dpriv, dev, "Tx") < 0) in dscc4_tx_irq()
1570 dscc4_tx_print(dev, dpriv, "Alls"); in dscc4_tx_irq()
1575 cur = dpriv->tx_dirty%TX_RING_SIZE; in dscc4_tx_irq()
1576 tx_fd = dpriv->tx_fd + cur; in dscc4_tx_irq()
1577 skb = dpriv->tx_skbuff[cur]; in dscc4_tx_irq()
1586 dpriv->tx_skbuff[cur] = NULL; in dscc4_tx_irq()
1587 ++dpriv->tx_dirty; in dscc4_tx_irq()
1611 dpriv->flags = NeedIDT; in dscc4_tx_irq()
1614 dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG); in dscc4_tx_irq()
1615 writel(Action, dpriv->base_addr + GCMDR); in dscc4_tx_irq()
1639 if (!(scc_readl_star(dpriv, dev) & SccBusy)) in dscc4_tx_irq()
1645 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; in dscc4_tx_irq()
1647 if (dpriv->flags & NeedIDT) { in dscc4_tx_irq()
1649 dscc4_tx_print(dev, dpriv, "Xpr"); in dscc4_tx_irq()
1650 ring = dpriv->tx_fd_dma + in dscc4_tx_irq()
1651 (dpriv->tx_dirty%TX_RING_SIZE)* in dscc4_tx_irq()
1654 dscc4_do_tx(dpriv, dev); in dscc4_tx_irq()
1658 dpriv->flags &= ~NeedIDT; in dscc4_tx_irq()
1660 if (dpriv->flags & NeedIDR) { in dscc4_tx_irq()
1661 ring = dpriv->rx_fd_dma + in dscc4_tx_irq()
1662 (dpriv->rx_current%RX_RING_SIZE)* in dscc4_tx_irq()
1665 dscc4_rx_update(dpriv, dev); in dscc4_tx_irq()
1669 dpriv->flags &= ~NeedIDR; in dscc4_tx_irq()
1672 scc_writel(0x08050008, dpriv, dev, CCR2); in dscc4_tx_irq()
1687 while (!dscc4_tx_poll(dpriv, dev)); in dscc4_tx_irq()
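dscc4_tx_irq() (and dscc4_rx_irq() below) consume a small DMA ring of 32-bit event words: the handler reads the word at iqtx_current % IRQ_RING_SIZE (iqrx_current for the receive side), stops when it finds an empty slot, and otherwise zeroes the slot so the device can reuse it and advances the counter. A simplified drain loop illustrating that pattern; endianness conversion and event decoding are omitted, and the names (iq, drain_irq_queue) are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_RING_SIZE 32u

    static uint32_t iq[IRQ_RING_SIZE];  /* written by the device via DMA */
    static unsigned int iq_current;

    static void drain_irq_queue(void)
    {
        for (;;) {
            unsigned int cur = iq_current % IRQ_RING_SIZE;
            uint32_t state = iq[cur];

            if (!state)             /* empty slot: nothing more pending */
                break;
            iq[cur] = 0;            /* hand the slot back to the device */
            iq_current++;
            printf("event 0x%08x\n", (unsigned int)state);
        }
    }

    int main(void)
    {
        iq[0] = 0x00004000;         /* pretend the device queued one event */
        drain_irq_queue();
        return 0;
    }
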
1702 struct dscc4_dev_priv *dpriv) in dscc4_rx_irq() argument
1704 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_rx_irq()
1709 cur = dpriv->iqrx_current%IRQ_RING_SIZE; in dscc4_rx_irq()
1710 state = le32_to_cpu(dpriv->iqrx[cur]); in dscc4_rx_irq()
1713 dpriv->iqrx[cur] = 0; in dscc4_rx_irq()
1714 dpriv->iqrx_current++; in dscc4_rx_irq()
1716 if (state_check(state, dpriv, dev, "Rx") < 0) in dscc4_rx_irq()
1728 cur = dpriv->rx_current%RX_RING_SIZE; in dscc4_rx_irq()
1729 rx_fd = dpriv->rx_fd + cur; in dscc4_rx_irq()
1746 rx_fd = dpriv->rx_fd; in dscc4_rx_irq()
1749 try_get_rx_skb(dpriv, dev); in dscc4_rx_irq()
1759 dscc4_rx_skb(dpriv, dev); in dscc4_rx_irq()
1808 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; in dscc4_rx_irq()
1810 scc_patchl(RxActivate, 0, dpriv, dev, CCR2); in dscc4_rx_irq()
1816 scc_writel(RxSccRes, dpriv, dev, CMDR); in dscc4_rx_irq()
1817 dpriv->flags |= RdoSet; in dscc4_rx_irq()
1825 cur = dpriv->rx_current++%RX_RING_SIZE; in dscc4_rx_irq()
1826 rx_fd = dpriv->rx_fd + cur; in dscc4_rx_irq()
1835 dscc4_rx_skb(dpriv, dev); in dscc4_rx_irq()
1839 if (dpriv->flags & RdoSet) in dscc4_rx_irq()
1848 writel(dpriv->rx_fd_dma + in dscc4_rx_irq()
1849 (dpriv->rx_current%RX_RING_SIZE)* in dscc4_rx_irq()
1863 scc_patchl(0, RxActivate, dpriv, dev, CCR2); in dscc4_rx_irq()
1888 static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) in dscc4_init_dummy_skb() argument
1894 int last = dpriv->tx_dirty%TX_RING_SIZE; in dscc4_init_dummy_skb()
1895 struct TxFD *tx_fd = dpriv->tx_fd + last; in dscc4_init_dummy_skb()
1901 tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, in dscc4_init_dummy_skb()
1904 dpriv->tx_skbuff[last] = skb; in dscc4_init_dummy_skb()
1911 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_init_ring() local
1912 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_init_ring()
1918 ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma); in dscc4_init_ring()
1921 dpriv->rx_fd = rx_fd = (struct RxFD *) ring; in dscc4_init_ring()
1923 ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma); in dscc4_init_ring()
1926 dpriv->tx_fd = tx_fd = (struct TxFD *) ring; in dscc4_init_ring()
1928 memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE); in dscc4_init_ring()
1929 dpriv->tx_dirty = 0xffffffff; in dscc4_init_ring()
1930 i = dpriv->tx_current = 0; in dscc4_init_ring()
1935 tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma); in dscc4_init_ring()
1936 (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma + in dscc4_init_ring()
1940 if (!dscc4_init_dummy_skb(dpriv)) in dscc4_init_ring()
1943 memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); in dscc4_init_ring()
1944 i = dpriv->rx_dirty = dpriv->rx_current = 0; in dscc4_init_ring()
1952 if (try_get_rx_skb(dpriv, dev) >= 0) in dscc4_init_ring()
1953 dpriv->rx_dirty++; in dscc4_init_ring()
1954 (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma + in dscc4_init_ring()
1961 pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); in dscc4_init_ring()
1963 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); in dscc4_init_ring()
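dscc4_init_ring() links each descriptor to the next one by bus address, with the last entry wrapping back to the first, so the chip can walk the ring indefinitely. A sketch of that chaining step; cpu_to_le32() and the real TxFD/RxFD layouts are omitted, and struct desc and dma_base are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8u

    struct desc { uint32_t state, length, data, next; };

    static void chain_ring(struct desc *ring, uint32_t dma_base)
    {
        unsigned int i;

        /* descriptor i points at descriptor (i + 1) % RING_SIZE */
        for (i = 0; i < RING_SIZE; i++)
            ring[i].next = dma_base +
                           ((i + 1) % RING_SIZE) * sizeof(struct desc);
    }

    int main(void)
    {
        struct desc ring[RING_SIZE];

        chain_ring(ring, 0x10000);
        printf("last->next = 0x%x (back to the first descriptor)\n",
               (unsigned int)ring[RING_SIZE - 1].next);
        return 0;
    }
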
1986 struct dscc4_dev_priv *dpriv = root + i; in dscc4_remove_one() local
1988 dscc4_release_ring(dpriv); in dscc4_remove_one()
1990 dpriv->iqrx, dpriv->iqrx_dma); in dscc4_remove_one()
1992 dpriv->iqtx, dpriv->iqtx_dma); in dscc4_remove_one()
2008 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_hdlc_attach() local
2024 dpriv->encoding = encoding; in dscc4_hdlc_attach()
2025 dpriv->parity = parity; in dscc4_hdlc_attach()