Lines matching refs:dpriv in drivers/net/wan/dscc4.c
186 #define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET) argument
391 static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv, in scc_patchl() argument
397 state = dpriv->scc_regs[offset >> 2]; in scc_patchl()
400 dpriv->scc_regs[offset >> 2] = state; in scc_patchl()
401 writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset); in scc_patchl()
404 static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv, in scc_writel() argument
411 dpriv->scc_regs[offset >> 2] = bits; in scc_writel()
412 writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset); in scc_writel()
415 static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset) in scc_readl() argument
417 return dpriv->scc_regs[offset >> 2]; in scc_readl()
420 static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev) in scc_readl_star() argument
423 readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); in scc_readl_star()
424 return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); in scc_readl_star()
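The four helpers above implement a shadowed register file: writes go both to dpriv->scc_regs[] and to the chip, reads normally come back from the shadow copy, and only scc_readl_star() really touches the hardware (the throw-away readl before the returned one is presumably there to flush posted writes before sampling STAR). A minimal user-space sketch of that read-modify-write shadowing; the register-file size, offsets and names used here (shadow, fake_hw) are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define SCC_REGS 16                      /* illustrative register file size */

static uint32_t shadow[SCC_REGS];        /* software copy, like dpriv->scc_regs[] */
static uint32_t fake_hw[SCC_REGS];       /* stand-in for the memory-mapped SCC block */

static void scc_write(int offset, uint32_t bits)   /* scc_writel() pattern */
{
    shadow[offset >> 2] = bits;
    fake_hw[offset >> 2] = bits;         /* writel(bits, base + offset) in the driver */
}

static uint32_t scc_read(int offset)               /* scc_readl() pattern: no MMIO read */
{
    return shadow[offset >> 2];
}

static void scc_patch(uint32_t mask, uint32_t value, int offset)   /* scc_patchl() pattern */
{
    uint32_t state = scc_read(offset);

    state &= ~mask;                      /* clear the masked bits ... */
    state |= value;                      /* ... and set the new ones */
    scc_write(offset, state);
}

int main(void)
{
    scc_write(0x08, 0x00050008);
    scc_patch(0x00000008, 0x00000000, 0x08);
    printf("shadow readback: 0x%08x\n", scc_read(0x08));   /* 0x00050000 */
    return 0;
}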
427 static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv, in dscc4_do_tx() argument
430 dpriv->ltda = dpriv->tx_fd_dma + in dscc4_do_tx()
431 ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD); in dscc4_do_tx()
432 writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); in dscc4_do_tx()
434 readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); in dscc4_do_tx()
437 static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv, in dscc4_rx_update() argument
440 dpriv->lrda = dpriv->rx_fd_dma + in dscc4_rx_update()
441 ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD); in dscc4_rx_update()
442 writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); in dscc4_rx_update()
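dscc4_do_tx() and dscc4_rx_update() convert a free-running ring index into the bus address of the matching descriptor and write it to the per-channel LTDA/LRDA register (CH0LTDA/CH0LRDA plus dev_id*4). The index arithmetic relies on unsigned wraparound: (index - 1) % RING_SIZE selects the previous slot even when the index is 0. A self-contained sketch with an assumed ring size and a simplified descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 32        /* assumed; must divide 2^32 for the wraparound trick to hold */

struct TxFD { uint32_t state, next, data, complete; };   /* simplified descriptor */

/* bus address to program as LTDA for a given free-running tx index */
static uint32_t ltda_for(uint32_t tx_fd_dma, unsigned int tx_current)
{
    return tx_fd_dma + ((tx_current - 1) % TX_RING_SIZE) * sizeof(struct TxFD);
}

int main(void)
{
    printf("0x%08x\n", ltda_for(0x10000000u, 5));   /* slot 4 */
    printf("0x%08x\n", ltda_for(0x10000000u, 0));   /* wraps to the last slot */
    return 0;
}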
445 static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv) in dscc4_tx_done() argument
447 return dpriv->tx_current == dpriv->tx_dirty; in dscc4_tx_done()
450 static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv, in dscc4_tx_quiescent() argument
453 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; in dscc4_tx_quiescent()
456 static int state_check(u32 state, struct dscc4_dev_priv *dpriv, in state_check() argument
462 if (SOURCE_ID(state) != dpriv->dev_id) { in state_check()
477 struct dscc4_dev_priv *dpriv, in dscc4_tx_print() argument
481 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); in dscc4_tx_print()
484 static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) in dscc4_release_ring() argument
486 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_release_ring()
487 struct TxFD *tx_fd = dpriv->tx_fd; in dscc4_release_ring()
488 struct RxFD *rx_fd = dpriv->rx_fd; in dscc4_release_ring()
492 pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); in dscc4_release_ring()
493 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); in dscc4_release_ring()
495 skbuff = dpriv->tx_skbuff; in dscc4_release_ring()
506 skbuff = dpriv->rx_skbuff; in dscc4_release_ring()
518 static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, in try_get_rx_skb() argument
521 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; in try_get_rx_skb()
522 struct RxFD *rx_fd = dpriv->rx_fd + dirty; in try_get_rx_skb()
528 dpriv->rx_skbuff[dirty] = skb; in try_get_rx_skb()
531 rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, in try_get_rx_skb()
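try_get_rx_skb() refills one receive slot: the producer index rx_dirty (modulo RX_RING_SIZE) selects the slot, the freshly allocated skb is remembered in rx_skbuff[] so it can be unmapped later, and the descriptor's data field gets the DMA address returned by pci_map_single(). A user-space analogue of that refill step, with malloc() standing in for skb allocation and DMA mapping; the ring size and buffer length are assumptions:

#include <stdint.h>
#include <stdlib.h>

#define RX_RING_SIZE 32                   /* assumed ring size */
#define RX_BUF_LEN   1600                 /* assumed buffer length, roughly an HDLC MRU */

struct RxFD { uint32_t state1, next, data, state2, end; };   /* simplified descriptor */

struct rx_ring {
    struct RxFD fd[RX_RING_SIZE];
    void *buf[RX_RING_SIZE];              /* plays the role of dpriv->rx_skbuff[] */
    unsigned int rx_dirty;                /* free-running producer index */
};

/* refill the slot selected by rx_dirty; the caller advances rx_dirty on success */
static int try_get_rx_buf(struct rx_ring *r)
{
    unsigned int dirty = r->rx_dirty % RX_RING_SIZE;
    void *p = malloc(RX_BUF_LEN);         /* skb allocation + pci_map_single() in the driver */

    if (!p)
        return -1;
    r->buf[dirty] = p;
    r->fd[dirty].data = (uint32_t)(uintptr_t)p;   /* a real bus address in the driver */
    return 0;
}

int main(void)
{
    static struct rx_ring r;

    /* initial fill, as the refill loops in dscc4_rx_skb()/dscc4_init_ring() do */
    while (r.rx_dirty < RX_RING_SIZE && try_get_rx_buf(&r) == 0)
        r.rx_dirty++;
    return 0;
}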
543 static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, in dscc4_wait_ack_cec() argument
549 if (!(scc_readl_star(dpriv, dev) & SccBusy)) { in dscc4_wait_ack_cec()
589 static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) in dscc4_xpr_ack() argument
591 int cur = dpriv->iqtx_current%IRQ_RING_SIZE; in dscc4_xpr_ack()
595 if (!(dpriv->flags & (NeedIDR | NeedIDT)) || in dscc4_xpr_ack()
596 (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) in dscc4_xpr_ack()
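Both dscc4_wait_ack_cec() and dscc4_xpr_ack() are bounded polls: the first retries until the SccBusy bit clears in STAR (read through scc_readl_star(), the one register that is actually read from the chip), the second until either the NeedIDR/NeedIDT flags are gone or an Xpr event appears in the iqtx ring, giving up after a fixed number of attempts. A generic sketch of that bounded-retry shape; the attempt count and the missing sleep between tries are placeholders, not the driver's values:

#include <stdio.h>

/* retry a condition a fixed number of times, as the two ack helpers above do */
static int poll_until(int (*done)(void *), void *ctx, int attempts)
{
    while (attempts--) {
        if (done(ctx))
            return 0;
        /* the driver sleeps briefly between attempts; omitted in this sketch */
    }
    return -1;    /* failure once the retries are exhausted */
}

static int always_ready(void *ctx)
{
    (void)ctx;
    return 1;
}

int main(void)
{
    printf("%d\n", poll_until(always_ready, NULL, 100));
    return 0;
}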
606 static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_rx_reset() argument
610 spin_lock_irqsave(&dpriv->pci_priv->lock, flags); in dscc4_rx_reset()
612 writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); in dscc4_rx_reset()
613 scc_patchl(PowerUp, 0, dpriv, dev, CCR0); in dscc4_rx_reset()
614 readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); in dscc4_rx_reset()
615 writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); in dscc4_rx_reset()
616 writel(Action, dpriv->base_addr + GCMDR); in dscc4_rx_reset()
617 spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags); in dscc4_rx_reset()
623 static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_tx_reset() argument
628 scc_patchl(PowerUp, 0, dpriv, dev, CCR0); in dscc4_tx_reset()
629 scc_writel(0x00050000, dpriv, dev, CCR2); in dscc4_tx_reset()
633 while (!dscc4_tx_quiescent(dpriv, dev) && ++i) { in dscc4_tx_reset()
638 writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); in dscc4_tx_reset()
645 static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, in dscc4_rx_skb() argument
648 struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; in dscc4_rx_skb()
649 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_rx_skb()
653 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; in dscc4_rx_skb()
680 while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) { in dscc4_rx_skb()
681 if (try_get_rx_skb(dpriv, dev) < 0) in dscc4_rx_skb()
683 dpriv->rx_dirty++; in dscc4_rx_skb()
685 dscc4_rx_update(dpriv, dev); in dscc4_rx_skb()
711 struct dscc4_dev_priv *dpriv; in dscc4_init_one() local
788 dpriv = priv->root + i; in dscc4_init_one()
789 dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev, in dscc4_init_one()
790 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma); in dscc4_init_one()
791 if (!dpriv->iqtx) in dscc4_init_one()
793 writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); in dscc4_init_one()
796 dpriv = priv->root + i; in dscc4_init_one()
797 dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev, in dscc4_init_one()
798 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma); in dscc4_init_one()
799 if (!dpriv->iqrx) in dscc4_init_one()
801 writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); in dscc4_init_one()
821 dpriv = priv->root + i; in dscc4_init_one()
823 dpriv->iqrx, dpriv->iqrx_dma); in dscc4_init_one()
828 dpriv = priv->root + i; in dscc4_init_one()
830 dpriv->iqtx, dpriv->iqtx_dma); in dscc4_init_one()
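In dscc4_init_one(), each channel gets its own interrupt queue pair: DMA-coherent rings allocated with pci_alloc_consistent(), whose bus addresses are programmed into IQTX0 + i*4 and IQRX0 + i*4; if an allocation fails, the error path walks back over the queues already allocated and frees them. A user-space sketch of that allocate-then-unwind pattern; the channel count and queue depth are assumptions, and calloc() stands in for the DMA-coherent allocation:

#include <stdint.h>
#include <stdlib.h>

#define CHANNELS      4                   /* the DSCC4 is a four-channel part; value assumed here */
#define IRQ_RING_SIZE 64                  /* assumed queue depth */

/* allocate one interrupt queue per channel, unwinding on failure */
static int alloc_irq_queues(uint32_t *iq[CHANNELS])
{
    int i;

    for (i = 0; i < CHANNELS; i++) {
        iq[i] = calloc(IRQ_RING_SIZE, sizeof(uint32_t));  /* pci_alloc_consistent() in the driver */
        if (!iq[i])
            goto err_free;
        /* the driver also programs the queue's bus address: writel(..., ioaddr + IQTX0 + i*4) */
    }
    return 0;

err_free:
    while (--i >= 0)
        free(iq[i]);                      /* pci_free_consistent() in the driver */
    return -1;
}

int main(void)
{
    uint32_t *iq[CHANNELS] = { 0 };

    return alloc_irq_queues(iq) ? 1 : 0;
}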
853 static void dscc4_init_registers(struct dscc4_dev_priv *dpriv, in dscc4_init_registers() argument
857 scc_writel(0x00000000, dpriv, dev, CCR0); in dscc4_init_registers()
859 scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR); in dscc4_init_registers()
867 scc_writel(0x02408000, dpriv, dev, CCR1); in dscc4_init_registers()
870 scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2); in dscc4_init_registers()
875 static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) in dscc4_set_quartz() argument
882 dpriv->pci_priv->xtal_hz = hz; in dscc4_set_quartz()
920 struct dscc4_dev_priv *dpriv = root + i; in dscc4_found1() local
921 struct net_device *d = dscc4_to_dev(dpriv); in dscc4_found1()
930 dpriv->dev_id = i; in dscc4_found1()
931 dpriv->pci_priv = ppriv; in dscc4_found1()
932 dpriv->base_addr = ioaddr; in dscc4_found1()
933 spin_lock_init(&dpriv->lock); in dscc4_found1()
938 dscc4_init_registers(dpriv, d); in dscc4_found1()
939 dpriv->parity = PARITY_CRC16_PR0_CCITT; in dscc4_found1()
940 dpriv->encoding = ENCODING_NRZ; in dscc4_found1()
949 dscc4_release_ring(dpriv); in dscc4_found1()
980 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_timer() local
985 dpriv->timer.expires = jiffies + TX_TIMEOUT; in dscc4_timer()
986 add_timer(&dpriv->timer); in dscc4_timer()
994 static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv) in dscc4_loopback_check() argument
996 sync_serial_settings *settings = &dpriv->settings; in dscc4_loopback_check()
999 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_loopback_check()
1048 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_open() local
1051 if ((dscc4_loopback_check(dpriv) < 0)) in dscc4_open()
1066 if (dpriv->flags & FakeReset) { in dscc4_open()
1067 dpriv->flags &= ~FakeReset; in dscc4_open()
1068 scc_patchl(0, PowerUp, dpriv, dev, CCR0); in dscc4_open()
1069 scc_patchl(0, 0x00050000, dpriv, dev, CCR2); in dscc4_open()
1070 scc_writel(EventsMask, dpriv, dev, IMR); in dscc4_open()
1076 dpriv->flags = NeedIDR | NeedIDT; in dscc4_open()
1078 scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0); in dscc4_open()
1087 if (scc_readl_star(dpriv, dev) & SccBusy) { in dscc4_open()
1094 scc_writel(EventsMask, dpriv, dev, IMR); in dscc4_open()
1097 scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR); in dscc4_open()
1099 if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0) in dscc4_open()
1109 if ((ret = dscc4_xpr_ack(dpriv)) < 0) { in dscc4_open()
1115 dscc4_tx_print(dev, dpriv, "Open"); in dscc4_open()
1120 init_timer(&dpriv->timer); in dscc4_open()
1121 dpriv->timer.expires = jiffies + 10*HZ; in dscc4_open()
1122 dpriv->timer.data = (unsigned long)dev; in dscc4_open()
1123 dpriv->timer.function = dscc4_timer; in dscc4_open()
1124 add_timer(&dpriv->timer); in dscc4_open()
1130 scc_writel(0xffffffff, dpriv, dev, IMR); in dscc4_open()
1131 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); in dscc4_open()
1139 static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_tx_poll() argument
1148 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_start_xmit() local
1149 struct dscc4_pci_priv *ppriv = dpriv->pci_priv; in dscc4_start_xmit()
1153 next = dpriv->tx_current%TX_RING_SIZE; in dscc4_start_xmit()
1154 dpriv->tx_skbuff[next] = skb; in dscc4_start_xmit()
1155 tx_fd = dpriv->tx_fd + next; in dscc4_start_xmit()
1164 spin_lock(&dpriv->lock); in dscc4_start_xmit()
1165 while (dscc4_tx_poll(dpriv, dev)); in dscc4_start_xmit()
1166 spin_unlock(&dpriv->lock); in dscc4_start_xmit()
1170 dscc4_tx_print(dev, dpriv, "Xmit"); in dscc4_start_xmit()
1172 if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)) in dscc4_start_xmit()
1175 if (dscc4_tx_quiescent(dpriv, dev)) in dscc4_start_xmit()
1176 dscc4_do_tx(dpriv, dev); in dscc4_start_xmit()
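dscc4_start_xmit() is the producer side of the transmit ring: the slot is tx_current % TX_RING_SIZE, the skb pointer is parked in tx_skbuff[] for the completion handler, and the driver stops the queue when (++tx_current - tx_dirty) % TX_RING_SIZE comes back to zero, i.e. when the producer is a full ring ahead of the consumer; the hardware is only re-kicked via dscc4_do_tx() if it had gone quiescent. A small sketch of just the index bookkeeping, assuming a power-of-two ring size:

#include <stdio.h>

#define TX_RING_SIZE 32     /* assumed; a power of two keeps the test valid across wraparound */

struct tx_ring {
    unsigned int tx_current;   /* free-running producer index */
    unsigned int tx_dirty;     /* free-running consumer index */
};

/* queue one frame; returns 1 when the ring just filled up and the queue should stop */
static int tx_enqueue(struct tx_ring *r)
{
    unsigned int slot = r->tx_current % TX_RING_SIZE;

    (void)slot;   /* descriptor setup and tx_skbuff[slot] bookkeeping would go here */
    return ((++r->tx_current - r->tx_dirty) % TX_RING_SIZE) == 0;
}

int main(void)
{
    struct tx_ring r = { 0, 0 };
    unsigned int queued = 0;

    while (!tx_enqueue(&r))
        queued++;
    printf("ring full after %u frames\n", queued + 1);   /* TX_RING_SIZE */
    return 0;
}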
1183 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_close() local
1185 del_timer_sync(&dpriv->timer); in dscc4_close()
1188 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); in dscc4_close()
1189 scc_patchl(0x00050000, 0, dpriv, dev, CCR2); in dscc4_close()
1190 scc_writel(0xffffffff, dpriv, dev, IMR); in dscc4_close()
1192 dpriv->flags |= FakeReset; in dscc4_close()
1255 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_set_clock() local
1264 xtal = dpriv->pci_priv->xtal_hz; in dscc4_set_clock()
1267 if (dscc4_check_clock_ability(dpriv->dev_id) < 0) in dscc4_set_clock()
1300 scc_writel(brr, dpriv, dev, BRR); in dscc4_set_clock()
1309 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_ioctl() local
1310 const size_t size = sizeof(dpriv->settings); in dscc4_ioctl()
1326 if (copy_to_user(line, &dpriv->settings, size)) in dscc4_ioctl()
1334 if (dpriv->flags & FakeReset) { in dscc4_ioctl()
1338 if (copy_from_user(&dpriv->settings, line, size)) in dscc4_ioctl()
1340 ret = dscc4_set_iface(dpriv, dev); in dscc4_ioctl()
1365 static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv, in dscc4_clock_setting() argument
1368 sync_serial_settings *settings = &dpriv->settings; in dscc4_clock_setting()
1373 state = scc_readl(dpriv, CCR0); in dscc4_clock_setting()
1387 scc_writel(state, dpriv, dev, CCR0); in dscc4_clock_setting()
1393 static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, in dscc4_encoding_setting() argument
1406 i = dscc4_match(encoding, dpriv->encoding); in dscc4_encoding_setting()
1408 scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0); in dscc4_encoding_setting()
1414 static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv, in dscc4_loopback_setting() argument
1417 sync_serial_settings *settings = &dpriv->settings; in dscc4_loopback_setting()
1420 state = scc_readl(dpriv, CCR1); in dscc4_loopback_setting()
1428 scc_writel(state, dpriv, dev, CCR1); in dscc4_loopback_setting()
1432 static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, in dscc4_crc_setting() argument
1443 i = dscc4_match(crc, dpriv->parity); in dscc4_crc_setting()
1445 scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1); in dscc4_crc_setting()
1451 static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev) in dscc4_set_iface() argument
1465 if ((ret = p->action(dpriv, dev)) < 0) in dscc4_set_iface()
1530 struct dscc4_dev_priv *dpriv) in dscc4_tx_irq() argument
1532 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_tx_irq()
1537 cur = dpriv->iqtx_current%IRQ_RING_SIZE; in dscc4_tx_irq()
1538 state = le32_to_cpu(dpriv->iqtx[cur]); in dscc4_tx_irq()
1546 if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE) in dscc4_tx_irq()
1549 if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) && in dscc4_tx_irq()
1550 !dscc4_tx_done(dpriv)) in dscc4_tx_irq()
1551 dscc4_do_tx(dpriv, dev); in dscc4_tx_irq()
1555 dpriv->iqtx[cur] = 0; in dscc4_tx_irq()
1556 dpriv->iqtx_current++; in dscc4_tx_irq()
1558 if (state_check(state, dpriv, dev, "Tx") < 0) in dscc4_tx_irq()
1567 dscc4_tx_print(dev, dpriv, "Alls"); in dscc4_tx_irq()
1572 cur = dpriv->tx_dirty%TX_RING_SIZE; in dscc4_tx_irq()
1573 tx_fd = dpriv->tx_fd + cur; in dscc4_tx_irq()
1574 skb = dpriv->tx_skbuff[cur]; in dscc4_tx_irq()
1583 dpriv->tx_skbuff[cur] = NULL; in dscc4_tx_irq()
1584 ++dpriv->tx_dirty; in dscc4_tx_irq()
1608 dpriv->flags = NeedIDT; in dscc4_tx_irq()
1611 dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG); in dscc4_tx_irq()
1612 writel(Action, dpriv->base_addr + GCMDR); in dscc4_tx_irq()
1636 if (!(scc_readl_star(dpriv, dev) & SccBusy)) in dscc4_tx_irq()
1642 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; in dscc4_tx_irq()
1644 if (dpriv->flags & NeedIDT) { in dscc4_tx_irq()
1646 dscc4_tx_print(dev, dpriv, "Xpr"); in dscc4_tx_irq()
1647 ring = dpriv->tx_fd_dma + in dscc4_tx_irq()
1648 (dpriv->tx_dirty%TX_RING_SIZE)* in dscc4_tx_irq()
1651 dscc4_do_tx(dpriv, dev); in dscc4_tx_irq()
1655 dpriv->flags &= ~NeedIDT; in dscc4_tx_irq()
1657 if (dpriv->flags & NeedIDR) { in dscc4_tx_irq()
1658 ring = dpriv->rx_fd_dma + in dscc4_tx_irq()
1659 (dpriv->rx_current%RX_RING_SIZE)* in dscc4_tx_irq()
1662 dscc4_rx_update(dpriv, dev); in dscc4_tx_irq()
1666 dpriv->flags &= ~NeedIDR; in dscc4_tx_irq()
1669 scc_writel(0x08050008, dpriv, dev, CCR2); in dscc4_tx_irq()
1684 while (!dscc4_tx_poll(dpriv, dev)); in dscc4_tx_irq()
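dscc4_tx_irq() is the matching consumer: on an "Alls" completion event it takes the slot at tx_dirty % TX_RING_SIZE, unmaps and frees the skb that dscc4_start_xmit() parked in tx_skbuff[], clears the entry and advances tx_dirty; dscc4_tx_done() reports the ring drained when tx_current == tx_dirty. The sketch below drains everything in one loop for brevity, whereas the driver advances one frame per completion event; the ring layout repeats the assumptions of the transmit sketch:

#include <stddef.h>

#define TX_RING_SIZE 32                    /* assumed, as in the transmit sketch */

struct tx_ring {
    unsigned int tx_current;               /* producer index */
    unsigned int tx_dirty;                 /* consumer index */
    void *tx_skbuff[TX_RING_SIZE];         /* frames awaiting completion */
};

/* reclaim completed slots until the ring is drained (tx_current == tx_dirty) */
static void tx_reclaim(struct tx_ring *r)
{
    while (r->tx_dirty != r->tx_current) {
        unsigned int slot = r->tx_dirty % TX_RING_SIZE;

        /* the driver unmaps the DMA buffer and frees the skb for 'slot' here */
        r->tx_skbuff[slot] = NULL;
        ++r->tx_dirty;
    }
}

int main(void)
{
    static struct tx_ring r = { .tx_current = 3, .tx_dirty = 0 };

    tx_reclaim(&r);
    return r.tx_dirty == r.tx_current ? 0 : 1;
}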
1699 struct dscc4_dev_priv *dpriv) in dscc4_rx_irq() argument
1701 struct net_device *dev = dscc4_to_dev(dpriv); in dscc4_rx_irq()
1706 cur = dpriv->iqrx_current%IRQ_RING_SIZE; in dscc4_rx_irq()
1707 state = le32_to_cpu(dpriv->iqrx[cur]); in dscc4_rx_irq()
1710 dpriv->iqrx[cur] = 0; in dscc4_rx_irq()
1711 dpriv->iqrx_current++; in dscc4_rx_irq()
1713 if (state_check(state, dpriv, dev, "Rx") < 0) in dscc4_rx_irq()
1725 cur = dpriv->rx_current%RX_RING_SIZE; in dscc4_rx_irq()
1726 rx_fd = dpriv->rx_fd + cur; in dscc4_rx_irq()
1743 rx_fd = dpriv->rx_fd; in dscc4_rx_irq()
1746 try_get_rx_skb(dpriv, dev); in dscc4_rx_irq()
1756 dscc4_rx_skb(dpriv, dev); in dscc4_rx_irq()
1805 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; in dscc4_rx_irq()
1807 scc_patchl(RxActivate, 0, dpriv, dev, CCR2); in dscc4_rx_irq()
1813 scc_writel(RxSccRes, dpriv, dev, CMDR); in dscc4_rx_irq()
1814 dpriv->flags |= RdoSet; in dscc4_rx_irq()
1822 cur = dpriv->rx_current++%RX_RING_SIZE; in dscc4_rx_irq()
1823 rx_fd = dpriv->rx_fd + cur; in dscc4_rx_irq()
1832 dscc4_rx_skb(dpriv, dev); in dscc4_rx_irq()
1836 if (dpriv->flags & RdoSet) in dscc4_rx_irq()
1845 writel(dpriv->rx_fd_dma + in dscc4_rx_irq()
1846 (dpriv->rx_current%RX_RING_SIZE)* in dscc4_rx_irq()
1860 scc_patchl(0, RxActivate, dpriv, dev, CCR2); in dscc4_rx_irq()
1885 static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) in dscc4_init_dummy_skb() argument
1891 int last = dpriv->tx_dirty%TX_RING_SIZE; in dscc4_init_dummy_skb()
1892 struct TxFD *tx_fd = dpriv->tx_fd + last; in dscc4_init_dummy_skb()
1898 tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, in dscc4_init_dummy_skb()
1901 dpriv->tx_skbuff[last] = skb; in dscc4_init_dummy_skb()
1908 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_init_ring() local
1909 struct pci_dev *pdev = dpriv->pci_priv->pdev; in dscc4_init_ring()
1915 ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma); in dscc4_init_ring()
1918 dpriv->rx_fd = rx_fd = (struct RxFD *) ring; in dscc4_init_ring()
1920 ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma); in dscc4_init_ring()
1923 dpriv->tx_fd = tx_fd = (struct TxFD *) ring; in dscc4_init_ring()
1925 memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE); in dscc4_init_ring()
1926 dpriv->tx_dirty = 0xffffffff; in dscc4_init_ring()
1927 i = dpriv->tx_current = 0; in dscc4_init_ring()
1932 tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma); in dscc4_init_ring()
1933 (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma + in dscc4_init_ring()
1937 if (!dscc4_init_dummy_skb(dpriv)) in dscc4_init_ring()
1940 memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); in dscc4_init_ring()
1941 i = dpriv->rx_dirty = dpriv->rx_current = 0; in dscc4_init_ring()
1949 if (try_get_rx_skb(dpriv, dev) >= 0) in dscc4_init_ring()
1950 dpriv->rx_dirty++; in dscc4_init_ring()
1951 (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma + in dscc4_init_ring()
1958 pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); in dscc4_init_ring()
1960 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); in dscc4_init_ring()
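dscc4_init_ring() allocates both descriptor rings with pci_alloc_consistent() and chains them into a circle: each descriptor's next field holds the bus address of the following slot, and the wrap back to the ring base comes from the modular arithmetic whose continuation lines are truncated above (they do not mention dpriv). A user-space sketch of that circular linking for the transmit ring; the ring size and descriptor layout are assumptions, and the driver additionally stores these addresses little-endian via cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 32                        /* assumed ring size */

struct TxFD { uint32_t state, next, data, complete; };   /* simplified descriptor */

/* link the descriptors into a circle of bus addresses, as dscc4_init_ring() does */
static void tx_ring_link(struct TxFD *fd, uint32_t tx_fd_dma)
{
    unsigned int i;

    for (i = 0; i < TX_RING_SIZE; i++)
        fd[i].next = tx_fd_dma + ((i + 1) % TX_RING_SIZE) * sizeof(struct TxFD);
}

int main(void)
{
    static struct TxFD ring[TX_RING_SIZE];

    tx_ring_link(ring, 0x10000000u);
    printf("last -> 0x%08x\n", ring[TX_RING_SIZE - 1].next);   /* back to the ring base */
    return 0;
}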
1983 struct dscc4_dev_priv *dpriv = root + i; in dscc4_remove_one() local
1985 dscc4_release_ring(dpriv); in dscc4_remove_one()
1987 dpriv->iqrx, dpriv->iqrx_dma); in dscc4_remove_one()
1989 dpriv->iqtx, dpriv->iqtx_dma); in dscc4_remove_one()
2005 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); in dscc4_hdlc_attach() local
2021 dpriv->encoding = encoding; in dscc4_hdlc_attach()
2022 dpriv->parity = parity; in dscc4_hdlc_attach()