Lines matching references to "ring" in cassini.c (Sun Cassini Ethernet driver)
294 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument
297 if (ring == 0) { in cas_disable_irq()
304 switch (ring) { in cas_disable_irq()
316 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
321 REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
335 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument
337 if (ring == 0) { /* all but TX_DONE */ in cas_enable_irq()
343 switch (ring) { in cas_enable_irq()
355 REG_PLUS_INTRN_MASK(ring)); in cas_enable_irq()
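The two IRQ helpers above share one pattern: ring 0 is handled through the primary mask register, while the other rings each get their own REG_PLUS_INTRN_MASK(ring) register. A minimal user-space sketch of the idea, with a plain array standing in for the register file and hypothetical INTR_* mask values (the real bit layout is not shown in the matches above):

    #include <stdint.h>

    #define N_INTR_RINGS 4
    #define INTR_DISABLE 0xFFFFFFFFu   /* hypothetical: every source masked */
    #define INTR_RX_DONE 0x00000001u   /* hypothetical: this ring's RX-done bit */

    /* Stands in for cp->regs + REG_PLUS_INTRN_MASK(ring). */
    static uint32_t fake_mask_reg[N_INTR_RINGS];

    static void ring_disable_irq(int ring)
    {
        /* Setting a bit in the mask register silences that source. */
        fake_mask_reg[ring] = INTR_DISABLE;
    }

    static void ring_enable_irq(int ring)
    {
        /* Leave only this ring's RX-done unmasked; everything else stays off. */
        fake_mask_reg[ring] = ~INTR_RX_DONE;
    }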
1388 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument
1854 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) in cas_tx_ringN() argument
1861 spin_lock(&cp->tx_lock[ring]); in cas_tx_ringN()
1862 txds = cp->init_txds[ring]; in cas_tx_ringN()
1863 skbs = cp->tx_skbs[ring]; in cas_tx_ringN()
1864 entry = cp->tx_old[ring]; in cas_tx_ringN()
1866 count = TX_BUFF_COUNT(ring, entry, limit); in cas_tx_ringN()
1875 entry = TX_DESC_NEXT(ring, entry); in cas_tx_ringN()
1881 + cp->tx_tiny_use[ring][entry].nbufs + 1; in cas_tx_ringN()
1886 "tx[%d] done, slot %d\n", ring, entry); in cas_tx_ringN()
1889 cp->tx_tiny_use[ring][entry].nbufs = 0; in cas_tx_ringN()
1899 entry = TX_DESC_NEXT(ring, entry); in cas_tx_ringN()
1902 if (cp->tx_tiny_use[ring][entry].used) { in cas_tx_ringN()
1903 cp->tx_tiny_use[ring][entry].used = 0; in cas_tx_ringN()
1904 entry = TX_DESC_NEXT(ring, entry); in cas_tx_ringN()
1908 spin_lock(&cp->stat_lock[ring]); in cas_tx_ringN()
1909 cp->net_stats[ring].tx_packets++; in cas_tx_ringN()
1910 cp->net_stats[ring].tx_bytes += skb->len; in cas_tx_ringN()
1911 spin_unlock(&cp->stat_lock[ring]); in cas_tx_ringN()
1914 cp->tx_old[ring] = entry; in cas_tx_ringN()
1921 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) in cas_tx_ringN()
1923 spin_unlock(&cp->tx_lock[ring]); in cas_tx_ringN()
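cas_tx_ringN() reclaims completed transmits: it walks from the driver's tx_old consumer index up to the limit the hardware reported, frees each skb, and bumps per-ring counters under stat_lock. The walk itself is plain modular ring arithmetic; a stand-alone sketch, with a pointer array in place of cp->tx_skbs[ring] and free() in place of dev_kfree_skb_irq():

    #include <stdlib.h>

    #define TX_RING_SIZE 64                   /* hypothetical; power of two */
    #define TX_DESC_NEXT(e) (((e) + 1) & (TX_RING_SIZE - 1))

    struct tx_ring {
        void *skbs[TX_RING_SIZE];  /* one packet per descriptor, for simplicity */
        int   tx_old;              /* consumer index: oldest un-reclaimed slot */
        unsigned long tx_packets;  /* per-ring stat, as in cp->net_stats[ring] */
    };

    /* Reclaim every slot from tx_old up to (but not including) limit,
     * the completion index the hardware reported for this ring. */
    static void tx_reclaim(struct tx_ring *r, int limit)
    {
        int entry = r->tx_old;

        while (entry != limit) {
            void *pkt = r->skbs[entry];

            r->skbs[entry] = NULL;
            free(pkt);                 /* dev_kfree_skb_irq() in the driver */
            r->tx_packets++;           /* guarded by stat_lock in the driver */
            entry = TX_DESC_NEXT(entry);
        }
        r->tx_old = entry;             /* publish the new consumer position */
    }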
1929 int limit, ring; in cas_tx() local
1937 for (ring = 0; ring < N_TX_RINGS; ring++) { in cas_tx()
1944 limit = readl(cp->regs + REG_TX_COMPN(ring)); in cas_tx()
1946 if (cp->tx_old[ring] != limit) in cas_tx()
1947 cas_tx_ringN(cp, ring, limit); in cas_tx()
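cas_tx() visits every TX ring, reads that ring's completion register, and only takes the reclaim path when the consumer index has fallen behind. A sketch of that scan; read_tx_completion() and tx_ring_reclaim() are stand-ins for readl(cp->regs + REG_TX_COMPN(ring)) and cas_tx_ringN():

    #define N_TX_RINGS 4

    extern int read_tx_completion(int ring);
    extern void tx_ring_reclaim(int ring, int limit);
    extern int tx_old[N_TX_RINGS];

    static void tx_scan_all_rings(void)
    {
        for (int ring = 0; ring < N_TX_RINGS; ring++) {
            int limit = read_tx_completion(ring);

            /* Nothing completed since the last pass: skip the lock and loop. */
            if (tx_old[ring] != limit)
                tx_ring_reclaim(ring, limit);
        }
    }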
2189 static void cas_post_page(struct cas *cp, const int ring, const int index) in cas_post_page() argument
2194 entry = cp->rx_old[ring]; in cas_post_page()
2196 new = cas_page_swap(cp, ring, index); in cas_post_page()
2197 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); in cas_post_page()
2198 cp->init_rxds[ring][entry].index = in cas_post_page()
2200 CAS_BASE(RX_INDEX_RING, ring)); in cas_post_page()
2202 entry = RX_DESC_ENTRY(ring, entry + 1); in cas_post_page()
2203 cp->rx_old[ring] = entry; in cas_post_page()
2208 if (ring == 0) in cas_post_page()
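cas_post_page() swaps a fresh page into the RX descriptor at rx_old, rewrites the descriptor's buffer address and index field, then advances rx_old; per the ring == 0 test, only ring 0 kicks the hardware immediately. A sketch with a simplified two-field descriptor (the index encoding here is hypothetical; the driver builds it with CAS_BASE(RX_INDEX_RING, ring)):

    #include <stdint.h>

    #define RX_RING_SIZE 128
    #define RX_DESC_NEXT(e) (((e) + 1) & (RX_RING_SIZE - 1))

    struct rx_desc {          /* simplified stand-in for the hardware descriptor */
        uint64_t buffer;      /* DMA address of the backing page */
        uint64_t index;       /* slot index plus ring id, hypothetical encoding */
    };

    extern struct rx_desc rxds[RX_RING_SIZE];
    extern uint64_t swap_in_fresh_page(int ring, int index); /* cas_page_swap() stand-in */
    extern void kick_ring0(int entry);

    static int rx_old;        /* cp->rx_old[ring] in the driver */

    static void post_page(int ring, int index)
    {
        int entry = rx_old;

        /* Swap a spare page into this slot and point the descriptor at it. */
        rxds[entry].buffer = swap_in_fresh_page(ring, index); /* cpu_to_le64() in the driver */
        rxds[entry].index  = (uint64_t)entry | ((uint64_t)ring << 32);

        rx_old = RX_DESC_NEXT(entry);
        if (ring == 0)        /* only ring 0 kicks the hardware right away */
            kick_ring0(rx_old);
    }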
2217 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) in cas_post_rxds_ringN() argument
2221 cas_page_t **page = cp->rx_pages[ring]; in cas_post_rxds_ringN()
2223 entry = cp->rx_old[ring]; in cas_post_rxds_ringN()
2226 "rxd[%d] interrupt, done: %d\n", ring, entry); in cas_post_rxds_ringN()
2230 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); in cas_post_rxds_ringN()
2240 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); in cas_post_rxds_ringN()
2244 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2245 cp->rx_last[ring] = num ? num - released : 0; in cas_post_rxds_ringN()
2251 cp->init_rxds[ring][entry].buffer = in cas_post_rxds_ringN()
2262 entry = RX_DESC_ENTRY(ring, entry + 1); in cas_post_rxds_ringN()
2264 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2269 if (ring == 0) in cas_post_rxds_ringN()
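cas_post_rxds_ringN() refills descriptors in batches; the entry + num - 4 arithmetic above suggests the chip consumes descriptors in clusters of four, so the kick index trails the last written entry. A sketch of a refill loop that only advertises fully populated clusters (the cluster size of four is an assumption read off that arithmetic):

    #define RX_RING_SIZE 128
    #define RX_DESC_NEXT(e) (((e) + 1) & (RX_RING_SIZE - 1))

    extern int refill_slot(int entry);   /* returns 0 if no page was available */
    extern void write_kick_register(int kick);

    static int rx_old;

    /* Try to post `num` fresh descriptors starting at rx_old. */
    static int post_rxds(int num)
    {
        int entry = rx_old;
        int posted = 0;

        while (posted < num) {
            if (!refill_slot(entry))
                break;                    /* out of pages: retry later */
            entry = RX_DESC_NEXT(entry);
            posted++;
        }
        rx_old = entry;

        /* Only advertise fully populated clusters of 4 (assumed here),
         * mirroring how the kick index is kept 4 behind in the driver. */
        write_kick_register(entry & ~3);
        return num - posted;              /* how many are still owed, cf. rx_last */
    }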
2290 static int cas_rx_ringN(struct cas *cp, int ring, int budget) in cas_rx_ringN() argument
2292 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; in cas_rx_ringN()
2298 ring, in cas_rx_ringN()
2299 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); in cas_rx_ringN()
2301 entry = cp->rx_new[ring]; in cas_rx_ringN()
2327 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2328 cp->net_stats[ring].rx_errors++; in cas_rx_ringN()
2330 cp->net_stats[ring].rx_length_errors++; in cas_rx_ringN()
2332 cp->net_stats[ring].rx_crc_errors++; in cas_rx_ringN()
2333 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2337 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2338 ++cp->net_stats[ring].rx_dropped; in cas_rx_ringN()
2339 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2359 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2360 cp->net_stats[ring].rx_packets++; in cas_rx_ringN()
2361 cp->net_stats[ring].rx_bytes += len; in cas_rx_ringN()
2362 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2390 entry = RX_COMP_ENTRY(ring, entry + 1 + in cas_rx_ringN()
2397 cp->rx_new[ring] = entry; in cas_rx_ringN()
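cas_rx_ringN() drains completion entries starting at rx_new, taking stat_lock only for the brief counter updates, and stops when the budget is spent; the interrupt path at line 2463 passes a budget of 0, which this sketch treats as "no limit" (an assumption). comp_entry_ready() and deliver_packet() are stand-ins for the descriptor parsing the matches above do not show:

    #define RX_COMP_RING_SIZE 256
    #define RX_COMP_NEXT(e) (((e) + 1) & (RX_COMP_RING_SIZE - 1))

    struct rx_stats {
        unsigned long rx_packets, rx_bytes, rx_errors, rx_dropped;
    };

    extern int comp_entry_ready(int entry);   /* hardware "done" bit set? */
    extern int deliver_packet(int entry);     /* returns byte count, <0 on error */

    static int rx_new;
    static struct rx_stats stats;             /* cp->net_stats[ring] in the driver */

    static int rx_drain(int budget)
    {
        int entry = rx_new;
        int npackets = 0;

        while (comp_entry_ready(entry)) {
            int len = deliver_packet(entry);

            if (len < 0) {
                stats.rx_errors++;            /* under stat_lock in the driver */
            } else {
                stats.rx_packets++;
                stats.rx_bytes += len;
            }
            npackets++;
            entry = RX_COMP_NEXT(entry);
            if (budget && npackets >= budget) /* budget 0 == unlimited */
                break;
        }
        rx_new = entry;                       /* cp->rx_new[ring] = entry */
        return npackets;
    }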
2407 struct cas *cp, int ring) in cas_post_rxcs_ringN() argument
2409 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; in cas_post_rxcs_ringN()
2412 last = cp->rx_cur[ring]; in cas_post_rxcs_ringN()
2413 entry = cp->rx_new[ring]; in cas_post_rxcs_ringN()
2416 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); in cas_post_rxcs_ringN()
2421 last = RX_COMP_ENTRY(ring, last + 1); in cas_post_rxcs_ringN()
2423 cp->rx_cur[ring] = last; in cas_post_rxcs_ringN()
2425 if (ring == 0) in cas_post_rxcs_ringN()
2428 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); in cas_post_rxcs_ringN()
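cas_post_rxcs_ringN() returns consumed completion entries to the chip: every entry from rx_cur up to rx_new is re-initialized, then the tail register is written (ring 0 through its own register, the rest through REG_PLUS_RX_COMPN_TAIL(ring)). Sketch:

    #define RX_COMP_RING_SIZE 256
    #define RX_COMP_NEXT(e) (((e) + 1) & (RX_COMP_RING_SIZE - 1))

    extern void reinit_comp_entry(int entry); /* zero it for hardware reuse */
    extern void write_comp_tail(int ring, int tail);

    static int rx_cur;   /* last entry returned to the hardware */
    static int rx_new;   /* first entry the driver has not consumed yet */

    static void post_rxcs(int ring)
    {
        int last = rx_cur;

        /* Recycle everything the driver has finished with. */
        while (last != rx_new) {
            reinit_comp_entry(last);
            last = RX_COMP_NEXT(last);
        }
        rx_cur = last;
        write_comp_tail(ring, last);  /* chip may now overwrite up to here */
    }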
2439 const int ring) in cas_handle_irqN() argument
2442 cas_post_rxcs_ringN(dev, cp, ring); in cas_handle_irqN()
2450 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; in cas_interruptN() local
2451 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); in cas_interruptN()
2463 cas_rx_ringN(cp, ring, 0); in cas_interruptN()
2469 cas_handle_irqN(dev, cp, status, ring); in cas_interruptN()
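cas_interruptN() services the secondary interrupt vectors: the ring number is recovered from which IRQ fired (INTC maps to ring 2, otherwise ring 3), and only that ring's status register is read. The mapping is a one-liner:

    /* Hypothetical vector numbers; the driver keeps the real one in
     * cp->pci_irq_INTC. Two secondary vectors: INTC serves ring 2,
     * the remaining vector (INTD) serves ring 3. */
    static int irq_to_ring(int irq, int pci_irq_intc)
    {
        return (irq == pci_irq_intc) ? 2 : 3;
    }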
2727 static inline int cas_intme(int ring, int entry) in cas_intme() argument
2730 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) in cas_intme()
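cas_intme() is cheap TX interrupt coalescing: request an interrupt only when the entry index is a multiple of half the ring size, i.e. twice per trip around the ring. A runnable check of the mask, assuming a hypothetical 128-entry ring:

    #include <stdio.h>

    #define TX_RING_SIZE 128   /* hypothetical; must be a power of two */

    /* True only when entry is a multiple of TX_RING_SIZE / 2, so a
     * completion interrupt fires twice per lap around the ring. */
    static int intme(int entry)
    {
        return !(entry & ((TX_RING_SIZE >> 1) - 1));
    }

    int main(void)
    {
        for (int e = 0; e < TX_RING_SIZE; e++)
            if (intme(e))
                printf("interrupt at entry %d\n", e);  /* prints 0 and 64 */
        return 0;
    }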
2736 static void cas_write_txd(struct cas *cp, int ring, int entry, in cas_write_txd() argument
2739 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; in cas_write_txd()
2742 if (cas_intme(ring, entry)) in cas_write_txd()
2750 static inline void *tx_tiny_buf(struct cas *cp, const int ring, in tx_tiny_buf() argument
2753 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_buf()
2756 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, in tx_tiny_map() argument
2759 cp->tx_tiny_use[ring][tentry].nbufs++; in tx_tiny_map()
2760 cp->tx_tiny_use[ring][entry].used = 1; in tx_tiny_map()
2761 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_map()
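The tx_tiny helpers carve fixed TX_TINY_BUF_LEN slices out of one pre-mapped per-ring block, addressed by the same entry index as the descriptor, so short trailing bytes can be bounced without a fresh DMA mapping. Note the split bookkeeping visible above: nbufs is counted on tentry (the slot that owns the packet) while used marks the slot actually consumed. A sketch of the arithmetic, with a fake DMA base:

    #include <stdint.h>

    #define TX_RING_SIZE    64
    #define TX_TINY_BUF_LEN 64   /* hypothetical slice size */

    struct tiny_use { int nbufs, used; };

    static uint8_t   tiny_bufs[TX_RING_SIZE * TX_TINY_BUF_LEN]; /* one block per ring */
    static uintptr_t tiny_dvma = 0x10000000;                    /* fake DMA base */
    static struct tiny_use tiny_use[TX_RING_SIZE];

    /* CPU-visible slice for descriptor slot `entry`. */
    static void *tiny_buf(int entry)
    {
        return tiny_bufs + TX_TINY_BUF_LEN * entry;
    }

    /* Device-visible address of the same slice; `tentry` is the slot that
     * owns the whole packet and counts the tiny buffers in flight. */
    static uintptr_t tiny_map(int entry, int tentry)
    {
        tiny_use[tentry].nbufs++;
        tiny_use[entry].used = 1;
        return tiny_dvma + (uintptr_t)TX_TINY_BUF_LEN * entry;
    }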
2764 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, in cas_xmit_tx_ringN() argument
2774 spin_lock_irqsave(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2777 if (TX_BUFFS_AVAIL(cp, ring) <= in cas_xmit_tx_ringN()
2780 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2795 entry = cp->tx_new[ring]; in cas_xmit_tx_ringN()
2796 cp->tx_skbs[ring][entry] = skb; in cas_xmit_tx_ringN()
2808 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2810 entry = TX_DESC_NEXT(ring, entry); in cas_xmit_tx_ringN()
2813 tx_tiny_buf(cp, ring, entry), tabort); in cas_xmit_tx_ringN()
2814 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2815 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, in cas_xmit_tx_ringN()
2818 cas_write_txd(cp, ring, entry, mapping, len, ctrl | in cas_xmit_tx_ringN()
2821 entry = TX_DESC_NEXT(ring, entry); in cas_xmit_tx_ringN()
2835 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2837 entry = TX_DESC_NEXT(ring, entry); in cas_xmit_tx_ringN()
2840 memcpy(tx_tiny_buf(cp, ring, entry), in cas_xmit_tx_ringN()
2844 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2848 cas_write_txd(cp, ring, entry, mapping, len, ctrl, in cas_xmit_tx_ringN()
2850 entry = TX_DESC_NEXT(ring, entry); in cas_xmit_tx_ringN()
2853 cp->tx_new[ring] = entry; in cas_xmit_tx_ringN()
2854 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) in cas_xmit_tx_ringN()
2859 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); in cas_xmit_tx_ringN()
2860 writel(entry, cp->regs + REG_TX_KICKN(ring)); in cas_xmit_tx_ringN()
2861 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
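cas_xmit_tx_ringN() reserves worst-case room before touching the ring: each of up to MAX_SKB_FRAGS + 1 segments may need CAS_TABORT(cp) descriptors (the extra descriptor covers a tiny-buffer split), and the same test after queuing decides whether to stop the queue. A sketch of the guard, treating CAS_TABORT as a plain constant:

    #define MAX_SKB_FRAGS 17
    #define CAS_TABORT    2   /* hypothetical: worst-case descriptors per segment */

    extern int tx_buffs_avail(int ring);
    extern void stop_tx_queue(void);

    /* Worst case: head plus every fragment, each possibly split once. */
    static int tx_has_room(int ring)
    {
        return tx_buffs_avail(ring) > CAS_TABORT * (MAX_SKB_FRAGS + 1);
    }

    static int xmit_guard(int ring)
    {
        if (!tx_has_room(ring)) {
            stop_tx_queue();   /* netif_stop_queue() in the driver */
            return -1;         /* caller retries after reclaim frees slots */
        }
        return 0;
    }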
2872 static int ring; in cas_start_xmit() local
2880 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) in cas_start_xmit()
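cas_start_xmit() load-balances with nothing more than a function-local static counter masked down to the ring count, so successive packets rotate through rings 0..N_TX_RINGS-1. Sketch:

    #define N_TX_RINGS      4
    #define N_TX_RINGS_MASK (N_TX_RINGS - 1)  /* valid because the count is a power of two */

    extern int xmit_on_ring(int ring, const void *pkt);

    /* Round-robin ring selection; `ring` persists across calls, like the
     * function-local static in cas_start_xmit(). */
    static int start_xmit(const void *pkt)
    {
        static int ring;

        return xmit_on_ring(ring++ & N_TX_RINGS_MASK, pkt);
    }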
3893 static void cas_clean_txd(struct cas *cp, int ring) in cas_clean_txd() argument
3895 struct cas_tx_desc *txd = cp->init_txds[ring]; in cas_clean_txd()
3896 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; in cas_clean_txd()
3900 size = TX_DESC_RINGN_SIZE(ring); in cas_clean_txd()
3929 if (cp->tx_tiny_use[ring][ent].used) in cas_clean_txd()
3937 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); in cas_clean_txd()
3941 static inline void cas_free_rx_desc(struct cas *cp, int ring) in cas_free_rx_desc() argument
3943 cas_page_t **page = cp->rx_pages[ring]; in cas_free_rx_desc()
3946 size = RX_DESC_RINGN_SIZE(ring); in cas_free_rx_desc()
3981 static inline int cas_alloc_rx_desc(struct cas *cp, int ring) in cas_alloc_rx_desc() argument
3983 cas_page_t **page = cp->rx_pages[ring]; in cas_alloc_rx_desc()
3986 size = RX_DESC_RINGN_SIZE(ring); in cas_alloc_rx_desc()
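The teardown and setup helpers (cas_clean_txd, cas_free_rx_desc, cas_alloc_rx_desc) all size their loops with the RINGN_SIZE macros, letting each ring carry its own length. A sketch of the alloc/free pairing for one ring's page array; the unwind-on-partial-failure behavior is an assumption, and page_buf stands in for cas_page_t:

    #include <stdlib.h>

    #define RX_RING_SIZE 128   /* hypothetical; the RINGN_SIZE macros make this per-ring */

    struct page_buf { void *virt; };

    static void free_rx_desc(struct page_buf **pages)
    {
        for (int i = 0; i < RX_RING_SIZE; i++)
            free(pages[i]);    /* free(NULL) is a no-op, so partial arrays are fine */
        free(pages);
    }

    static struct page_buf **alloc_rx_desc(void)
    {
        struct page_buf **pages = calloc(RX_RING_SIZE, sizeof(*pages));

        if (!pages)
            return NULL;
        for (int i = 0; i < RX_RING_SIZE; i++) {
            pages[i] = calloc(1, sizeof(*pages[i]));
            if (!pages[i]) {
                free_rx_desc(pages);   /* unwind everything allocated so far */
                return NULL;
            }
        }
        return pages;
    }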