Lines matching refs:rx_small (each entry lists the source line number, the matching code, and the enclosing function or declaration context).

175 	struct myri10ge_rx_buf rx_small;  member
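
For orientation, these are the struct myri10ge_rx_buf fields exercised by the references below. This is an abridged sketch reconstructed from this listing: the pointer type of lanai comes from the cast at line 2430, while the element types of shadow and info, the field order, and any detail not backed by a listed line are assumptions rather than the driver's exact definition.

	struct myri10ge_rx_buf {
		struct mcp_kreq_ether_recv __iomem *lanai;	/* NIC-visible receive ring (line 2430) */
		struct mcp_kreq_ether_recv *shadow;		/* host copy of the ring, kzalloc'd per slice (assumed type) */
		struct myri10ge_rx_buffer_state *info;		/* per-slot page/DMA bookkeeping (assumed type) */
		int page_offset;	/* offset of the next buffer inside the current allocation page */
		int cnt;		/* free-running consumer counter: buffers handed up the stack */
		int fill_cnt;		/* free-running producer counter: buffers posted to the NIC */
		int mask;		/* ring entries - 1; the entry count is a power of two (line 2141) */
		int watchdog_needed;	/* a refill failed and the watchdog timer must retry (lines 3714-3720) */
	};
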
1156 ss->rx_small.cnt = 0; in myri10ge_reset()
1432 rx = &ss->rx_small; in myri10ge_rx_done()
1618 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1619 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_clean_rx_done()
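
Because cnt and fill_cnt are free-running counters, their difference is the number of buffers currently posted but not yet consumed, and lines 1618-1619 top the ring up whenever that level drops below myri10ge_fill_thresh. The sketch below restates the check as a hypothetical wrapper (refill_small_if_low is not a function in the driver); the trailing arguments to myri10ge_alloc_rx_pages are cut off in the listing and are assumptions.

	/* Hypothetical wrapper around the refill check at lines 1618-1619. */
	static void refill_small_if_low(struct myri10ge_priv *mgp,
					struct myri10ge_slice_state *ss)
	{
		struct myri10ge_rx_buf *rx = &ss->rx_small;

		/* Free-running counters: the difference is the current fill level. */
		if (rx->fill_cnt - rx->cnt < myri10ge_fill_thresh)
			/* Assumed trailing arguments: buffer size and a
			 * "called from the watchdog" flag. */
			myri10ge_alloc_rx_pages(mgp, rx,
						mgp->small_bytes + MXGEFW_PAD, 0);
	}
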
1885 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; in myri10ge_get_ringparam()
2020 data[i++] = (unsigned int)ss->rx_small.cnt; in myri10ge_get_ethtool_stats()
2141 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; in myri10ge_allocate_rings()
2158 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); in myri10ge_allocate_rings()
2159 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2160 if (ss->rx_small.shadow == NULL) in myri10ge_allocate_rings()
2175 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); in myri10ge_allocate_rings()
2176 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
2177 if (ss->rx_small.info == NULL) in myri10ge_allocate_rings()
2187 ss->rx_small.cnt = 0; in myri10ge_allocate_rings()
2189 ss->rx_small.fill_cnt = 0; in myri10ge_allocate_rings()
2190 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2192 ss->rx_small.watchdog_needed = 0; in myri10ge_allocate_rings()
2195 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; in myri10ge_allocate_rings()
2197 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_allocate_rings()
2201 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { in myri10ge_allocate_rings()
2203 slice, ss->rx_small.fill_cnt); in myri10ge_allocate_rings()
2226 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_allocate_rings()
2227 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_allocate_rings()
2228 int idx = i & ss->rx_small.mask; in myri10ge_allocate_rings()
2229 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_allocate_rings()
2231 put_page(ss->rx_small.info[idx].page); in myri10ge_allocate_rings()
2237 kfree(ss->rx_small.info); in myri10ge_allocate_rings()
2246 kfree(ss->rx_small.shadow); in myri10ge_allocate_rings()
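
Condensing lines 2141-2246: the setup sizes everything off a power-of-two ring (mask is entries - 1), allocates one shadow and one info element per slot, zeroes the counters, primes page_offset to MYRI10GE_ALLOC_SIZE so the first fill grabs a fresh page, pre-posts the whole ring, and on a partial fill unwinds by unmapping and releasing every posted page. The helper below is hypothetical, not code from the driver; the control flow between the listed lines, the cut-off arguments, and the error codes are assumptions, and the branch behind line 2195 (which sets fill_cnt to mask + 1 directly) is omitted because its condition is not visible here.

	/* Hypothetical condensation of the rx_small pieces of myri10ge_allocate_rings(). */
	static int setup_rx_small(struct myri10ge_priv *mgp,
				  struct myri10ge_slice_state *ss,
				  int rx_ring_entries)
	{
		size_t bytes;
		int i;

		ss->rx_small.mask = rx_ring_entries - 1;	/* entry count is a power of two */

		bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
		ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
		if (ss->rx_small.shadow == NULL)
			return -ENOMEM;

		bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
		ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
		if (ss->rx_small.info == NULL)
			goto abort_with_shadow;

		ss->rx_small.cnt = 0;				/* nothing consumed yet */
		ss->rx_small.fill_cnt = 0;			/* nothing posted yet */
		ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;	/* force a fresh page on first fill */
		ss->rx_small.watchdog_needed = 0;

		/* Pre-post one buffer per slot; fill_cnt records how far the fill got. */
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
		if (ss->rx_small.fill_cnt == ss->rx_small.mask + 1)
			return 0;

		/* Partial fill: release every posted-but-unconsumed buffer,
		 * mirroring the unwind at lines 2226-2231. */
		for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
			int idx = i & ss->rx_small.mask;

			myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
					       mgp->small_bytes + MXGEFW_PAD);
			put_page(ss->rx_small.info[idx].page);
		}
		kfree(ss->rx_small.info);
	abort_with_shadow:
		kfree(ss->rx_small.shadow);
		return -ENOMEM;
	}

The same walk over the cnt..fill_cnt window, with idx = i & mask, reappears in the myri10ge_free_rings() entries below (lines 2278-2286).
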
2278 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_free_rings()
2279 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_free_rings()
2280 idx = i & ss->rx_small.mask; in myri10ge_free_rings()
2281 if (i == ss->rx_small.fill_cnt - 1) in myri10ge_free_rings()
2282 ss->rx_small.info[idx].page_offset = in myri10ge_free_rings()
2284 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_free_rings()
2286 put_page(ss->rx_small.info[idx].page); in myri10ge_free_rings()
2316 kfree(ss->rx_small.info); in myri10ge_free_rings()
2322 kfree(ss->rx_small.shadow); in myri10ge_free_rings()
2430 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
3714 if (ss->rx_small.watchdog_needed) { in myri10ge_watchdog_timer()
3715 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_watchdog_timer()
3718 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= in myri10ge_watchdog_timer()
3720 ss->rx_small.watchdog_needed = 0; in myri10ge_watchdog_timer()
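
Lines 3714-3720 are the deferred-refill path: when watchdog_needed is set (presumably because an earlier refill could not get pages; the trigger is not visible in this listing), the watchdog timer retries the allocation and clears the flag once the fill level is back at or above myri10ge_fill_thresh. A sketch with the cut-off arguments filled in as assumptions:

	/* Watchdog-side retry of a failed rx_small refill (lines 3714-3720). */
	if (ss->rx_small.watchdog_needed) {
		/* Assumed trailing arguments: same buffer size as the fast
		 * path, plus a flag marking this as the watchdog retry. */
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 1);
		if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
		    myri10ge_fill_thresh)
			ss->rx_small.watchdog_needed = 0;	/* ring is healthy again */
	}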