Lines matching refs:ring. All hits below are from the vxge Ethernet driver (drivers/net/ethernet/neterion/vxge/vxge-main.c); the leading number on each line is that file's line number, and the trailing "in func()" names the enclosing function.
133 struct vxge_ring *ring; in VXGE_COMPLETE_ALL_RX() local
137 ring = &vdev->vpaths[i].ring; in VXGE_COMPLETE_ALL_RX()
138 vxge_hw_vpath_poll_rx(ring->handle); in VXGE_COMPLETE_ALL_RX()
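
The VXGE_COMPLETE_ALL_RX() hits open the pattern that recurs through this listing: every virtual path (vpath) owns one RX ring, and draining all of them means looping over vdev->vpaths and handing each ring's HW handle to vxge_hw_vpath_poll_rx(). A minimal sketch of that loop follows; the structs are hypothetical, pared-down stand-ins for the driver's private types (field names track the hits below, not the real layout) and are reused by the later sketches in this listing.

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/u64_stats_sync.h>

    /* Hypothetical stand-ins for the driver's private structs. */
    struct ring_stats_sketch {
        struct u64_stats_sync syncp;
        u64 rx_frms, rx_bytes, rx_mcast;
        u64 rx_errors, rx_dropped;
        u64 skb_alloc_fail, pci_map_fail;
        u64 prev_rx_frms;
    };

    struct ring_sketch {
        void *handle;                   /* opaque HW-layer ring handle */
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct napi_struct napi;
        struct napi_struct *napi_p;
        struct ring_stats_sketch stats;
        int driver_id, rx_vector_no;
        int budget, pkts_processed;
        int rx_hwts, vlan_tag_strip, last_status;
        int rx_buf_size;                /* illustrative; real sizes are per-MTU */
        unsigned long interrupt_count, jiffies;
    };

    struct vpath_sketch { struct ring_sketch ring; };

    struct vdev_sketch {
        struct net_device *ndev;
        struct pci_dev *pdev;
        int no_of_vpath;
        int rx_hwts, vlan_tag_strip;
        struct vpath_sketch *vpaths;
    };

    void vxge_hw_vpath_poll_rx(void *ringh);  /* real call; signature simplified */

    static void complete_all_rx_sketch(struct vdev_sketch *vdev)
    {
        int i;

        /* Drain completed RX descriptors on every virtual path. */
        for (i = 0; i < vdev->no_of_vpath; i++)
            vxge_hw_vpath_poll_rx(vdev->vpaths[i].ring.handle);
    }
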
194 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) in vxge_rx_alloc() argument
200 dev = ring->ndev; in vxge_rx_alloc()
202 ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc()
212 ring->stats.skb_alloc_fail++; in vxge_rx_alloc()
217 "%s: %s:%d Skb : 0x%p", ring->ndev->name, in vxge_rx_alloc()
226 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc()
234 static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) in vxge_rx_map() argument
240 ring->ndev->name, __func__, __LINE__); in vxge_rx_map()
244 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, in vxge_rx_map()
247 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { in vxge_rx_map()
248 ring->stats.pci_map_fail++; in vxge_rx_map()
253 ring->ndev->name, __func__, __LINE__, in vxge_rx_map()
259 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_map()
271 struct vxge_ring *ring = (struct vxge_ring *)userdata; in vxge_rx_initial_replenish() local
275 ring->ndev->name, __func__, __LINE__); in vxge_rx_initial_replenish()
276 if (vxge_rx_alloc(dtrh, ring, in vxge_rx_initial_replenish()
277 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL) in vxge_rx_initial_replenish()
280 if (vxge_rx_map(dtrh, ring)) { in vxge_rx_initial_replenish()
287 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_initial_replenish()
293 vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, in vxge_rx_complete() argument
298 ring->ndev->name, __func__, __LINE__); in vxge_rx_complete()
299 skb_record_rx_queue(skb, ring->driver_id); in vxge_rx_complete()
300 skb->protocol = eth_type_trans(skb, ring->ndev); in vxge_rx_complete()
302 u64_stats_update_begin(&ring->stats.syncp); in vxge_rx_complete()
303 ring->stats.rx_frms++; in vxge_rx_complete()
304 ring->stats.rx_bytes += pkt_length; in vxge_rx_complete()
307 ring->stats.rx_mcast++; in vxge_rx_complete()
308 u64_stats_update_end(&ring->stats.syncp); in vxge_rx_complete()
312 ring->ndev->name, __func__, __LINE__, skb->protocol); in vxge_rx_complete()
315 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) in vxge_rx_complete()
317 napi_gro_receive(ring->napi_p, skb); in vxge_rx_complete()
320 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_complete()
323 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, in vxge_re_pre_post() argument
326 pci_dma_sync_single_for_device(ring->pdev, in vxge_re_pre_post()
330 vxge_hw_ring_rxd_pre_post(ring->handle, dtr); in vxge_re_pre_post()
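
vxge_re_pre_post() is the recycle half of the DMA ownership handshake in this file: before a reused buffer is re-posted, pci_dma_sync_single_for_device() gives it back to the NIC, and the counterpart in vxge_rx_1b_compl() (the pci_dma_sync_single_for_cpu() hit further down) claims it back before the CPU reads the data. In outline:

    void vxge_hw_ring_rxd_pre_post(void *ringh, void *rxdh);  /* real call; simplified */

    static void re_pre_post_sketch(struct ring_sketch *ring, void *dtr,
                                   dma_addr_t data_dma, size_t data_size)
    {
        /* Hand the buffer back to the device, then re-post the descriptor. */
        pci_dma_sync_single_for_device(ring->pdev, data_dma, data_size,
                                       PCI_DMA_FROMDEVICE);
        vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
    }
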
357 struct vxge_ring *ring = (struct vxge_ring *)userdata; in vxge_rx_1b_compl() local
358 struct net_device *dev = ring->ndev; in vxge_rx_1b_compl()
369 ring->ndev->name, __func__, __LINE__); in vxge_rx_1b_compl()
371 if (ring->budget <= 0) in vxge_rx_1b_compl()
384 ring->ndev->name, __func__, __LINE__, skb); in vxge_rx_1b_compl()
393 ring->ndev->name, __func__, __LINE__, pkt_length); in vxge_rx_1b_compl()
405 ring->stats.rx_errors++; in vxge_rx_1b_compl()
408 ring->ndev->name, __func__, in vxge_rx_1b_compl()
415 vxge_re_pre_post(dtr, ring, rx_priv); in vxge_rx_1b_compl()
418 ring->stats.rx_dropped++; in vxge_rx_1b_compl()
424 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { in vxge_rx_1b_compl()
425 if (!vxge_rx_map(dtr, ring)) { in vxge_rx_1b_compl()
428 pci_unmap_single(ring->pdev, data_dma, in vxge_rx_1b_compl()
438 vxge_re_pre_post(dtr, ring, rx_priv); in vxge_rx_1b_compl()
442 ring->stats.rx_dropped++; in vxge_rx_1b_compl()
446 vxge_re_pre_post(dtr, ring, rx_priv); in vxge_rx_1b_compl()
449 ring->stats.rx_dropped++; in vxge_rx_1b_compl()
461 pci_dma_sync_single_for_cpu(ring->pdev, in vxge_rx_1b_compl()
467 ring->ndev->name, __func__, in vxge_rx_1b_compl()
471 vxge_re_pre_post(dtr, ring, rx_priv); in vxge_rx_1b_compl()
479 vxge_re_pre_post(dtr, ring, rx_priv); in vxge_rx_1b_compl()
485 ring->stats.skb_alloc_fail++; in vxge_rx_1b_compl()
500 if (ring->rx_hwts) { in vxge_rx_1b_compl()
516 vxge_rx_complete(ring, skb, ext_info.vlan, in vxge_rx_1b_compl()
519 ring->budget--; in vxge_rx_1b_compl()
520 ring->pkts_processed++; in vxge_rx_1b_compl()
521 if (!ring->budget) in vxge_rx_1b_compl()
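
The long run of vxge_rx_1b_compl() hits is the heart of the RX path: walk completed descriptors under the NAPI budget; on a hardware error or a failed buffer replacement, recycle the descriptor and count rx_errors/rx_dropped; otherwise deliver the old buffer via vxge_rx_complete() and charge the budget. The control flow, schematically (the descriptor-walking helpers are hypothetical stand-ins for the HW-layer API, and recycle_rxd() wraps the sync-and-repost step sketched above):

    /* Hypothetical stand-ins for the HW layer's descriptor-walking API. */
    void *next_completed_rxd(struct ring_sketch *ring);
    bool rxd_has_error(void *dtr);
    struct sk_buff *rxd_skb(void *dtr);
    int rxd_pkt_length(void *dtr);
    void recycle_rxd(struct ring_sketch *ring, void *dtr);  /* sync + pre-post */

    static void rx_1b_compl_sketch(struct ring_sketch *ring)
    {
        void *dtr;

        if (ring->budget <= 0)
            return;                 /* budget already spent in this poll */

        while ((dtr = next_completed_rxd(ring)) != NULL) {
            struct sk_buff *skb = rxd_skb(dtr);
            int len = rxd_pkt_length(dtr);

            if (rxd_has_error(dtr)) {
                ring->stats.rx_errors++;
                recycle_rxd(ring, dtr);     /* repost the same buffer */
                continue;
            }
            /* Park a fresh buffer in the descriptor; on failure, keep the
             * old one and drop this frame instead of delivering it. */
            if (rx_initial_replenish_sketch(ring, ring->rx_buf_size)) {
                ring->stats.rx_dropped++;
                recycle_rxd(ring, dtr);
                continue;
            }
            /* The real path unmaps the old buffer before delivery. */
            rx_complete_sketch(ring, skb, 0 /* vlan from ext_info */, len);
            ring->pkts_processed++;
            if (!--ring->budget)
                break;
        }
    }
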
1006 struct vxge_ring *ring = (struct vxge_ring *)userdata; in vxge_rx_term() local
1011 ring->ndev->name, __func__, __LINE__); in vxge_rx_term()
1015 pci_unmap_single(ring->pdev, rx_priv->data_dma, in vxge_rx_term()
1023 ring->ndev->name, __func__, __LINE__); in vxge_rx_term()
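
vxge_rx_term() is the teardown counterpart, invoked per posted descriptor when the ring is torn down: unmap the DMA buffer and free the skb that was parked in the descriptor's private area. In miniature:

    static void rx_term_sketch(struct ring_sketch *ring, struct sk_buff *skb,
                               dma_addr_t data_dma, size_t data_size)
    {
        pci_unmap_single(ring->pdev, data_dma, data_size, PCI_DMA_FROMDEVICE);
        dev_kfree_skb(skb);
    }
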
1567 vpath->ring.last_status = VXGE_HW_OK; in vxge_reset_vpath()
1589 hw_ring = vdev->vpaths[i].ring.handle; in vxge_config_ci_for_tti_rti()
1817 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); in vxge_poll_msix() local
1821 ring->budget = budget; in vxge_poll_msix()
1822 ring->pkts_processed = 0; in vxge_poll_msix()
1823 vxge_hw_vpath_poll_rx(ring->handle); in vxge_poll_msix()
1824 pkts_processed = ring->pkts_processed; in vxge_poll_msix()
1826 if (ring->pkts_processed < budget_org) { in vxge_poll_msix()
1831 (struct __vxge_hw_channel *)ring->handle, in vxge_poll_msix()
1832 ring->rx_vector_no); in vxge_poll_msix()
1848 struct vxge_ring *ring; in vxge_poll_inta() local
1853 ring = &vdev->vpaths[i].ring; in vxge_poll_inta()
1854 ring->budget = budget; in vxge_poll_inta()
1855 ring->pkts_processed = 0; in vxge_poll_inta()
1856 vxge_hw_vpath_poll_rx(ring->handle); in vxge_poll_inta()
1857 pkts_processed += ring->pkts_processed; in vxge_poll_inta()
1858 budget -= ring->pkts_processed; in vxge_poll_inta()
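
The two NAPI poll routines differ only in scope. vxge_poll_msix() services the single ring bound to its napi_struct and, when it finishes under budget, completes NAPI and unmasks that ring's MSI-X vector; vxge_poll_inta() walks every vpath ring with one shared budget. The MSI-X variant, sketched:

    #include <linux/kernel.h>       /* container_of() */

    /* Real call; the first argument is really a struct __vxge_hw_channel *. */
    void vxge_hw_channel_msix_unmask(void *channel, int msix_id);

    static int poll_msix_sketch(struct napi_struct *napi, int budget)
    {
        struct ring_sketch *ring = container_of(napi, struct ring_sketch, napi);
        int pkts_processed;

        ring->budget = budget;
        ring->pkts_processed = 0;
        vxge_hw_vpath_poll_rx(ring->handle);    /* drives the completion callback */
        pkts_processed = ring->pkts_processed;

        if (pkts_processed < budget) {
            napi_complete(napi);
            /* Re-arm only this ring's vector; other rings poll independently. */
            vxge_hw_channel_msix_unmask(ring->handle, ring->rx_vector_no);
        }
        return pkts_processed;
    }
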
2062 attr.ring_attr.userdata = &vpath->ring; in vxge_open_vpaths()
2064 vpath->ring.ndev = vdev->ndev; in vxge_open_vpaths()
2065 vpath->ring.pdev = vdev->pdev; in vxge_open_vpaths()
2071 vpath->ring.handle = in vxge_open_vpaths()
2079 u64_stats_init(&vpath->ring.stats.syncp); in vxge_open_vpaths()
2090 vpath->ring.rx_vector_no = 0; in vxge_open_vpaths()
2091 vpath->ring.rx_hwts = vdev->rx_hwts; in vxge_open_vpaths()
2094 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; in vxge_open_vpaths()
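
The vxge_open_vpaths() hits show each ring being wired up before its vpath is opened: the ring is registered as the HW layer's callback userdata, given netdev/PCI back-pointers, has its stats seqcount initialized with u64_stats_init() (required before the first update), and inherits rx_hwts and vlan_tag_strip from device-level settings. In outline:

    static void open_vpath_ring_sketch(struct vdev_sketch *vdev,
                                       struct ring_sketch *ring)
    {
        ring->ndev = vdev->ndev;
        ring->pdev = vdev->pdev;
        u64_stats_init(&ring->stats.syncp);
        ring->rx_hwts = vdev->rx_hwts;
        ring->vlan_tag_strip = vdev->vlan_tag_strip;
        /* The real code then passes the ring as attr.ring_attr.userdata when
         * opening the vpath, so the HW-layer callbacks get it back. */
    }
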
2149 static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) in adaptive_coalesce_rx_interrupts() argument
2151 ring->interrupt_count++; in adaptive_coalesce_rx_interrupts()
2152 if (time_before(ring->jiffies + HZ / 100, jiffies)) { in adaptive_coalesce_rx_interrupts()
2153 struct __vxge_hw_ring *hw_ring = ring->handle; in adaptive_coalesce_rx_interrupts()
2155 ring->jiffies = jiffies; in adaptive_coalesce_rx_interrupts()
2156 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && in adaptive_coalesce_rx_interrupts()
2164 ring->interrupt_count = 0; in adaptive_coalesce_rx_interrupts()
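
adaptive_coalesce_rx_interrupts() is a small load-adaptive moderation scheme: count interrupts, and roughly every 10 ms (HZ/100 jiffies, compared with the overflow-safe time_before() idiom) decide from the count whether to raise or relax moderation, then reset the counter. A sketch, with the timer-register updates behind hypothetical helpers:

    #include <linux/jiffies.h>

    #define MAX_IRQ_COUNT 100   /* stand-in for VXGE_T1A_MAX_INTERRUPT_COUNT */

    void increase_rx_moderation(struct ring_sketch *ring);  /* hypothetical */
    void decrease_rx_moderation(struct ring_sketch *ring);  /* hypothetical */

    static void coalesce_rx_sketch(struct ring_sketch *ring)
    {
        ring->interrupt_count++;
        if (time_before(ring->jiffies + HZ / 100, jiffies)) {
            ring->jiffies = jiffies;
            if (ring->interrupt_count > MAX_IRQ_COUNT)
                increase_rx_moderation(ring);   /* busy: batch more per IRQ */
            else
                decrease_rx_moderation(ring);   /* quiet: favor latency */
            ring->interrupt_count = 0;
        }
    }
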
2252 struct vxge_ring *ring = (struct vxge_ring *)dev_id; in vxge_rx_msix_napi_handle() local
2254 adaptive_coalesce_rx_interrupts(ring); in vxge_rx_msix_napi_handle()
2256 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, in vxge_rx_msix_napi_handle()
2257 ring->rx_vector_no); in vxge_rx_msix_napi_handle()
2259 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, in vxge_rx_msix_napi_handle()
2260 ring->rx_vector_no); in vxge_rx_msix_napi_handle()
2262 napi_schedule(&ring->napi); in vxge_rx_msix_napi_handle()
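
vxge_rx_msix_napi_handle() is the hard-IRQ half of the per-ring scheme: run the coalescing heuristic, mask and clear this ring's MSI-X vector so it stays silent while polling, then schedule the ring's own NAPI instance (which the poll routine unmasks again when done). Roughly:

    #include <linux/interrupt.h>

    /* Real calls; the first argument is really a struct __vxge_hw_channel *. */
    void vxge_hw_channel_msix_mask(void *channel, int msix_id);
    void vxge_hw_channel_msix_clear(void *channel, int msix_id);

    static irqreturn_t rx_msix_isr_sketch(int irq, void *dev_id)
    {
        struct ring_sketch *ring = dev_id;

        coalesce_rx_sketch(ring);
        vxge_hw_channel_msix_mask(ring->handle, ring->rx_vector_no);
        vxge_hw_channel_msix_clear(ring->handle, ring->rx_vector_no);
        napi_schedule(&ring->napi);
        return IRQ_HANDLED;
    }
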
2409 vpath->ring.rx_vector_no = (vpath->device_id * in vxge_enable_msix()
2513 &vdev->vpaths[vp_idx].ring); in vxge_add_isr()
2515 &vdev->vpaths[vp_idx].ring; in vxge_add_isr()
2631 struct vxge_ring *ring; in vxge_poll_vp_lockup() local
2636 ring = &vdev->vpaths[i].ring; in vxge_poll_vp_lockup()
2639 rx_frms = ACCESS_ONCE(ring->stats.rx_frms); in vxge_poll_vp_lockup()
2642 if (ring->stats.prev_rx_frms == rx_frms) { in vxge_poll_vp_lockup()
2643 status = vxge_hw_vpath_check_leak(ring->handle); in vxge_poll_vp_lockup()
2647 (VXGE_HW_FAIL == ring->last_status)) { in vxge_poll_vp_lockup()
2662 ring->stats.prev_rx_frms = rx_frms; in vxge_poll_vp_lockup()
2663 ring->last_status = status; in vxge_poll_vp_lockup()
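
vxge_poll_vp_lockup() is a software watchdog: if a ring's rx_frms counter has not advanced since the last scan, ask the HW layer whether the vpath is leaking; a failure on two consecutive scans marks the vpath as wedged and triggers a reset. Otherwise just refresh the snapshot. The shape of the check:

    enum hw_status_sketch { HW_OK_S, HW_FAIL_S };   /* stand-in for vxge_hw_status */

    enum hw_status_sketch check_leak(void *ringh);  /* hypothetical wrapper */
    void reset_vpath(struct ring_sketch *ring);     /* hypothetical */

    static void poll_vp_lockup_sketch(struct ring_sketch *ring)
    {
        /* ACCESS_ONCE() in the driver's era; READ_ONCE() in newer kernels. */
        u64 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
        enum hw_status_sketch status = HW_OK_S;

        if (ring->stats.prev_rx_frms == rx_frms) {
            status = check_leak(ring->handle);
            if (status == HW_FAIL_S && ring->last_status == HW_FAIL_S)
                reset_vpath(ring);  /* no traffic + leak twice in a row */
        }
        ring->stats.prev_rx_frms = rx_frms;
        ring->last_status = status;
    }
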
2761 vpath->ring.napi_p = &vdev->napi; in vxge_open()
2766 netif_napi_add(dev, &vpath->ring.napi, in vxge_open()
2768 napi_enable(&vpath->ring.napi); in vxge_open()
2769 vpath->ring.napi_p = &vpath->ring.napi; in vxge_open()
2913 napi_disable(&vdev->vpaths[i].ring.napi); in vxge_open()
2946 netif_napi_del(&vdev->vpaths[i].ring.napi); in vxge_napi_del_all()
3025 napi_disable(&vdev->vpaths[i].ring.napi); in do_vxge_close()
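
The vxge_open()/do_vxge_close() hits show the two NAPI topologies: under INTA all rings share the device-level NAPI instance (napi_p = &vdev->napi), while under MSI-X each ring registers, enables, and later disables/deletes its own instance. The MSI-X lifecycle, in outline:

    static void ring_napi_setup_sketch(struct net_device *ndev,
                                       struct ring_sketch *ring)
    {
        /* 4-arg form (weight) matches the driver's era; newer kernels drop it. */
        netif_napi_add(ndev, &ring->napi, poll_msix_sketch, 64);
        napi_enable(&ring->napi);
        ring->napi_p = &ring->napi; /* completion path schedules this one */
    }

    static void ring_napi_teardown_sketch(struct ring_sketch *ring)
    {
        napi_disable(&ring->napi);
        netif_napi_del(&ring->napi);
    }
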
3134 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats; in vxge_get_stats64()
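
vxge_get_stats64() is the reader side of the seqcount used in vxge_rx_complete(): snapshot the per-ring counters and retry until the snapshot was not torn by a concurrent writer. The canonical fetch loop:

    static void fetch_ring_stats_sketch(struct ring_stats_sketch *stats,
                                        u64 *frms, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&stats->syncp);
            *frms = stats->rx_frms;
            *bytes = stats->rx_bytes;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
    }
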
3247 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts; in vxge_hwtstamp_set()
3819 device_config->vp_config[i].ring.enable = in vxge_config_vpaths()
3822 device_config->vp_config[i].ring.ring_blocks = in vxge_config_vpaths()
3825 device_config->vp_config[i].ring.buffer_mode = in vxge_config_vpaths()
3828 device_config->vp_config[i].ring.rxds_limit = in vxge_config_vpaths()
3831 device_config->vp_config[i].ring.scatter_mode = in vxge_config_vpaths()
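
Finally, vxge_config_vpaths() fills the ring section of the HW layer's per-vpath device_config before initialization: an enable flag, the number of RxD blocks, the buffer mode (one-buffer mode, matching the _1b_ completion path above), the RxD posting limit, and the scatter mode. Schematically, with a hypothetical mirror struct and caller-supplied values:

    /* Hypothetical trimmed mirror of the HW layer's ring config block. */
    struct ring_config_sketch {
        int enable, ring_blocks, buffer_mode, rxds_limit, scatter_mode;
    };

    static void config_ring_sketch(struct ring_config_sketch *cfg,
                                   int blocks, int rxds_limit)
    {
        cfg->enable = 1;
        cfg->ring_blocks = blocks;
        cfg->buffer_mode = 1;       /* 1-buffer RX mode */
        cfg->rxds_limit = rxds_limit;
        cfg->scatter_mode = 0;
    }
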
4629 vdev->vpaths[j].ring.driver_id = j; in vxge_probe()