Lines matching refs:rq — identifier cross-reference hits for the receive-queue pointer rq in the vmxnet3 driver. Each hit shows the source line number, the matching line, and its enclosing function; "argument" and "local" mark hits where rq is a function parameter or a local variable.

561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,  in vmxnet3_rq_alloc_rx_buf()  argument
565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
582 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
593 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
607 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
617 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
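
The hits at 582-617 are all failure paths of one fill loop: each empty slot gets a fresh buffer, and any allocation failure bumps rx_buf_alloc_failure and abandons the pass so a later refill can retry. A minimal userspace sketch of that pattern (all names below are illustrative, not the driver's):

    #include <stdlib.h>

    struct slot { void *buf; size_t len; };

    struct ring {
        struct slot *slots;
        unsigned size, next2fill;
        unsigned long rx_buf_alloc_failure;
    };

    /* Fill up to num_to_alloc empty slots; return how many succeeded. */
    static unsigned fill_ring(struct ring *r, unsigned num_to_alloc)
    {
        unsigned done = 0;

        while (num_to_alloc--) {
            struct slot *s = &r->slots[r->next2fill];

            s->buf = malloc(s->len);        /* stands in for an skb/page alloc */
            if (!s->buf) {
                r->rx_buf_alloc_failure++;  /* count it, retry on a later pass */
                break;
            }
            r->next2fill = (r->next2fill + 1) % r->size;
            done++;
        }
        return done;
    }
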
1158 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, in vmxnet3_rx_error() argument
1161 rq->stats.drop_err++; in vmxnet3_rx_error()
1163 rq->stats.drop_fcs++; in vmxnet3_rx_error()
1165 rq->stats.drop_total++; in vmxnet3_rx_error()
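
vmxnet3_rx_error() keeps one counter per cause plus a running total; the condition guarding drop_fcs (source line 1162) did not match rq and so is not shown above. A sketch of the accounting, with the checksum test reduced to a flag:

    struct rx_stats { unsigned long drop_err, drop_fcs, drop_total; };

    static void rx_error(struct rx_stats *st, int bad_fcs)
    {
        st->drop_err++;      /* every hardware-reported receive error */
        if (bad_fcs)         /* guard elided in the hits above */
            st->drop_fcs++;  /* checksum failures get their own counter */
        st->drop_total++;    /* everything counts toward the total */
    }
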
1231 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, in vmxnet3_rq_rx_complete() argument
1240 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; in vmxnet3_rq_rx_complete()
1246 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, in vmxnet3_rq_rx_complete()
1248 while (rcd->gen == rq->comp_ring.gen) { in vmxnet3_rq_rx_complete()
1263 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); in vmxnet3_rq_rx_complete()
1266 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1267 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, in vmxnet3_rq_rx_complete()
1269 rbi = rq->buf_info[ring_idx] + idx; in vmxnet3_rq_rx_complete()
1275 vmxnet3_rx_error(rq, rcd, ctx, adapter); in vmxnet3_rq_rx_complete()
1281 rcd->rqID != rq->qid); in vmxnet3_rq_rx_complete()
1303 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1305 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1318 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1320 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1377 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1390 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1454 napi_gro_receive(&rq->napi, skb); in vmxnet3_rq_rx_complete()
1464 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1477 if (unlikely(rq->shared->updateRxProd)) { in vmxnet3_rq_rx_complete()
1479 rxprod_reg[ring_idx] + rq->qid * 8, in vmxnet3_rq_rx_complete()
1483 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); in vmxnet3_rq_rx_complete()
1485 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); in vmxnet3_rq_rx_complete()
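
vmxnet3_rq_rx_complete() is the consumer side of a generation-bit completion ring: the device writes each descriptor with the current generation, the loop at 1248 processes while the descriptor's gen matches the ring's, and the consumer flips its expected gen each time next2proc wraps, so no producer index ever has to be read. The doorbell at 1477-1479 is written only when the device sets updateRxProd. A self-contained model of the consume loop (descriptor layout simplified to the gen bit):

    struct comp_desc { unsigned gen:1; /* payload fields omitted */ };

    struct comp_ring {
        struct comp_desc *base;
        unsigned size, next2proc;
        unsigned gen;                   /* expected generation bit */
    };

    /* Process completions until the budget runs out or the device's
     * generation bit no longer matches ours (ring is drained). */
    static int poll_completions(struct comp_ring *cr, int budget)
    {
        int done = 0;

        while (done < budget && cr->base[cr->next2proc].gen == cr->gen) {
            /* ... handle one completed packet here ... */
            done++;
            if (++cr->next2proc == cr->size) {
                cr->next2proc = 0;
                cr->gen ^= 1;           /* flip expectation on wrap-around */
            }
        }
        return done;
    }
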
1493 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, in vmxnet3_rq_cleanup() argument
1500 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { in vmxnet3_rq_cleanup()
1505 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); in vmxnet3_rq_cleanup()
1508 rq->buf_info[ring_idx][i].skb) { in vmxnet3_rq_cleanup()
1511 dev_kfree_skb(rq->buf_info[ring_idx][i].skb); in vmxnet3_rq_cleanup()
1512 rq->buf_info[ring_idx][i].skb = NULL; in vmxnet3_rq_cleanup()
1514 rq->buf_info[ring_idx][i].page) { in vmxnet3_rq_cleanup()
1517 put_page(rq->buf_info[ring_idx][i].page); in vmxnet3_rq_cleanup()
1518 rq->buf_info[ring_idx][i].page = NULL; in vmxnet3_rq_cleanup()
1522 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
1523 rq->rx_ring[ring_idx].next2fill = in vmxnet3_rq_cleanup()
1524 rq->rx_ring[ring_idx].next2comp = 0; in vmxnet3_rq_cleanup()
1527 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
1528 rq->comp_ring.next2proc = 0; in vmxnet3_rq_cleanup()
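
Cleanup frees whatever each buf_info slot holds (skb or page, per its buf_type), NULLs the pointer, and rewinds both rings to a pristine state: fill/comp cursors to 0 and gen back to VMXNET3_INIT_GEN. The NULLed pointers are what lets vmxnet3_rq_destroy() assert emptiness at 1552. A sketch with both buffer kinds collapsed to one pointer:

    #include <stdlib.h>

    struct buf_info { void *ptr; };          /* skb/page collapsed together */
    struct cmd_ring { unsigned size, next2fill, next2comp, gen; };

    #define INIT_GEN 1                       /* stands in for VMXNET3_INIT_GEN */

    static void rq_cleanup(struct cmd_ring *r, struct buf_info *bi)
    {
        unsigned i;

        for (i = 0; i < r->size; i++) {
            free(bi[i].ptr);                 /* dev_kfree_skb() or put_page(),
                                                chosen by buf_type in the driver */
            bi[i].ptr = NULL;                /* lets destroy assert emptiness */
        }
        r->next2fill = r->next2comp = 0;     /* rewind both cursors, as at 1523-1524 */
        r->gen = INIT_GEN;                   /* restart the generation sequence */
    }
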
1542 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, in vmxnet3_rq_destroy() argument
1550 if (rq->buf_info[i]) { in vmxnet3_rq_destroy()
1551 for (j = 0; j < rq->rx_ring[i].size; j++) in vmxnet3_rq_destroy()
1552 BUG_ON(rq->buf_info[i][j].page != NULL); in vmxnet3_rq_destroy()
1558 if (rq->rx_ring[i].base) { in vmxnet3_rq_destroy()
1560 rq->rx_ring[i].size in vmxnet3_rq_destroy()
1562 rq->rx_ring[i].base, in vmxnet3_rq_destroy()
1563 rq->rx_ring[i].basePA); in vmxnet3_rq_destroy()
1564 rq->rx_ring[i].base = NULL; in vmxnet3_rq_destroy()
1566 rq->buf_info[i] = NULL; in vmxnet3_rq_destroy()
1569 if (rq->comp_ring.base) { in vmxnet3_rq_destroy()
1570 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
1572 rq->comp_ring.base, rq->comp_ring.basePA); in vmxnet3_rq_destroy()
1573 rq->comp_ring.base = NULL; in vmxnet3_rq_destroy()
1576 if (rq->buf_info[0]) { in vmxnet3_rq_destroy()
1578 (rq->rx_ring[0].size + rq->rx_ring[1].size); in vmxnet3_rq_destroy()
1579 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], in vmxnet3_rq_destroy()
1580 rq->buf_info_pa); in vmxnet3_rq_destroy()
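
Destroy frees each DMA region only if its base pointer is set and clears it afterwards, so the whole teardown is idempotent and safe to run from a half-finished create. The pattern, reduced to one region:

    #include <stdlib.h>

    struct region { void *base; /* dma handle omitted */ };

    /* Free-if-present then clear, so repeated calls are harmless. */
    static void region_destroy(struct region *r)
    {
        if (r->base) {          /* skip regions that never got allocated */
            free(r->base);      /* dma_free_coherent() in the driver */
            r->base = NULL;     /* make a second destroy a no-op */
        }
    }
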
1586 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, in vmxnet3_rq_init() argument
1592 for (i = 0; i < rq->rx_ring[0].size; i++) { in vmxnet3_rq_init()
1596 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; in vmxnet3_rq_init()
1597 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
1599 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
1600 rq->buf_info[0][i].len = PAGE_SIZE; in vmxnet3_rq_init()
1603 for (i = 0; i < rq->rx_ring[1].size; i++) { in vmxnet3_rq_init()
1604 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
1605 rq->buf_info[1][i].len = PAGE_SIZE; in vmxnet3_rq_init()
1610 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; in vmxnet3_rq_init()
1612 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * in vmxnet3_rq_init()
1614 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
1616 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, in vmxnet3_rq_init()
1621 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
1624 rq->comp_ring.next2proc = 0; in vmxnet3_rq_init()
1625 memset(rq->comp_ring.base, 0, rq->comp_ring.size * in vmxnet3_rq_init()
1627 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
1630 rq->rx_ctx.skb = NULL; in vmxnet3_rq_init()
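
Init types every slot before the first fill: ring 0 mixes small skb-sized buffers with whole pages (the selector between 1592 and 1599 did not match rq and is elided here), while ring 1 is pages only, used for the parts of packets too big for a ring-0 buffer. A sketch that takes the elided selector as a callback:

    #include <stddef.h>

    enum buf_type { BUF_SKB, BUF_PAGE };
    struct buf_info { enum buf_type type; size_t len; };

    #define MY_PAGE_SIZE 4096   /* illustrative; the kernel uses PAGE_SIZE */

    /* want_skb(i) models the elided ring-0 condition. */
    static void rq_init_bufs(struct buf_info *ring0, unsigned n0, size_t skb_len,
                             struct buf_info *ring1, unsigned n1,
                             int (*want_skb)(unsigned idx))
    {
        unsigned i;

        for (i = 0; i < n0; i++) {        /* ring 0: mixed skb/page slots */
            if (want_skb(i)) {
                ring0[i].type = BUF_SKB;
                ring0[i].len  = skb_len;  /* adapter->skb_buf_size */
            } else {
                ring0[i].type = BUF_PAGE;
                ring0[i].len  = MY_PAGE_SIZE;
            }
        }
        for (i = 0; i < n1; i++) {        /* ring 1: pages only */
            ring1[i].type = BUF_PAGE;
            ring1[i].len  = MY_PAGE_SIZE;
        }
    }
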
1657 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) in vmxnet3_rq_create() argument
1665 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); in vmxnet3_rq_create()
1666 rq->rx_ring[i].base = dma_alloc_coherent( in vmxnet3_rq_create()
1668 &rq->rx_ring[i].basePA, in vmxnet3_rq_create()
1670 if (!rq->rx_ring[i].base) { in vmxnet3_rq_create()
1677 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); in vmxnet3_rq_create()
1678 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
1679 &rq->comp_ring.basePA, in vmxnet3_rq_create()
1681 if (!rq->comp_ring.base) { in vmxnet3_rq_create()
1686 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + in vmxnet3_rq_create()
1687 rq->rx_ring[1].size); in vmxnet3_rq_create()
1688 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, in vmxnet3_rq_create()
1693 rq->buf_info[0] = bi; in vmxnet3_rq_create()
1694 rq->buf_info[1] = bi + rq->rx_ring[0].size; in vmxnet3_rq_create()
1699 vmxnet3_rq_destroy(rq, adapter); in vmxnet3_rq_create()
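
Create allocates both descriptor rings, the completion ring, and a single contiguous buf_info array that 1693-1694 then split between the two rings by offset; every failure branch funnels into vmxnet3_rq_destroy(), which the check-and-NULL pattern above makes safe. The split, in miniature:

    #include <stdlib.h>

    struct buf_info { void *ptr; size_t len; };

    struct rx_queue {
        struct buf_info *buf_info[2];   /* one slice per hardware ring */
    };

    /* Back both rings' bookkeeping with a single zeroed allocation,
     * then split it by offset, as at lines 1693-1694. */
    static int rq_create_bufinfo(struct rx_queue *rq, unsigned n0, unsigned n1)
    {
        struct buf_info *bi = calloc(n0 + n1, sizeof(*bi));

        if (!bi)
            return -1;                  /* caller funnels into its destroy path */
        rq->buf_info[0] = bi;
        rq->buf_info[1] = bi + n0;      /* ring 1's entries follow ring 0's */
        return 0;
    }
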
1767 struct vmxnet3_rx_queue *rq = container_of(napi, in vmxnet3_poll_rx_only() local
1769 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only()
1777 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
1781 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); in vmxnet3_poll_rx_only()
1785 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
1831 struct vmxnet3_rx_queue *rq = data; in vmxnet3_msix_rx() local
1832 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx()
1836 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
1837 napi_schedule(&rq->napi); in vmxnet3_msix_rx()
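
Lines 1836-1837 and 1781-1785 are the two halves of the standard NAPI handshake: the MSI-X handler masks the queue's interrupt and schedules the poll; the poll consumes up to budget completions and re-enables the interrupt only when it finishes under budget, otherwise it stays scheduled. A userspace model of that control flow (the kernel napi_* calls are stubbed as flags):

    #include <stdbool.h>

    struct queue {
        bool irq_enabled;
        bool napi_scheduled;
        int  pending;                 /* completions waiting to be processed */
    };

    static void msix_rx_handler(struct queue *q)
    {
        q->irq_enabled = false;       /* vmxnet3_disable_intr() */
        q->napi_scheduled = true;     /* napi_schedule() */
    }

    static int poll_rx_only(struct queue *q, int budget)
    {
        int done = q->pending < budget ? q->pending : budget;

        q->pending -= done;           /* vmxnet3_rq_rx_complete() */
        if (done < budget) {          /* ring drained within budget */
            q->napi_scheduled = false;    /* napi_complete() */
            q->irq_enabled = true;        /* vmxnet3_enable_intr() */
        }
        return done;                  /* == budget keeps the poll scheduled */
    }
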
2022 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs() local
2023 rq->qid = i; in vmxnet3_request_irqs()
2024 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
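
Each RX queue owns two hardware ring IDs: qid addresses ring 0 and qid2 = qid + num_rx_queues addresses ring 1. That is what lets the completion path assert rcd->rqID against both at 1263 and pick the ring with a single comparison at 1281. A sketch of the mapping:

    /* Map a completion's ring ID back to the queue's ring 0 or ring 1,
     * assuming qid = i and qid2 = i + num_rx_queues as set up above. */
    static unsigned ring_idx_from_rqid(unsigned rqid, unsigned qid)
    {
        return rqid == qid ? 0 : 1;   /* anything else trips the BUG_ON */
    }
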
2326 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared() local
2328 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); in vmxnet3_setup_driver_shared()
2329 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); in vmxnet3_setup_driver_shared()
2330 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2331 rqc->ddPA = cpu_to_le64(rq->buf_info_pa); in vmxnet3_setup_driver_shared()
2332 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); in vmxnet3_setup_driver_shared()
2333 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); in vmxnet3_setup_driver_shared()
2334 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); in vmxnet3_setup_driver_shared()
2339 rqc->intrIdx = rq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
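
The queue configuration published through shared memory is little-endian by contract, so every physical address and size goes through cpu_to_le64()/cpu_to_le32() before the device reads it. A userspace analogue using glibc's htole64()/htole32() (the rqc layout here is simplified, not the real Vmxnet3_RxQueueConf):

    #include <endian.h>
    #include <stdint.h>

    struct rq_conf {                  /* simplified device-visible struct */
        uint64_t ring_base_pa[2];
        uint64_t comp_ring_base_pa;
        uint32_t ring_size[2];
        uint32_t comp_ring_size;
    };

    static void publish_rq(struct rq_conf *rqc,
                           uint64_t pa0, uint64_t pa1, uint64_t comp_pa,
                           uint32_t n0, uint32_t n1, uint32_t ncomp)
    {
        rqc->ring_base_pa[0]   = htole64(pa0);   /* cpu_to_le64() in-kernel */
        rqc->ring_base_pa[1]   = htole64(pa1);
        rqc->comp_ring_base_pa = htole64(comp_pa);
        rqc->ring_size[0]      = htole32(n0);    /* cpu_to_le32() */
        rqc->ring_size[1]      = htole32(n1);
        rqc->comp_ring_size    = htole32(ncomp);
    }
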
2625 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; in vmxnet3_adjust_rx_ring_size() local
2659 rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
2660 rq->rx_ring[0].size = ring0_size; in vmxnet3_adjust_rx_ring_size()
2661 rq->rx_ring[1].size = ring1_size; in vmxnet3_adjust_rx_ring_size()
2662 rq->comp_ring.size = comp_size; in vmxnet3_adjust_rx_ring_size()
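
The adjust routine recomputes the ring-0, ring-1, and completion-ring sizes once, then stamps them onto every queue (2659-2662). vmxnet3 ring sizes must stay multiples of the device's alignment (VMXNET3_RING_SIZE_ALIGN in the driver headers); a sketch of the round-up-and-clamp, with the alignment taken as a parameter since its value does not appear in the hits:

    /* Round a requested size up to a multiple of `align` (a power of two),
     * then clamp to the ring's maximum, mirroring the adjust logic above. */
    static unsigned ring_size_fit(unsigned requested, unsigned align, unsigned max)
    {
        unsigned sz = (requested + align - 1) & ~(align - 1);

        return sz > max ? max & ~(align - 1) : sz;
    }
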
2695 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues() local
2698 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
2699 rq->adapter = adapter; in vmxnet3_create_queues()
2700 err = vmxnet3_rq_create(rq, adapter); in vmxnet3_create_queues()