Lines matching refs:tx_ring (qlge Ethernet driver; each entry gives the source line number, the matching code, and the enclosing function)
2100 struct tx_ring *tx_ring; in ql_process_mac_tx_intr() local
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2105 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; in ql_process_mac_tx_intr()
2107 tx_ring->tx_bytes += (tx_ring_desc->skb)->len; in ql_process_mac_tx_intr()
2108 tx_ring->tx_packets++; in ql_process_mac_tx_intr()
2133 atomic_inc(&tx_ring->tx_count); in ql_process_mac_tx_intr()
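The completion path above (ql_process_mac_tx_intr) looks the ring up by the response's txq_idx, credits the per-ring byte and packet counters, and hands the descriptor back to the free pool with atomic_inc(). A minimal user-space model of that accounting follows; the struct layout and field names are simplified assumptions, not the driver's real definitions, and C11 <stdatomic.h> stands in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    struct tx_ring {
        unsigned long tx_bytes;
        unsigned long tx_packets;
        atomic_int    tx_count;    /* free descriptors in the ring */
    };

    /* One transmit completed: credit the stats (2107-2108) and return
     * the descriptor to the free pool (2133). */
    void complete_one_tx(struct tx_ring *ring, unsigned int skb_len)
    {
        ring->tx_bytes   += skb_len;
        ring->tx_packets += 1;
        atomic_fetch_add(&ring->tx_count, 1);  /* models atomic_inc() */
    }

    int main(void)
    {
        struct tx_ring ring = { .tx_count = 60 };
        complete_one_tx(&ring, 1514);
        printf("packets=%lu bytes=%lu free=%d\n", ring.tx_packets,
               ring.tx_bytes, atomic_load(&ring.tx_count));
        return 0;
    }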
2202 struct tx_ring *tx_ring; in ql_clean_outbound_rx_ring() local
2230 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2231 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2232 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in ql_clean_outbound_rx_ring()
2237 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in ql_clean_outbound_rx_ring()
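ql_clean_outbound_rx_ring applies hysteresis when restarting a stopped transmit subqueue: the queue is woken only once more than a quarter of the ring is free again, so it does not flap on every completed frame. A hedged sketch of that predicate (field names are assumptions):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct tx_ring {
        atomic_int   tx_count;  /* free descriptors */
        unsigned int wq_len;    /* total descriptors in the ring */
    };

    /* Mirrors the check at 2231-2232: wake only if the subqueue was
     * stopped and more than wq_len/4 descriptors are free again. */
    bool should_wake_subqueue(struct tx_ring *ring, bool stopped)
    {
        return stopped &&
               atomic_load(&ring->tx_count) > (int)(ring->wq_len / 4);
    }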
2639 struct tx_ring *tx_ring; in qlge_send() local
2642 tx_ring = &qdev->tx_ring[tx_ring_idx]; in qlge_send()
2647 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2651 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2652 tx_ring->tx_errors++; in qlge_send()
2655 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; in qlge_send()
2687 tx_ring->tx_errors++; in qlge_send()
2691 tx_ring->prod_idx++; in qlge_send()
2692 if (tx_ring->prod_idx == tx_ring->wq_len) in qlge_send()
2693 tx_ring->prod_idx = 0; in qlge_send()
2696 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); in qlge_send()
2699 tx_ring->prod_idx, skb->len); in qlge_send()
2701 atomic_dec(&tx_ring->tx_count); in qlge_send()
2703 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2704 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2705 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in qlge_send()
2710 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in qlge_send()
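qlge_send shows the producer side: refuse the frame and stop the subqueue when fewer than two free descriptors remain (2647-2652), fill the slot at prod_idx, wrap the index at wq_len (2691-2693), ring the doorbell (2696), and consume one credit (2701). The sketch below models that flow in user space; the doorbell write is replaced by a printf, and the two-descriptor floor is taken directly from line 2647:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct tx_ring {
        atomic_int   tx_count;  /* free descriptors */
        unsigned int prod_idx;  /* next slot to fill */
        unsigned int wq_len;    /* ring size */
    };

    /* Returns false when the ring is too full to accept a frame. */
    bool send_one(struct tx_ring *ring)
    {
        if (atomic_load(&ring->tx_count) < 2)
            return false;          /* caller would stop the subqueue */

        /* ...a real driver builds the descriptor at prod_idx here... */

        ring->prod_idx++;
        if (ring->prod_idx == ring->wq_len)  /* wrap, as at 2692-2693 */
            ring->prod_idx = 0;

        /* The driver writes prod_idx to a doorbell register here
         * (ql_write_db_reg, 2696); this model just reports it. */
        printf("doorbell <- prod_idx=%u\n", ring->prod_idx);

        atomic_fetch_sub(&ring->tx_count, 1);  /* one slot consumed */
        return true;
    }

    int main(void)
    {
        struct tx_ring ring = { .tx_count = 4, .wq_len = 4 };
        while (send_one(&ring))
            ;
        return 0;
    }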
2763 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_init_tx_ring() argument
2769 mac_iocb_ptr = tx_ring->wq_base; in ql_init_tx_ring()
2770 tx_ring_desc = tx_ring->q; in ql_init_tx_ring()
2771 for (i = 0; i < tx_ring->wq_len; i++) { in ql_init_tx_ring()
2778 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); in ql_init_tx_ring()
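ql_init_tx_ring walks the work queue once, pairing each hardware IOCB slot with a software shadow descriptor, then marks the whole ring free by setting tx_count to wq_len. A compact model of that pairing loop; the shadow-descriptor fields are illustrative assumptions:

    #include <stdatomic.h>

    struct ob_mac_iocb_req { unsigned char raw[32]; };  /* placeholder IOCB */

    struct tx_ring_desc {
        struct ob_mac_iocb_req *queue_entry;  /* assumed field names */
        int                     index;
    };

    struct tx_ring {
        struct ob_mac_iocb_req *wq_base;  /* hardware work queue */
        struct tx_ring_desc    *q;        /* software shadow array */
        unsigned int            wq_len;
        atomic_int              tx_count;
    };

    /* Pair shadow descriptors with IOCB slots (loop at 2771) and mark
     * every descriptor free (atomic_set at 2778). */
    void init_tx_ring(struct tx_ring *ring)
    {
        for (unsigned int i = 0; i < ring->wq_len; i++) {
            ring->q[i].index       = (int)i;
            ring->q[i].queue_entry = &ring->wq_base[i];
        }
        atomic_store(&ring->tx_count, (int)ring->wq_len);
    }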
2782 struct tx_ring *tx_ring) in ql_free_tx_resources() argument
2784 if (tx_ring->wq_base) { in ql_free_tx_resources()
2785 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_free_tx_resources()
2786 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_free_tx_resources()
2787 tx_ring->wq_base = NULL; in ql_free_tx_resources()
2789 kfree(tx_ring->q); in ql_free_tx_resources()
2790 tx_ring->q = NULL; in ql_free_tx_resources()
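ql_free_tx_resources releases the two halves of a ring: the DMA-coherent work queue (pci_free_consistent) and the kmalloc'ed shadow array, nulling both pointers so a repeated call is harmless. A user-space stand-in, with free() modeling both kernel release calls:

    #include <stdlib.h>

    struct tx_ring { void *wq_base; void *q; };

    void free_tx_resources(struct tx_ring *ring)
    {
        if (ring->wq_base) {
            free(ring->wq_base);  /* models pci_free_consistent() */
            ring->wq_base = NULL;
        }
        free(ring->q);            /* kfree(NULL) is a no-op, like free(NULL) */
        ring->q = NULL;
    }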
2794 struct tx_ring *tx_ring) in ql_alloc_tx_resources() argument
2796 tx_ring->wq_base = in ql_alloc_tx_resources()
2797 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2798 &tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2800 if ((tx_ring->wq_base == NULL) || in ql_alloc_tx_resources()
2801 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) in ql_alloc_tx_resources()
2804 tx_ring->q = in ql_alloc_tx_resources()
2805 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); in ql_alloc_tx_resources()
2806 if (tx_ring->q == NULL) in ql_alloc_tx_resources()
2811 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2812 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2813 tx_ring->wq_base = NULL; in ql_alloc_tx_resources()
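ql_alloc_tx_resources is the mirror image: allocate the coherent work queue, reject a DMA address that violates the WQ_ADDR_ALIGN mask (2800-2801), allocate the shadow array, and unwind the first allocation if the second fails. The sketch below keeps that shape; aligned_alloc() models pci_alloc_consistent(), and the mask value is an assumption, not the driver's constant:

    #include <stdint.h>
    #include <stdlib.h>

    #define WQ_ADDR_ALIGN 0x3f   /* alignment mask; illustrative value */

    struct tx_ring {
        void     *wq_base;
        uintptr_t wq_base_dma;   /* models the DMA bus address */
        size_t    wq_size;       /* must be a multiple of the alignment
                                  * for aligned_alloc() in strict C11 */
        void     *q;
        unsigned  wq_len;
    };

    int alloc_tx_resources(struct tx_ring *ring)
    {
        ring->wq_base = aligned_alloc(WQ_ADDR_ALIGN + 1, ring->wq_size);
        if (!ring->wq_base)
            return -1;
        ring->wq_base_dma = (uintptr_t)ring->wq_base;
        if (ring->wq_base_dma & WQ_ADDR_ALIGN)  /* as at 2800-2801 */
            goto err;

        ring->q = malloc(ring->wq_len * sizeof(int));  /* shadow array */
        if (!ring->q)
            goto err;
        return 0;

    err:
        free(ring->wq_base);   /* unwind, as at 2811-2813 */
        ring->wq_base = NULL;
        return -1;
    }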
3053 struct tx_ring *tx_ring; in ql_tx_ring_clean() local
3062 tx_ring = &qdev->tx_ring[j]; in ql_tx_ring_clean()
3063 for (i = 0; i < tx_ring->wq_len; i++) { in ql_tx_ring_clean()
3064 tx_ring_desc = &tx_ring->q[i]; in ql_tx_ring_clean()
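ql_tx_ring_clean sweeps every descriptor of every ring on shutdown, freeing any packet still pending; in the driver each buffer would also be DMA-unmapped before dev_kfree_skb(). A simplified model of the nested sweep:

    #include <stdlib.h>

    struct tx_ring_desc { void *skb; };
    struct tx_ring      { struct tx_ring_desc *q; unsigned wq_len; };

    /* Nested loops mirroring 3062-3064; free() stands in for the
     * unmap-and-dev_kfree_skb() the driver performs per descriptor. */
    void tx_ring_clean(struct tx_ring *rings, unsigned ring_count)
    {
        for (unsigned j = 0; j < ring_count; j++) {
            for (unsigned i = 0; i < rings[j].wq_len; i++) {
                struct tx_ring_desc *desc = &rings[j].q[i];
                free(desc->skb);
                desc->skb = NULL;
            }
        }
    }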
3084 ql_free_tx_resources(qdev, &qdev->tx_ring[i]); in ql_free_mem_resources()
3107 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { in ql_alloc_mem_resources()
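Lines 3084 and 3107 are the per-ring call sites inside the device-wide teardown and setup loops; the setup loop has to release whatever it already allocated when one ring fails. A hedged sketch of that allocate-all-or-unwind pattern (helper names are illustrative):

    #include <stdlib.h>

    struct tx_ring { void *q; };

    int  alloc_one(struct tx_ring *r) { r->q = malloc(64); return r->q ? 0 : -1; }
    void free_one(struct tx_ring *r)  { free(r->q); r->q = NULL; }

    /* Allocate every ring, or free the rings that succeeded and fail. */
    int alloc_all(struct tx_ring *rings, int count)
    {
        for (int i = 0; i < count; i++) {
            if (alloc_one(&rings[i]) != 0) {
                while (i--)            /* unwind completed allocations */
                    free_one(&rings[i]);
                return -1;
            }
        }
        return 0;
    }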
3256 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_start_tx_ring() argument
3258 struct wqicb *wqicb = (struct wqicb *)tx_ring; in ql_start_tx_ring()
3260 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); in ql_start_tx_ring()
3262 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3264 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3271 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; in ql_start_tx_ring()
3272 tx_ring->prod_idx = 0; in ql_start_tx_ring()
3274 tx_ring->valid_db_reg = doorbell_area + 0x04; in ql_start_tx_ring()
3279 tx_ring->cnsmr_idx_sh_reg = shadow_reg; in ql_start_tx_ring()
3280 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; in ql_start_tx_ring()
3282 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); in ql_start_tx_ring()
3285 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); in ql_start_tx_ring()
3287 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); in ql_start_tx_ring()
3289 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); in ql_start_tx_ring()
3291 ql_init_tx_ring(qdev, tx_ring); in ql_start_tx_ring()
3294 (u16) tx_ring->wq_id); in ql_start_tx_ring()
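ql_start_tx_ring computes the ring's slice of the doorbell area and shadow-register page from wq_id, then fills a work-queue init control block (wqicb) with the ring length plus flag bits, the completion-queue id, and the DMA addresses the chip needs. The sketch below keeps that layout; the struct packing and flag values are assumptions (the real block is defined by the hardware spec), and the cpu_to_le16/64 conversions are omitted on the assumption of a little-endian host:

    #include <stdint.h>

    #define DB_PAGE_SIZE   4096
    #define Q_LEN_V        (1 << 4)   /* flag bits: illustrative values */
    #define Q_LEN_CPP_CONT 0x0000

    struct wqicb {                    /* work-queue init control block */
        uint16_t len;                 /* ring length | flag bits (3282) */
        uint16_t flags;
        uint16_t cq_id_rss;           /* completion queue id (3285) */
        uint16_t rsvd;
        uint64_t addr;                /* work-queue DMA address (3287) */
        uint64_t cnsmr_idx_addr;      /* shadow consumer index (3289) */
    };

    struct tx_ring {
        unsigned  wq_id, wq_len, cq_id;
        uint64_t  wq_base_dma, cnsmr_idx_sh_reg_dma;
        volatile uint32_t *prod_idx_db_reg;
        unsigned  prod_idx;
    };

    void start_tx_ring(struct wqicb *w, struct tx_ring *ring,
                       uint8_t *doorbell_base)
    {
        /* Each ring owns one doorbell page, indexed by wq_id (3260). */
        ring->prod_idx_db_reg =
            (volatile uint32_t *)(doorbell_base + DB_PAGE_SIZE * ring->wq_id);
        ring->prod_idx = 0;

        w->len            = (uint16_t)(ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
        w->cq_id_rss      = (uint16_t)ring->cq_id;
        w->addr           = ring->wq_base_dma;
        w->cnsmr_idx_addr = ring->cnsmr_idx_sh_reg_dma;
    }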
3832 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); in ql_adapter_initialize()
4101 struct tx_ring *tx_ring; in ql_configure_rings() local
4123 tx_ring = &qdev->tx_ring[i]; in ql_configure_rings()
4124 memset((void *)tx_ring, 0, sizeof(*tx_ring)); in ql_configure_rings()
4125 tx_ring->qdev = qdev; in ql_configure_rings()
4126 tx_ring->wq_id = i; in ql_configure_rings()
4127 tx_ring->wq_len = qdev->tx_ring_size; in ql_configure_rings()
4128 tx_ring->wq_size = in ql_configure_rings()
4129 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); in ql_configure_rings()
4135 tx_ring->cq_id = qdev->rss_ring_count + i; in ql_configure_rings()
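ql_configure_rings sizes each transmit ring from qdev->tx_ring_size and maps ring i to completion queue rss_ring_count + i, i.e. the TX completion queues sit after the receive (RSS) queues in the adapter's completion-queue array. A compact model of that per-ring setup:

    #include <string.h>

    struct ob_mac_iocb_req { unsigned char raw[32]; };  /* size is a stand-in */

    struct tx_ring {
        unsigned wq_id, wq_len, cq_id;
        size_t   wq_size;
    };

    /* Mirrors 4123-4135: zero the ring, number it, size the work
     * queue in bytes, and point it at its completion queue. */
    void configure_tx_rings(struct tx_ring *rings, unsigned count,
                            unsigned tx_ring_size, unsigned rss_ring_count)
    {
        for (unsigned i = 0; i < count; i++) {
            struct tx_ring *ring = &rings[i];
            memset(ring, 0, sizeof(*ring));
            ring->wq_id   = i;
            ring->wq_len  = tx_ring_size;
            ring->wq_size = tx_ring_size * sizeof(struct ob_mac_iocb_req);
            ring->cq_id   = rss_ring_count + i;
        }
    }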
4293 struct tx_ring *tx_ring = &qdev->tx_ring[0]; in qlge_get_stats() local
4314 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { in qlge_get_stats()
4315 pkts += tx_ring->tx_packets; in qlge_get_stats()
4316 bytes += tx_ring->tx_bytes; in qlge_get_stats()
4317 errors += tx_ring->tx_errors; in qlge_get_stats()
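qlge_get_stats walks all transmit rings and sums the per-ring counters into one device-wide view. A final sketch of that aggregation:

    struct tx_ring   { unsigned long tx_packets, tx_bytes, tx_errors; };
    struct net_stats { unsigned long tx_packets, tx_bytes, tx_errors; };

    /* Sum per-ring counters, as the loop at 4314-4317 does. */
    struct net_stats sum_tx_stats(const struct tx_ring *rings, unsigned count)
    {
        struct net_stats s = {0};
        for (unsigned i = 0; i < count; i++) {
            s.tx_packets += rings[i].tx_packets;
            s.tx_bytes   += rings[i].tx_bytes;
            s.tx_errors  += rings[i].tx_errors;
        }
        return s;
    }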