Lines Matching refs:tx_ring

2111 struct tx_ring *tx_ring; in ql_process_mac_tx_intr() local
2115 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2116 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; in ql_process_mac_tx_intr()
2118 tx_ring->tx_bytes += (tx_ring_desc->skb)->len; in ql_process_mac_tx_intr()
2119 tx_ring->tx_packets++; in ql_process_mac_tx_intr()
2144 atomic_inc(&tx_ring->tx_count); in ql_process_mac_tx_intr()
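
The lines above are the TX completion path: txq_idx in the response selects the ring, tid selects the descriptor, the byte/packet counters are bumped from the completed skb, and the freed slot is returned by incrementing tx_count. A minimal userspace model of that accounting follows; the model_* names are made up for the sketch, only the pattern mirrors the driver.

    #include <stdatomic.h>

    struct model_tx_ring {
        unsigned long tx_bytes;
        unsigned long tx_packets;
        atomic_int    tx_count;   /* free descriptors remaining on the ring */
    };

    /* Completion pattern from the listing: account for the finished frame,
     * then hand its descriptor back to the ring. */
    static void model_tx_complete(struct model_tx_ring *ring, unsigned int skb_len)
    {
        ring->tx_bytes += skb_len;
        ring->tx_packets++;
        atomic_fetch_add(&ring->tx_count, 1);   /* the slot is free again */
    }
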
2213 struct tx_ring *tx_ring; in ql_clean_outbound_rx_ring() local
2241 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2242 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2243 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in ql_clean_outbound_rx_ring()
2248 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in ql_clean_outbound_rx_ring()
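
The cleanup path only wakes a stopped TX subqueue once more than a quarter of the ring's wq_len descriptors are free again, which gives hysteresis against rapid stop/wake flapping. A sketch of just that threshold check (a simplified model, not the kernel code):

    /* Hysteresis for restarting a stopped TX queue: don't wake it until
     * the free-descriptor count climbs back above wq_len / 4. */
    static int should_wake_queue(int free_descs, int wq_len, int queue_stopped)
    {
        return queue_stopped && free_descs > wq_len / 4;
    }
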
2650 struct tx_ring *tx_ring; in qlge_send() local
2653 tx_ring = &qdev->tx_ring[tx_ring_idx]; in qlge_send()
2658 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2662 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2663 tx_ring->tx_errors++; in qlge_send()
2666 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; in qlge_send()
2698 tx_ring->tx_errors++; in qlge_send()
2702 tx_ring->prod_idx++; in qlge_send()
2703 if (tx_ring->prod_idx == tx_ring->wq_len) in qlge_send()
2704 tx_ring->prod_idx = 0; in qlge_send()
2707 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); in qlge_send()
2710 tx_ring->prod_idx, skb->len); in qlge_send()
2712 atomic_dec(&tx_ring->tx_count); in qlge_send()
2714 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { in qlge_send()
2715 netif_stop_subqueue(ndev, tx_ring->wq_id); in qlge_send()
2716 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) in qlge_send()
2721 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in qlge_send()
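
qlge_send() reserves the descriptor at prod_idx, advances the producer index with a wrap back to zero at wq_len, rings the doorbell with the new index via ql_write_db_reg(), and decrements tx_count; if fewer than two free slots remain the subqueue is stopped until cleanup brings it back over the wake threshold. Below is a compact, runnable model of the index arithmetic only; WQ_LEN and the pretend completions are assumptions made to keep the wrap visible, not driver behavior.

    #include <stdio.h>

    #define WQ_LEN 4    /* tiny ring so the producer-index wrap is visible */

    int main(void)
    {
        unsigned int prod_idx = 0;
        int tx_count = WQ_LEN;          /* free descriptors, as after init */

        for (int frame = 0; frame < 10; frame++) {
            if (tx_count < 2) {         /* mirror the driver's "< 2" stop check */
                printf("frame %2d: queue would be stopped\n", frame);
                tx_count += 2;          /* pretend completions returned two slots */
                continue;
            }
            /* ...descriptor at q[prod_idx] would be filled and mapped here... */
            prod_idx++;
            if (prod_idx == WQ_LEN)     /* wrap exactly as the listing shows */
                prod_idx = 0;
            /* ...the new prod_idx would be written to the doorbell here... */
            tx_count--;
            printf("frame %2d: prod_idx %u, free %d\n", frame, prod_idx, tx_count);
        }
        return 0;
    }
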
2774 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_init_tx_ring() argument
2780 mac_iocb_ptr = tx_ring->wq_base; in ql_init_tx_ring()
2781 tx_ring_desc = tx_ring->q; in ql_init_tx_ring()
2782 for (i = 0; i < tx_ring->wq_len; i++) { in ql_init_tx_ring()
2789 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); in ql_init_tx_ring()
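
ql_init_tx_ring() pairs each software-side tx_ring_desc with its hardware IOCB slot in wq_base, then marks the whole ring free by setting tx_count to wq_len. A simplified sketch of that pairing loop; the struct layouts here are assumptions for illustration, the real ones live in the driver headers.

    #include <stdatomic.h>

    struct model_iocb { unsigned char raw[64]; };   /* stand-in for the hardware IOCB */

    struct model_desc {
        struct model_iocb *queue_entry;   /* points at this slot's IOCB */
        int index;
    };

    struct model_ring {
        struct model_iocb *wq_base;       /* DMA-visible IOCB array */
        struct model_desc *q;             /* shadow descriptor array */
        int wq_len;
        atomic_int tx_count;
    };

    /* Link every shadow descriptor to its IOCB and mark the ring fully free. */
    static void model_init_tx_ring(struct model_ring *ring)
    {
        for (int i = 0; i < ring->wq_len; i++) {
            ring->q[i].index = i;
            ring->q[i].queue_entry = &ring->wq_base[i];
        }
        atomic_store(&ring->tx_count, ring->wq_len);
    }
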
2793 struct tx_ring *tx_ring) in ql_free_tx_resources() argument
2795 if (tx_ring->wq_base) { in ql_free_tx_resources()
2796 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_free_tx_resources()
2797 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_free_tx_resources()
2798 tx_ring->wq_base = NULL; in ql_free_tx_resources()
2800 kfree(tx_ring->q); in ql_free_tx_resources()
2801 tx_ring->q = NULL; in ql_free_tx_resources()
2805 struct tx_ring *tx_ring) in ql_alloc_tx_resources() argument
2807 tx_ring->wq_base = in ql_alloc_tx_resources()
2808 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2809 &tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2811 if ((tx_ring->wq_base == NULL) || in ql_alloc_tx_resources()
2812 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) in ql_alloc_tx_resources()
2815 tx_ring->q = in ql_alloc_tx_resources()
2816 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); in ql_alloc_tx_resources()
2817 if (tx_ring->q == NULL) in ql_alloc_tx_resources()
2822 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2823 tx_ring->wq_base, tx_ring->wq_base_dma); in ql_alloc_tx_resources()
2824 tx_ring->wq_base = NULL; in ql_alloc_tx_resources()
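
ql_alloc_tx_resources() carves the IOCB ring out of coherent DMA memory, bails out if the base address violates WQ_ADDR_ALIGN, then kmallocs the shadow descriptor array; ql_free_tx_resources() undoes both and NULLs the pointers so a repeat free is harmless. The sketch below reproduces that alloc/check/free shape in plain C; posix_memalign and the MODEL_* constant stand in for pci_alloc_consistent and the driver's mask purely for illustration.

    #define _POSIX_C_SOURCE 200112L
    #include <stdint.h>
    #include <stdlib.h>

    #define MODEL_WQ_ADDR_ALIGN 0x3f   /* stand-in for the driver's alignment mask */

    struct model_ring {
        void *wq_base;     /* would be DMA-coherent memory in the driver */
        void *q;           /* shadow descriptor array */
        size_t wq_size, q_size;
    };

    static int model_alloc_tx_resources(struct model_ring *ring)
    {
        if (posix_memalign(&ring->wq_base, 64, ring->wq_size))
            return -1;
        /* Same sanity check as the driver: reject a misaligned base. */
        if ((uintptr_t)ring->wq_base & MODEL_WQ_ADDR_ALIGN)
            goto err_base;

        ring->q = malloc(ring->q_size);
        if (!ring->q)
            goto err_base;
        return 0;

    err_base:
        free(ring->wq_base);
        ring->wq_base = NULL;
        return -1;
    }

    static void model_free_tx_resources(struct model_ring *ring)
    {
        free(ring->wq_base);
        ring->wq_base = NULL;   /* safe to free again later */
        free(ring->q);
        ring->q = NULL;
    }
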
3064 struct tx_ring *tx_ring; in ql_tx_ring_clean() local
3073 tx_ring = &qdev->tx_ring[j]; in ql_tx_ring_clean()
3074 for (i = 0; i < tx_ring->wq_len; i++) { in ql_tx_ring_clean()
3075 tx_ring_desc = &tx_ring->q[i]; in ql_tx_ring_clean()
3095 ql_free_tx_resources(qdev, &qdev->tx_ring[i]); in ql_free_mem_resources()
3118 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { in ql_alloc_mem_resources()
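
At shutdown the driver walks every descriptor of every TX ring (the j/i loops above) and releases anything still outstanding before the per-ring resources are freed or reallocated. A trivial sketch of that two-level walk, with the release step reduced to a callback:

    /* Shutdown pattern: visit every slot of every ring and release anything
     * still pending before the rings themselves are torn down. */
    static void model_tx_ring_clean(int ring_count, int wq_len,
                                    void (*release_slot)(int ring, int slot))
    {
        for (int j = 0; j < ring_count; j++)
            for (int i = 0; i < wq_len; i++)
                release_slot(j, i);
    }
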
3267 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_start_tx_ring() argument
3269 struct wqicb *wqicb = (struct wqicb *)tx_ring; in ql_start_tx_ring()
3271 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); in ql_start_tx_ring()
3273 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3275 (tx_ring->wq_id * sizeof(u64)); in ql_start_tx_ring()
3282 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; in ql_start_tx_ring()
3283 tx_ring->prod_idx = 0; in ql_start_tx_ring()
3285 tx_ring->valid_db_reg = doorbell_area + 0x04; in ql_start_tx_ring()
3290 tx_ring->cnsmr_idx_sh_reg = shadow_reg; in ql_start_tx_ring()
3291 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; in ql_start_tx_ring()
3293 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); in ql_start_tx_ring()
3296 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); in ql_start_tx_ring()
3298 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); in ql_start_tx_ring()
3300 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); in ql_start_tx_ring()
3302 ql_init_tx_ring(qdev, tx_ring); in ql_start_tx_ring()
3305 (u16) tx_ring->wq_id); in ql_start_tx_ring()
3843 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); in ql_adapter_initialize()
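
ql_start_tx_ring(), called per ring from ql_adapter_initialize(), carves this ring's slice out of the doorbell area (one DB_PAGE_SIZE page per wq_id, producer-index doorbell at offset 0, valid register at +0x04), assigns a shadow consumer-index slot of one u64 per ring, fills in the work-queue init control block (length ORed with Q_LEN_V and Q_LEN_CPP_CONT, completion queue id, ring base, shadow address), and downloads it to the adapter keyed by wq_id. The runnable sketch below models only the address arithmetic; the base address and page size are placeholder values.

    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_DB_PAGE_SIZE 4096   /* placeholder for DB_PAGE_SIZE */

    int main(void)
    {
        uintptr_t doorbell_area_base = 0x100000;   /* pretend mapped BAR offset */

        for (unsigned int wq_id = 0; wq_id < 4; wq_id++) {
            /* Each TX ring owns one doorbell page, selected by its wq_id. */
            uintptr_t db_page = doorbell_area_base +
                                (uintptr_t)MODEL_DB_PAGE_SIZE * wq_id;
            uintptr_t prod_idx_db_reg = db_page;          /* producer-index doorbell */
            uintptr_t valid_db_reg    = db_page + 0x04;   /* "valid" doorbell */
            /* Shadow consumer index: one u64 slot per ring. */
            size_t shadow_off = wq_id * sizeof(uint64_t);

            printf("wq %u: prod db %#lx, valid db %#lx, shadow +%zu\n",
                   wq_id, (unsigned long)prod_idx_db_reg,
                   (unsigned long)valid_db_reg, shadow_off);
        }
        return 0;
    }
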
4110 struct tx_ring *tx_ring; in ql_configure_rings() local
4132 tx_ring = &qdev->tx_ring[i]; in ql_configure_rings()
4133 memset((void *)tx_ring, 0, sizeof(*tx_ring)); in ql_configure_rings()
4134 tx_ring->qdev = qdev; in ql_configure_rings()
4135 tx_ring->wq_id = i; in ql_configure_rings()
4136 tx_ring->wq_len = qdev->tx_ring_size; in ql_configure_rings()
4137 tx_ring->wq_size = in ql_configure_rings()
4138 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); in ql_configure_rings()
4144 tx_ring->cq_id = qdev->rss_ring_count + i; in ql_configure_rings()
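
ql_configure_rings() zeroes each tx_ring, sets its qdev back-pointer and wq_id, sizes the IOCB ring as wq_len * sizeof(struct ob_mac_iocb_req), and points its completions at a queue placed after the RSS (RX) completion queues. A small sketch of just that sizing and placement arithmetic; the model_* names are invented for the example.

    #include <stddef.h>

    struct model_cfg {
        unsigned int wq_id, wq_len, cq_id;
        size_t wq_size;
    };

    static void model_configure_tx_ring(struct model_cfg *r, unsigned int i,
                                        unsigned int tx_ring_size,
                                        unsigned int rss_ring_count,
                                        size_t iocb_size)
    {
        r->wq_id   = i;
        r->wq_len  = tx_ring_size;
        r->wq_size = (size_t)r->wq_len * iocb_size;   /* wq_len * sizeof(IOCB) */
        r->cq_id   = rss_ring_count + i;              /* outbound CQs follow the RSS CQs */
    }
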
4303 struct tx_ring *tx_ring = &qdev->tx_ring[0]; in qlge_get_stats() local
4324 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { in qlge_get_stats()
4325 pkts += tx_ring->tx_packets; in qlge_get_stats()
4326 bytes += tx_ring->tx_bytes; in qlge_get_stats()
4327 errors += tx_ring->tx_errors; in qlge_get_stats()
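
qlge_get_stats() derives the device totals by summing the per-ring counters across tx_ring_count rings. A minimal aggregation sketch of the same loop:

    #include <stddef.h>

    struct model_tx_stats { unsigned long tx_packets, tx_bytes, tx_errors; };

    /* Device-level TX stats are just the sum of the per-ring counters. */
    static void model_sum_tx_stats(const struct model_tx_stats *rings, size_t count,
                                   struct model_tx_stats *total)
    {
        total->tx_packets = total->tx_bytes = total->tx_errors = 0;
        for (size_t i = 0; i < count; i++) {
            total->tx_packets += rings[i].tx_packets;
            total->tx_bytes   += rings[i].tx_bytes;
            total->tx_errors  += rings[i].tx_errors;
        }
    }
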