Lines matching refs: q_vector
1078 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, in ixgbe_clean_tx_irq() argument
1081 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_tx_irq()
1085 unsigned int budget = q_vector->tx.work_limit; in ixgbe_clean_tx_irq()
1173 q_vector->tx.total_bytes += total_bytes; in ixgbe_clean_tx_irq()
1174 q_vector->tx.total_packets += total_packets; in ixgbe_clean_tx_irq()
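
The ixgbe_clean_tx_irq() lines above show the basic shape of the Tx completion path: the vector supplies a per-poll work limit (q_vector->tx.work_limit), the loop reaps completed descriptors until that budget runs out, and whatever was cleaned is folded into the vector's tx.total_bytes / tx.total_packets counters, which the ITR code reads later. Below is a minimal standalone model of that loop, assuming simplified stand-in structures rather than the driver's real types.

#include <stdbool.h>
#include <stdio.h>

struct tx_stats { unsigned long total_bytes, total_packets; };

struct tx_ring_model {
	int completed;       /* descriptors the NIC has finished with */
	int bytes_per_pkt;   /* fake: pretend every packet is this size */
};

/* Modeled on the listing: clean at most 'work_limit' descriptors, then
 * account the work against the vector's tx stats.  Returns true when the
 * ring was fully cleaned within budget (like the bool result above). */
static bool clean_tx(struct tx_ring_model *ring, struct tx_stats *vec_tx,
		     unsigned int work_limit)
{
	unsigned int budget = work_limit;
	unsigned long bytes = 0, packets = 0;

	while (ring->completed && budget--) {
		ring->completed--;
		packets++;
		bytes += ring->bytes_per_pkt;
	}

	vec_tx->total_bytes += bytes;      /* q_vector->tx.total_bytes += ... */
	vec_tx->total_packets += packets;  /* q_vector->tx.total_packets += ... */
	return ring->completed == 0;
}

int main(void)
{
	struct tx_ring_model ring = { .completed = 300, .bytes_per_pkt = 1500 };
	struct tx_stats tx = { 0, 0 };

	/* 256 is used here purely as an example work limit. */
	bool done = clean_tx(&ring, &tx, 256);

	printf("cleaned %lu packets (%lu bytes), ring empty: %s\n",
	       tx.total_packets, tx.total_bytes, done ? "yes" : "no");
	return 0;
}
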
1292 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) in ixgbe_update_dca() argument
1294 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_update_dca()
1298 if (q_vector->cpu == cpu) in ixgbe_update_dca()
1301 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_update_dca()
1304 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_update_dca()
1307 q_vector->cpu = cpu; in ixgbe_update_dca()
1323 adapter->q_vector[i]->cpu = -1; in ixgbe_setup_dca()
1324 ixgbe_update_dca(adapter->q_vector[i]); in ixgbe_setup_dca()
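
The ixgbe_update_dca() / ixgbe_setup_dca() references show a CPU-caching pattern: each vector remembers which CPU it last programmed DCA for (q_vector->cpu, seeded to -1), returns early when nothing has changed, and only walks its Tx and Rx rings when the vector has migrated. The standalone sketch below models that idea in plain C; the struct layout and the update_ring_dca() helper are simplifications invented for illustration, not the driver's API.

#include <stdio.h>

/* Simplified stand-ins for the driver structures (illustrative only). */
struct ring { int id; };

struct q_vector_model {
	int cpu;            /* last CPU this vector programmed DCA for; -1 = never */
	struct ring tx;
	struct ring rx;
};

/* Hypothetical helper standing in for the per-ring DCA register update. */
static void update_ring_dca(struct ring *r, int cpu)
{
	printf("  reprogram DCA for ring %d -> cpu %d\n", r->id, cpu);
}

/* Mirror of the early-return pattern seen in ixgbe_update_dca(). */
static void update_dca(struct q_vector_model *qv, int current_cpu)
{
	if (qv->cpu == current_cpu)
		return;                     /* nothing moved, skip register writes */

	update_ring_dca(&qv->tx, current_cpu);
	update_ring_dca(&qv->rx, current_cpu);
	qv->cpu = current_cpu;              /* remember where we are now */
}

int main(void)
{
	struct q_vector_model qv = { .cpu = -1, .tx = { 0 }, .rx = { 1 } };

	update_dca(&qv, 2);   /* first call: cpu was -1, rings get programmed */
	update_dca(&qv, 2);   /* same cpu: early return, no work */
	update_dca(&qv, 5);   /* vector migrated: rings reprogrammed */
	return 0;
}
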
1606 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); in ixgbe_process_skb_fields()
1619 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, in ixgbe_rx_skb() argument
1622 if (ixgbe_qv_busy_polling(q_vector)) in ixgbe_rx_skb()
1625 napi_gro_receive(&q_vector->napi, skb); in ixgbe_rx_skb()
1933 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in ixgbe_fetch_rx_buffer()
2005 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, in ixgbe_clean_rx_irq() argument
2011 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq()
2086 skb_mark_napi_id(skb, &q_vector->napi); in ixgbe_clean_rx_irq()
2087 ixgbe_rx_skb(q_vector, skb); in ixgbe_clean_rx_irq()
2097 q_vector->rx.total_packets += total_rx_packets; in ixgbe_clean_rx_irq()
2098 q_vector->rx.total_bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2107 struct ixgbe_q_vector *q_vector = in ixgbe_low_latency_recv() local
2109 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_low_latency_recv()
2116 if (!ixgbe_qv_lock_poll(q_vector)) in ixgbe_low_latency_recv()
2119 ixgbe_for_each_ring(ring, q_vector->rx) { in ixgbe_low_latency_recv()
2120 found = ixgbe_clean_rx_irq(q_vector, ring, 4); in ixgbe_low_latency_recv()
2131 ixgbe_qv_unlock_poll(q_vector); in ixgbe_low_latency_recv()
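
The ixgbe_low_latency_recv() lines show the busy-poll path: it tries to take the per-vector poll lock, cleans each Rx ring with a deliberately tiny budget of 4 descriptors, then releases the lock so NAPI can run. A minimal model of that try-lock-and-poll flow follows; the atomic-flag lock and the poll_ring() helper are assumptions for illustration, not the driver's qv locking primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUSY_POLL_BUDGET 4   /* matches the budget of 4 seen in the listing */

struct qv_model {
	atomic_flag in_use;  /* crude stand-in for ixgbe_qv_lock_poll/unlock_poll */
	int rx_pending;      /* descriptors waiting on the ring */
};

/* Hypothetical per-ring poll: consume up to 'budget' descriptors. */
static int poll_ring(struct qv_model *qv, int budget)
{
	int done = qv->rx_pending < budget ? qv->rx_pending : budget;

	qv->rx_pending -= done;
	return done;
}

/* Busy-poll entry point modeled on ixgbe_low_latency_recv(). */
static int low_latency_recv(struct qv_model *qv)
{
	int found;

	if (atomic_flag_test_and_set(&qv->in_use))
		return -1;                       /* NAPI owns the vector, back off */

	found = poll_ring(qv, BUSY_POLL_BUDGET);
	atomic_flag_clear(&qv->in_use);
	return found;
}

int main(void)
{
	struct qv_model qv = { .in_use = ATOMIC_FLAG_INIT, .rx_pending = 10 };

	while (qv.rx_pending)
		printf("busy poll reaped %d packets\n", low_latency_recv(&qv));
	return 0;
}
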
2146 struct ixgbe_q_vector *q_vector; in ixgbe_configure_msix() local
2162 q_vector = adapter->q_vector[v_idx]; in ixgbe_configure_msix()
2164 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_configure_msix()
2167 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_configure_msix()
2170 ixgbe_write_eitr(q_vector); in ixgbe_configure_msix()
2220 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, in ixgbe_update_itr() argument
2238 timepassed_us = q_vector->itr >> 2; in ixgbe_update_itr()
2277 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) in ixgbe_write_eitr() argument
2279 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_write_eitr()
2281 int v_idx = q_vector->v_idx; in ixgbe_write_eitr()
2282 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbe_write_eitr()
2305 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) in ixgbe_set_itr() argument
2307 u32 new_itr = q_vector->itr; in ixgbe_set_itr()
2310 ixgbe_update_itr(q_vector, &q_vector->tx); in ixgbe_set_itr()
2311 ixgbe_update_itr(q_vector, &q_vector->rx); in ixgbe_set_itr()
2313 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in ixgbe_set_itr()
2330 if (new_itr != q_vector->itr) { in ixgbe_set_itr()
2332 new_itr = (10 * new_itr * q_vector->itr) / in ixgbe_set_itr()
2333 ((9 * new_itr) + q_vector->itr); in ixgbe_set_itr()
2336 q_vector->itr = new_itr; in ixgbe_set_itr()
2338 ixgbe_write_eitr(q_vector); in ixgbe_set_itr()
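
The ixgbe_set_itr() lines contain the interesting arithmetic: rather than jumping straight to the freshly computed ITR, the driver damps the change with new = (10 * new * old) / (9 * new + old), so the interrupt moderation value converges over several poll cycles instead of oscillating. The standalone program below just reproduces that integer arithmetic to show the convergence; the starting and target values are made up for the example.

#include <stdio.h>

/* Damped ITR update copied from the ixgbe_set_itr() lines above:
 * new = (10 * new * old) / (9 * new + old)
 * The start/target numbers here are arbitrary examples, not hardware values. */
static unsigned int damp_itr(unsigned int new_itr, unsigned int old_itr)
{
	return (10 * new_itr * old_itr) / ((9 * new_itr) + old_itr);
}

int main(void)
{
	unsigned int itr = 200;         /* current q_vector->itr (example value) */
	const unsigned int target = 40; /* freshly computed ITR (example value) */

	for (int i = 0; i < 6; i++) {
		itr = damp_itr(target, itr);
		printf("step %d: itr = %u\n", i, itr);   /* 142, 113, 95, ... */
	}
	return 0;
}
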
2673 struct ixgbe_q_vector *q_vector = data; in ixgbe_msix_clean_rings() local
2677 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbe_msix_clean_rings()
2678 napi_schedule(&q_vector->napi); in ixgbe_msix_clean_rings()
2692 struct ixgbe_q_vector *q_vector = in ixgbe_poll() local
2694 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_poll()
2701 ixgbe_update_dca(q_vector); in ixgbe_poll()
2704 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_poll()
2705 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); in ixgbe_poll()
2707 if (!ixgbe_qv_lock_napi(q_vector)) in ixgbe_poll()
2712 if (q_vector->rx.count > 1) in ixgbe_poll()
2713 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbe_poll()
2717 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_poll()
2718 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, in ixgbe_poll()
2721 ixgbe_qv_unlock_napi(q_vector); in ixgbe_poll()
2729 ixgbe_set_itr(q_vector); in ixgbe_poll()
2731 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); in ixgbe_poll()
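
The ixgbe_poll() references show how a single NAPI budget is shared when one MSI-X vector carries more than one Rx ring: the budget is divided by q_vector->rx.count, floored at 1 so every ring still gets serviced, and each ring's "clean complete" result is ANDed into the overall verdict. The sketch below models just that budget arithmetic; the ring counts and the clean_ring() stub are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

#define MAX_OF(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for ixgbe_clean_rx_irq(): returns true when the
 * ring was fully drained within its share of the budget. */
static bool clean_ring(int pending, int per_ring_budget)
{
	return pending <= per_ring_budget;
}

int main(void)
{
	const int budget = 64;                 /* typical NAPI budget */
	const int pending[] = { 30, 5, 70 };   /* example backlog per Rx ring */
	const int rx_count = 3;

	/* Same split as the listing: max(budget / rx.count, 1).  The single-ring
	 * else branch is not shown in the listing; giving that ring the whole
	 * budget is an assumption here. */
	int per_ring_budget = rx_count > 1 ? MAX_OF(budget / rx_count, 1) : budget;
	bool clean_complete = true;

	for (int i = 0; i < rx_count; i++)
		clean_complete &= clean_ring(pending[i], per_ring_budget);

	printf("per-ring budget: %d, all rings clean: %s\n",
	       per_ring_budget, clean_complete ? "yes" : "no");
	return 0;
}
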
2750 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_request_msix_irqs() local
2753 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbe_request_msix_irqs()
2754 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ixgbe_request_msix_irqs()
2757 } else if (q_vector->rx.ring) { in ixgbe_request_msix_irqs()
2758 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ixgbe_request_msix_irqs()
2760 } else if (q_vector->tx.ring) { in ixgbe_request_msix_irqs()
2761 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ixgbe_request_msix_irqs()
2768 q_vector->name, q_vector); in ixgbe_request_msix_irqs()
2778 &q_vector->affinity_mask); in ixgbe_request_msix_irqs()
2797 adapter->q_vector[vector]); in ixgbe_request_msix_irqs()
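
The ixgbe_request_msix_irqs() branches show that each vector's IRQ name depends on what it carries: one label when it has both Tx and Rx rings, another for Rx-only, another for Tx-only, with the name written into q_vector->name before request_irq(). The exact format string is truncated in the listing, so the "%s-%s-%d" form below is an assumption for illustration, as is the qv_name_model structure.

#include <stdbool.h>
#include <stdio.h>

#define QV_NAME_LEN 32

struct qv_name_model {
	bool has_tx;
	bool has_rx;
	char name[QV_NAME_LEN];
};

/* Build an IRQ name the way the listing's branches suggest: one label for
 * combined Tx+Rx vectors, one for Rx-only, one for Tx-only. */
static void name_vector(struct qv_name_model *qv, const char *netdev, int idx)
{
	const char *kind = qv->has_tx && qv->has_rx ? "TxRx" :
			   qv->has_rx ? "rx" :
			   qv->has_tx ? "tx" : "unused";

	snprintf(qv->name, sizeof(qv->name) - 1, "%s-%s-%d", netdev, kind, idx);
}

int main(void)
{
	struct qv_name_model qv = { .has_tx = true, .has_rx = true };

	name_vector(&qv, "eth0", 3);
	printf("%s\n", qv.name);   /* e.g. "eth0-TxRx-3" */
	return 0;
}
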
2815 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_intr() local
2867 napi_schedule(&q_vector->napi); in ixgbe_intr()
2916 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_free_irq() local
2920 if (!q_vector->rx.ring && !q_vector->tx.ring) in ixgbe_free_irq()
2926 free_irq(entry->vector, q_vector); in ixgbe_free_irq()
2972 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_configure_msi_and_legacy() local
2974 ixgbe_write_eitr(q_vector); in ixgbe_configure_msi_and_legacy()
3021 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) in ixgbe_configure_tx_ring()
3044 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_configure_tx_ring() local
3046 if (q_vector) in ixgbe_configure_tx_ring()
3048 &q_vector->affinity_mask, in ixgbe_configure_tx_ring()
4197 ixgbe_qv_init_lock(adapter->q_vector[q_idx]); in ixgbe_napi_enable_all()
4198 napi_enable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_enable_all()
4207 napi_disable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_disable_all()
4208 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { in ixgbe_napi_disable_all()
5386 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
5387 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
5470 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
5471 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
6226 struct ixgbe_q_vector *qv = adapter->q_vector[i]; in ixgbe_check_hang_subtask()
7152 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_atr() local
7164 if (!q_vector) in ixgbe_atr()
7233 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, in ixgbe_atr()
7568 ixgbe_msix_clean_rings(0, adapter->q_vector[i]); in ixgbe_netpoll()