/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/

ixgbevf.h
   219  static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
   221          spin_lock_init(&q_vector->lock);
   222          q_vector->state = IXGBEVF_QV_STATE_IDLE;
   226  static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
   230          spin_lock_bh(&q_vector->lock);
   231          if (q_vector->state & IXGBEVF_QV_LOCKED) {
   232                  WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
   233                  q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
   236                  q_vector->tx.ring->stats.yields++;
   240          q_vector->state = IXGBEVF_QV_STATE_NAPI;
   [all …]
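The ixgbevf.h helpers above arbitrate each queue vector between NAPI and busy polling through a spinlock-protected state word: a caller that finds the vector already locked sets a *_YIELD flag, bumps the ring's yields counter, and backs off. Below is a minimal userspace model of that handoff, assuming a pthread mutex in place of spin_lock_bh() and illustrative state values rather than the kernel's IXGBEVF_QV_STATE_* definitions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state bits mirroring the IXGBEVF_QV_STATE_* flags above. */
#define QV_STATE_IDLE        0
#define QV_STATE_NAPI        1   /* NAPI owns the vector */
#define QV_STATE_POLL        2   /* busy-poll owns the vector */
#define QV_LOCKED            (QV_STATE_NAPI | QV_STATE_POLL)
#define QV_STATE_NAPI_YIELD  4   /* NAPI lost the race and yielded */

struct q_vector_model {
    pthread_mutex_t lock;     /* stands in for q_vector->lock */
    unsigned int state;
    unsigned long yields;     /* stands in for tx.ring->stats.yields */
};

/* Mirrors ixgbevf_qv_lock_napi(): take the lock, and if busy-poll
 * already owns the vector, record a yield instead of polling. */
static bool qv_lock_napi(struct q_vector_model *qv)
{
    bool got_it = true;

    pthread_mutex_lock(&qv->lock);
    if (qv->state & QV_LOCKED) {
        qv->state |= QV_STATE_NAPI_YIELD;
        qv->yields++;
        got_it = false;
    } else {
        qv->state = QV_STATE_NAPI;
    }
    pthread_mutex_unlock(&qv->lock);
    return got_it;
}

int main(void)
{
    struct q_vector_model qv = { PTHREAD_MUTEX_INITIALIZER, QV_STATE_IDLE, 0 };

    printf("napi lock acquired: %d\n", qv_lock_napi(&qv)); /* 1 */
    printf("napi lock acquired: %d\n", qv_lock_napi(&qv)); /* 0, already owned */
    printf("yields: %lu\n", qv.yields);                    /* 1 */
    return 0;
}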
ixgbevf_main.c
   118  static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
   290  static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
   293          struct ixgbevf_adapter *adapter = q_vector->adapter;
   385          q_vector->tx.total_bytes += total_bytes;
   386          q_vector->tx.total_packets += total_packets;
   444  static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
   448          skb_mark_napi_id(skb, &q_vector->napi);
   450          if (ixgbevf_qv_busy_polling(q_vector)) {
   457          napi_gro_receive(&q_vector->napi, skb);
   912  static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
   [all …]
ethtool.c
   732          if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
   748          struct ixgbevf_q_vector *q_vector;
   753          if (adapter->q_vector[0]->tx.count &&
   754              adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
   784                  q_vector = adapter->q_vector[i];
   785                  if (q_vector->tx.count && !q_vector->rx.count)
   787                          q_vector->itr = tx_itr_param;
   790                          q_vector->itr = rx_itr_param;
   791                  ixgbevf_write_eitr(q_vector);
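The coalesce excerpts in this listing all follow the same shape: derive TX and RX interrupt-throttle (ITR) parameters from the ethtool request, then walk every q_vector, giving TX-only vectors the TX value and everything else the RX value before writing the hardware EITR register. A self-contained sketch of that selection loop, with plain structs and a stub register write standing in for the driver types (all names here are illustrative):

#include <stdio.h>

struct qv_model {
    int tx_count;        /* TX rings attached to this vector */
    int rx_count;        /* RX rings attached to this vector */
    unsigned int itr;
};

/* Stub for ixgbevf_write_eitr(): would program the EITR register. */
static void write_eitr(int idx, unsigned int itr)
{
    printf("vector %d -> ITR %u\n", idx, itr);
}

static void apply_coalesce(struct qv_model *qv, int n,
                           unsigned int tx_itr, unsigned int rx_itr)
{
    for (int i = 0; i < n; i++) {
        /* TX-only vectors take the TX parameter; mixed or RX-only
         * vectors take the RX parameter, as in the excerpt. */
        if (qv[i].tx_count && !qv[i].rx_count)
            qv[i].itr = tx_itr;
        else
            qv[i].itr = rx_itr;
        write_eitr(i, qv[i].itr);
    }
}

int main(void)
{
    struct qv_model qv[] = { {1, 0, 0}, {1, 1, 0}, {0, 1, 0} };

    apply_coalesce(qv, 3, 64, 32);
    return 0;
}

Because mixed RX/TX vectors take the RX parameter, a separate TX setting would be silently ignored when queues share a vector; that is what the q_vector[0]->tx.count && rx.count guards above reject up front.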
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/

fm10k_debugfs.c
   134          struct fm10k_q_vector *q_vector = ring->q_vector;
   138          if (ring < q_vector->rx.ring)
   168  void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
   170          struct fm10k_intfc *interface = q_vector->interface;
   178          sprintf(name, "q_vector.%03d", q_vector->v_idx);
   180          q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
   181          if (!q_vector->dbg_q_vector)
   185          for (i = 0; i < q_vector->tx.count; i++) {
   186                  struct fm10k_ring *ring = &q_vector->tx.ring[i];
   191                          q_vector->dbg_q_vector, ring,
   [all …]
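fm10k_dbg_q_vector_init() gives each vector its own debugfs directory named by zero-padded index, then populates one file per TX ring inside it. The naming convention alone is easy to demonstrate standalone (snprintf is used here out of caution; the driver itself calls sprintf as shown):

#include <stdio.h>

int main(void)
{
    char name[sizeof("q_vector.MMM")];

    /* Mirrors sprintf(name, "q_vector.%03d", q_vector->v_idx): indices
     * map to fixed-width names, so directory listings sort in vector
     * order. */
    for (int v_idx = 0; v_idx < 3; v_idx++) {
        snprintf(name, sizeof(name), "q_vector.%03d", v_idx);
        printf("%s\n", name);   /* q_vector.000, q_vector.001, ... */
    }
    return 0;
}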
fm10k_main.c
   321                  skb = napi_alloc_skb(&rx_ring->q_vector->napi,
   415          struct fm10k_intfc *interface = rx_ring->q_vector->interface;
   599  static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
   602          napi_gro_receive(&q_vector->napi, skb);
   605  static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
   655                  fm10k_receive_skb(q_vector, skb);
   671          q_vector->rx.total_packets += total_packets;
   672          q_vector->rx.total_bytes += total_bytes;
  1217  static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
  1220          struct fm10k_intfc *interface = q_vector->interface;
   [all …]
fm10k_pci.c
   467                  struct fm10k_q_vector *qv = interface->q_vector[i];
   540          if (ring->q_vector) {
   541                  txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
   667          if (ring->q_vector) {
   668                  rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
   800          struct fm10k_q_vector *q_vector;
   804                  q_vector = interface->q_vector[q_idx];
   805                  napi_enable(&q_vector->napi);
   811          struct fm10k_q_vector *q_vector = data;
   813          if (q_vector->rx.count || q_vector->tx.count)
   [all …]
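fm10k_msix_clean_rings() is the per-vector MSI-X top half: it performs no packet processing itself, only scheduling NAPI when the vector actually has rings attached. A small runnable model of that split, with a flag standing in for napi_schedule() and all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct qv_model {
    int rx_count, tx_count;
    bool napi_scheduled;     /* stands in for the NAPI state machinery */
};

/* Top half: mirrors fm10k_msix_clean_rings() - cheap check, defer the
 * real work to the NAPI poll loop. */
static void msix_handler(void *data)
{
    struct qv_model *qv = data;

    if (qv->rx_count || qv->tx_count)
        qv->napi_scheduled = true;   /* napi_schedule(&q_vector->napi) */
}

int main(void)
{
    struct qv_model with_rings = { 1, 1, false };
    struct qv_model empty = { 0, 0, false };

    msix_handler(&with_rings);
    msix_handler(&empty);
    printf("with rings: %d, empty: %d\n",
           with_rings.napi_scheduled, empty.napi_scheduled); /* 1, 0 */
    return 0;
}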
fm10k.h
   113          struct fm10k_q_vector *q_vector;        /* backpointer to host q_vector */
   286          struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
   504  void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
   505  void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
   511  static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}
   512  static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}
fm10k_ethtool.c
   629                  qv = interface->q_vector[i];
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/

ixgbe.h
   252          struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
   407  static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
   410          atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
   414  static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
   416          int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
   420                  q_vector->tx.ring->stats.yields++;
   427  static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
   429          WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
   432          if (q_vector->napi.gro_list)
   433                  napi_gro_flush(&q_vector->napi, false);
   [all …]
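Where ixgbevf (earlier in this listing) guards the NAPI/busy-poll handoff with a spinlock, ixgbe does it locklessly: ixgbe_qv_lock_napi() is a single atomic_cmpxchg() from IDLE to NAPI, and a failed exchange is counted as a yield. A compilable C11 <stdatomic.h> model of that scheme; note the kernel's atomic_cmpxchg() returns the old value rather than a boolean, and the state constants here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { QV_STATE_IDLE, QV_STATE_NAPI, QV_STATE_POLL };

struct qv_model {
    atomic_int state;
    unsigned long yields;
};

/* Mirrors ixgbe_qv_lock_napi(): one compare-and-swap, no spinlock.
 * Only an IDLE -> NAPI transition succeeds; any other current owner
 * means we lost the race and record a yield. */
static bool qv_lock_napi(struct qv_model *qv)
{
    int expected = QV_STATE_IDLE;

    if (atomic_compare_exchange_strong(&qv->state, &expected,
                                       QV_STATE_NAPI))
        return true;

    qv->yields++;   /* q_vector->tx.ring->stats.yields++ in the driver */
    return false;
}

static void qv_unlock_napi(struct qv_model *qv)
{
    /* WARN_ON() in the driver; a stderr message here. */
    if (atomic_load(&qv->state) != QV_STATE_NAPI)
        fprintf(stderr, "unlock without NAPI ownership\n");
    atomic_store(&qv->state, QV_STATE_IDLE);
}

int main(void)
{
    struct qv_model qv = { QV_STATE_IDLE, 0 };

    printf("first lock: %d\n", qv_lock_napi(&qv));   /* 1 */
    printf("second lock: %d\n", qv_lock_napi(&qv));  /* 0 */
    qv_unlock_napi(&qv);
    printf("yields: %lu\n", qv.yields);              /* 1 */
    return 0;
}

Same state machine as the spinlock-based ixgbevf helpers, but the cmpxchg version never blocks in the hot path.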
ixgbe_lib.c
   805          struct ixgbe_q_vector *q_vector;
   828          q_vector = kzalloc_node(size, GFP_KERNEL, node);
   829          if (!q_vector)
   830                  q_vector = kzalloc(size, GFP_KERNEL);
   831          if (!q_vector)
   836                  cpumask_set_cpu(cpu, &q_vector->affinity_mask);
   837          q_vector->numa_node = node;
   841          q_vector->cpu = -1;
   845          netif_napi_add(adapter->netdev, &q_vector->napi,
   847          napi_hash_add(&q_vector->napi);
   [all …]
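ixgbe_alloc_q_vector() first tries kzalloc_node() on the NUMA node that will service the vector's interrupt and falls back to a plain kzalloc() only if node-local memory is unavailable, so an allocation failure on one node degrades placement rather than functionality. The shape of that fallback, modeled in userspace with a stub for the node-local allocator (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for kzalloc_node(): returns NULL here to
 * simulate exhausted node-local memory and exercise the fallback. */
static void *zalloc_on_node(size_t size, int node)
{
    (void)size;
    (void)node;
    return NULL;
}

static void *alloc_q_vector(size_t size, int node)
{
    void *qv = zalloc_on_node(size, node);

    /* Fall back to any-node zeroed memory rather than failing
     * outright - the kzalloc_node()/kzalloc() pair in the excerpt. */
    if (!qv)
        qv = calloc(1, size);
    return qv;      /* NULL only if both attempts failed */
}

int main(void)
{
    void *qv = alloc_q_vector(512, 1);

    printf("allocated via fallback: %s\n", qv ? "yes" : "no");
    free(qv);
    return 0;
}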
ixgbe_main.c
  1078  static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
  1081          struct ixgbe_adapter *adapter = q_vector->adapter;
  1085          unsigned int budget = q_vector->tx.work_limit;
  1173          q_vector->tx.total_bytes += total_bytes;
  1174          q_vector->tx.total_packets += total_packets;
  1292  static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
  1294          struct ixgbe_adapter *adapter = q_vector->adapter;
  1298          if (q_vector->cpu == cpu)
  1301          ixgbe_for_each_ring(ring, q_vector->tx)
  1304          ixgbe_for_each_ring(ring, q_vector->rx)
   [all …]
ixgbe_ethtool.c
  2201          if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
  2247          struct ixgbe_q_vector *q_vector;
  2252          if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
  2286          if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
  2305                  q_vector = adapter->q_vector[i];
  2306                  if (q_vector->tx.count && !q_vector->rx.count)
  2308                          q_vector->itr = tx_itr_param;
  2311                          q_vector->itr = rx_itr_param;
  2312                  ixgbe_write_eitr(q_vector);
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/

i40evf_main.c
   330          struct i40e_q_vector *q_vector = data;
   332          if (!q_vector->tx.ring && !q_vector->rx.ring)
   335          napi_schedule(&q_vector->napi);
   349          struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
   352          rx_ring->q_vector = q_vector;
   353          rx_ring->next = q_vector->rx.ring;
   355          q_vector->rx.ring = rx_ring;
   356          q_vector->rx.count++;
   357          q_vector->rx.latency_range = I40E_LOW_LATENCY;
   369          struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
   [all …]
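i40evf_map_vector_to_rxq() attaches a ring to its vector by head-inserting it into the vector's singly linked ring list and bumping rx.count, which is how one MSI-X vector ends up polling several rings. A minimal model of that mapping (struct names are illustrative):

#include <stdio.h>

struct ring {
    int idx;
    struct ring *next;        /* rx_ring->next in the excerpt */
};

struct qv_model {
    struct ring *rx_head;     /* q_vector->rx.ring */
    int rx_count;             /* q_vector->rx.count */
};

/* Mirrors i40evf_map_vector_to_rxq(): head insertion, O(1). */
static void map_rxq(struct qv_model *qv, struct ring *r)
{
    r->next = qv->rx_head;
    qv->rx_head = r;
    qv->rx_count++;
}

int main(void)
{
    struct qv_model qv = { NULL, 0 };
    struct ring r0 = { 0, NULL }, r1 = { 1, NULL };

    map_rxq(&qv, &r0);
    map_rxq(&qv, &r1);

    /* The NAPI poll loop would walk this list; most recently mapped
     * ring comes first. */
    for (struct ring *r = qv.rx_head; r; r = r->next)
        printf("ring %d\n", r->idx);        /* 1 then 0 */
    printf("rx.count = %d\n", qv.rx_count); /* 2 */
    return 0;
}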
i40e_txrx.c
   304          tx_ring->q_vector->tx.total_bytes += total_bytes;
   305          tx_ring->q_vector->tx.total_packets += total_packets;
   371  static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
   380                  I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
   457  static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
   459          u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
   460          struct i40e_hw *hw = &q_vector->vsi->back->hw;
   465          old_itr = q_vector->rx.itr;
   466          i40e_set_new_dynamic_itr(&q_vector->rx);
   467          if (old_itr != q_vector->rx.itr)
   [all …]
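i40e_update_dynamic_itr() recomputes the vector's interrupt throttle rate from recent traffic and writes the hardware register only when the value actually changed, saving an MMIO write per poll cycle otherwise. A sketch of that change-detection pattern, with the rate recomputation and wr32() stubbed out (everything here is a stand-in, not the driver's logic):

#include <stdio.h>

static unsigned int mmio_writes;

/* Stub for wr32(): counts register writes instead of touching hardware. */
static void write_itr_reg(int vector, unsigned short itr)
{
    mmio_writes++;
    printf("vector %d: ITR <- %u\n", vector, itr);
}

/* Stub for i40e_set_new_dynamic_itr(): pretend traffic analysis
 * produced `target`. */
static unsigned short recompute_itr(unsigned short target)
{
    return target;
}

static void update_dynamic_itr(int vector, unsigned short *itr,
                               unsigned short target)
{
    unsigned short old_itr = *itr;

    *itr = recompute_itr(target);
    if (old_itr != *itr)            /* skip the MMIO write if unchanged */
        write_itr_reg(vector, *itr);
}

int main(void)
{
    unsigned short itr = 62;

    update_dynamic_itr(0, &itr, 62);  /* no change, no write */
    update_dynamic_itr(0, &itr, 40);  /* change, one write */
    printf("register writes: %u\n", mmio_writes);  /* 1 */
    return 0;
}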
i40evf_ethtool.c
   319          struct i40e_q_vector *q_vector;
   352                  q_vector = adapter->q_vector[i];
   353                  q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
   354                  wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
   355                  q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
   356                  wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
i40evf_virtchnl.c
   320          struct i40e_q_vector *q_vector;
   342                  q_vector = adapter->q_vector[v_idx];
   345                  vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
   346                  vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
i40e_txrx.h
   265          struct i40e_q_vector *q_vector; /* Backreference to associated vector */
i40evf.h
   191          struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
/linux-4.1.27/drivers/net/ethernet/intel/igb/

igb_main.c
   793  static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
   795          struct igb_adapter *adapter = q_vector->adapter;
   801          if (q_vector->rx.ring)
   802                  rx_queue = q_vector->rx.ring->reg_idx;
   803          if (q_vector->tx.ring)
   804                  tx_queue = q_vector->tx.ring->reg_idx;
   820                  q_vector->eims_value = msixbm;
   836                  q_vector->eims_value = 1 << msix_vector;
   857                  q_vector->eims_value = 1 << msix_vector;
   865          adapter->eims_enable_mask |= q_vector->eims_value;
   [all …]
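In igb_assign_vector(), each vector's eims_value is a queue bitmask (msixbm) on the oldest MAC but simply 1 << msix_vector on later ones, and the adapter ORs every vector's value into eims_enable_mask so all MSI-X interrupts can be enabled or re-armed with one register write. The bit bookkeeping, demonstrated standalone:

#include <stdio.h>

int main(void)
{
    unsigned int eims_enable_mask = 0;

    /* One EIMS bit per MSI-X vector, as in the 1 << msix_vector
     * branches above; the msixbm branch builds a queue bitmask
     * instead on the oldest hardware. */
    for (int msix_vector = 0; msix_vector < 4; msix_vector++) {
        unsigned int eims_value = 1u << msix_vector;

        eims_enable_mask |= eims_value;
        printf("vector %d: eims_value=0x%x\n", msix_vector, eims_value);
    }
    printf("eims_enable_mask=0x%x\n", eims_enable_mask);  /* 0xf */
    return 0;
}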
igb.h
   234          struct igb_q_vector *q_vector; /* backlink to q_vector */
   416          struct igb_q_vector *q_vector[MAX_Q_VECTORS];
   538  void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
   539  void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
igb_ptp.c
   713  void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
   723          igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
   735  void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
   738          struct igb_adapter *adapter = q_vector->adapter;
igb_ethtool.c
  2198          struct igb_q_vector *q_vector = adapter->q_vector[i];
  2199          q_vector->tx.work_limit = adapter->tx_work_limit;
  2200          if (q_vector->rx.ring)
  2201                  q_vector->itr_val = adapter->rx_itr_setting;
  2203                  q_vector->itr_val = adapter->tx_itr_setting;
  2204          if (q_vector->itr_val && q_vector->itr_val <= 3)
  2205                  q_vector->itr_val = IGB_START_ITR;
  2206          q_vector->set_itr = 1;
/linux-4.1.27/drivers/net/ethernet/intel/i40e/

i40e_txrx.c
   785          tx_ring->q_vector->tx.total_bytes += total_bytes;
   786          tx_ring->q_vector->tx.total_packets += total_packets;
   859  static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
   868                  I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
   945  static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
   947          u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
   948          struct i40e_hw *hw = &q_vector->vsi->back->hw;
   953          old_itr = q_vector->rx.itr;
   954          i40e_set_new_dynamic_itr(&q_vector->rx);
   955          if (old_itr != q_vector->rx.itr)
   [all …]
i40e_main.c
  2419          if (!ring->q_vector || !ring->netdev)
  2426                  &ring->q_vector->affinity_mask,
  2797          struct i40e_q_vector *q_vector;
  2811                  q_vector = vsi->q_vectors[i];
  2812                  q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
  2813                  q_vector->rx.latency_range = I40E_LOW_LATENCY;
  2815                          q_vector->rx.itr);
  2816                  q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
  2817                  q_vector->tx.latency_range = I40E_LOW_LATENCY;
  2819                          q_vector->tx.itr);
   [all …]
i40e_txrx.h
   268          struct i40e_q_vector *q_vector; /* Backreference to associated vector */
i40e_ethtool.c
  1728          struct i40e_q_vector *q_vector;
  1775                  q_vector = vsi->q_vectors[i];
  1776                  q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
  1777                  wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
  1778                  q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
  1779                  wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
i40e_fcoe.c
   796          rx_ring->q_vector->rx.total_bytes += rc;
   797          rx_ring->q_vector->rx.total_packets += pkts;
i40e_debugfs.c
   534                  rx_ring->q_vector);
   580                  tx_ring->q_vector);