Searched refs:txq (Results 1 – 197 of 197) sorted by relevance

/linux-4.4.14/drivers/net/wireless/iwlwifi/pcie/
Dtx.c149 struct iwl_txq *txq = (void *)data; in iwl_pcie_txq_stuck_timer() local
150 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; in iwl_pcie_txq_stuck_timer()
153 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); in iwl_pcie_txq_stuck_timer()
157 spin_lock(&txq->lock); in iwl_pcie_txq_stuck_timer()
159 if (txq->q.read_ptr == txq->q.write_ptr) { in iwl_pcie_txq_stuck_timer()
160 spin_unlock(&txq->lock); in iwl_pcie_txq_stuck_timer()
163 spin_unlock(&txq->lock); in iwl_pcie_txq_stuck_timer()
165 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, in iwl_pcie_txq_stuck_timer()
166 jiffies_to_msecs(txq->wd_timeout)); in iwl_pcie_txq_stuck_timer()
168 txq->q.read_ptr, txq->q.write_ptr); in iwl_pcie_txq_stuck_timer()
[all …]
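
The tx.c hits above are iwlwifi's stuck-queue watchdog: under txq->lock it treats read_ptr == write_ptr as an idle ring and only complains when descriptors are still pending after the timeout. A minimal user-space sketch of that emptiness test follows (the struct is a two-field stand-in for iwl_txq, not the real driver type):

    #include <stdbool.h>
    #include <stdio.h>

    struct txq {
        unsigned read_ptr;   /* next descriptor hardware will complete */
        unsigned write_ptr;  /* next descriptor software will fill */
    };

    /* The watchdog should fire only when the ring still holds work:
     * equal pointers mean an empty ring, so there is nothing to be
     * stuck on and the timer is simply not re-armed. */
    static bool txq_is_stuck(const struct txq *q)
    {
        return q->read_ptr != q->write_ptr;
    }

    int main(void)
    {
        struct txq q = { .read_ptr = 3, .write_ptr = 7 };
        if (txq_is_stuck(&q))
            printf("Queue stuck: read=%u write=%u\n", q.read_ptr, q.write_ptr);
        return 0;
    }
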
Dinternal.h285 iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) in iwl_pcie_get_scratchbuf_dma() argument
287 return txq->scratchbufs_dma + in iwl_pcie_get_scratchbuf_dma()
341 struct iwl_txq *txq; member
491 struct iwl_txq *txq) in iwl_wake_queue() argument
495 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_wake_queue()
496 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); in iwl_wake_queue()
497 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); in iwl_wake_queue()
502 struct iwl_txq *txq) in iwl_stop_queue() argument
506 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_stop_queue()
507 iwl_op_mode_queue_full(trans->op_mode, txq->q.id); in iwl_stop_queue()
[all …]
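
internal.h's iwl_stop_queue()/iwl_wake_queue() pair wraps the state flip in test_and_set_bit()/test_and_clear_bit(), so the op-mode is notified exactly once per transition no matter how many callers race. A rough user-space equivalent built on C11 atomics (the bit helpers below mimic the kernel's semantics but are local definitions, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ulong queue_stopped;  /* one bit per hardware queue */

    static bool test_and_set_bit(int nr, atomic_ulong *map)
    {
        unsigned long mask = 1UL << nr;
        return atomic_fetch_or(map, mask) & mask;   /* old bit value */
    }

    static bool test_and_clear_bit(int nr, atomic_ulong *map)
    {
        unsigned long mask = 1UL << nr;
        return atomic_fetch_and(map, ~mask) & mask;
    }

    static void stop_queue(int q)
    {
        if (!test_and_set_bit(q, &queue_stopped))
            printf("queue %d: full, telling the stack to stop\n", q);
        /* else: already stopped, do not report a second time */
    }

    static void wake_queue(int q)
    {
        if (test_and_clear_bit(q, &queue_stopped))
            printf("queue %d: drained, waking the stack\n", q);
    }

    int main(void)
    {
        stop_queue(2);
        stop_queue(2);  /* no duplicate notification */
        wake_queue(2);
        return 0;
    }
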
Dtrans.c1641 struct iwl_txq *txq = &trans_pcie->txq[queue]; in iwl_trans_pcie_freeze_txq_timer() local
1644 spin_lock_bh(&txq->lock); in iwl_trans_pcie_freeze_txq_timer()
1648 if (txq->frozen == freeze) in iwl_trans_pcie_freeze_txq_timer()
1654 txq->frozen = freeze; in iwl_trans_pcie_freeze_txq_timer()
1656 if (txq->q.read_ptr == txq->q.write_ptr) in iwl_trans_pcie_freeze_txq_timer()
1661 txq->stuck_timer.expires))) { in iwl_trans_pcie_freeze_txq_timer()
1669 txq->frozen_expiry_remainder = in iwl_trans_pcie_freeze_txq_timer()
1670 txq->stuck_timer.expires - now; in iwl_trans_pcie_freeze_txq_timer()
1671 del_timer(&txq->stuck_timer); in iwl_trans_pcie_freeze_txq_timer()
1679 mod_timer(&txq->stuck_timer, in iwl_trans_pcie_freeze_txq_timer()
[all …]
Drx.c839 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; in iwl_pcie_rx_handle_rb() local
899 cmd_index = get_cmd_index(&txq->q, index); in iwl_pcie_rx_handle_rb()
904 kzfree(txq->entries[cmd_index].free_buf); in iwl_pcie_rx_handle_rb()
905 txq->entries[cmd_index].free_buf = NULL; in iwl_pcie_rx_handle_rb()
1105 del_timer(&trans_pcie->txq[i].stuck_timer); in iwl_pcie_irq_handle_error()
/linux-4.4.14/drivers/net/wireless/ath/ath9k/
Dxmit.c50 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
53 int tx_flags, struct ath_txq *txq);
55 struct ath_txq *txq, struct list_head *bf_q,
57 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct ath_txq *txq,
80 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_lock() argument
81 __acquires(&txq->axq_lock) in ath_txq_lock()
83 spin_lock_bh(&txq->axq_lock); in ath_txq_lock()
86 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_unlock() argument
87 __releases(&txq->axq_lock) in ath_txq_unlock()
[all …]
Dlink.c27 struct ath_txq *txq; in ath_tx_complete_poll_work() local
39 txq = sc->tx.txq_map[i]; in ath_tx_complete_poll_work()
41 ath_txq_lock(sc, txq); in ath_tx_complete_poll_work()
42 if (txq->axq_depth) { in ath_tx_complete_poll_work()
43 if (txq->axq_tx_inprogress) { in ath_tx_complete_poll_work()
45 ath_txq_unlock(sc, txq); in ath_tx_complete_poll_work()
48 txq->axq_tx_inprogress = true; in ath_tx_complete_poll_work()
51 ath_txq_unlock(sc, txq); in ath_tx_complete_poll_work()
178 txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath_paprd_send_frame()
Ddebug_sta.c29 struct ath_txq *txq; in read_file_node_aggr() local
57 txq = tid->txq; in read_file_node_aggr()
58 ath_txq_lock(sc, txq); in read_file_node_aggr()
71 ath_txq_unlock(sc, txq); in read_file_node_aggr()
Dath9k.h178 s8 txq; member
237 struct ath_txq *txq; member
274 struct ath_txq *txq; member
291 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; member
553 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
554 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
555 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
556 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
558 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
561 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
Ddebug.c624 static void print_queue(struct ath_softc *sc, struct ath_txq *txq, in print_queue() argument
627 ath_txq_lock(sc, txq); in print_queue()
629 seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum); in print_queue()
630 seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth); in print_queue()
631 seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth); in print_queue()
632 seq_printf(file, "%s: %3d ", "pending", txq->pending_frames); in print_queue()
633 seq_printf(file, "%s: %d\n", "stopped", txq->stopped); in print_queue()
635 ath_txq_unlock(sc, txq); in print_queue()
642 struct ath_txq *txq; in read_file_queues() local
649 txq = sc->tx.txq_map[i]; in read_file_queues()
[all …]
Ddebug.h256 struct ath_tx_status *ts, struct ath_txq *txq,
293 struct ath_txq *txq, in ath_debug_stat_tx() argument
Dmain.c57 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, in ath9k_has_pending_frames() argument
62 spin_lock_bh(&txq->axq_lock); in ath9k_has_pending_frames()
64 if (txq->axq_depth) { in ath9k_has_pending_frames()
72 if (txq->mac80211_qnum >= 0) { in ath9k_has_pending_frames()
75 list = &sc->cur_chan->acq[txq->mac80211_qnum]; in ath9k_has_pending_frames()
80 spin_unlock_bh(&txq->axq_lock); in ath9k_has_pending_frames()
806 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; in ath9k_tx()
813 TX_STAT_INC(txctl.txq->axq_qnum, txfailed); in ath9k_tx()
1618 struct ath_txq *txq; in ath9k_conf_tx() local
1625 txq = sc->tx.txq_map[queue]; in ath9k_conf_tx()
[all …]
Dgpio.c409 struct ath_txq *txq; in ath9k_init_btcoex() local
424 txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath9k_init_btcoex()
425 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); in ath9k_init_btcoex()
Dmac.c203 qi = &ah->txq[q]; in ath9k_hw_set_txq_props()
267 qi = &ah->txq[q]; in ath9k_hw_get_txq_props()
323 qi = &ah->txq[q]; in ath9k_hw_setuptxqueue()
351 qi = &ah->txq[q]; in ath9k_hw_releasetxqueue()
373 qi = &ah->txq[q]; in ath9k_hw_resettxqueue()
Dbeacon.c38 struct ath_txq *txq; in ath9k_beaconq_config() local
50 txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath9k_beaconq_config()
51 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); in ath9k_beaconq_config()
Dtx99.c128 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath9k_tx99_init()
Dinit.c682 ath_tx_cleanupq(sc, &sc->tx.txq[i]); in ath9k_init_softc()
1011 ath_tx_cleanupq(sc, &sc->tx.txq[i]); in ath9k_deinit_softc()
Dchannel.c992 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath_scan_send_probe()
1113 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath_chanctx_send_vif_ps_frame()
Dhw.h816 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; member
/linux-4.4.14/drivers/net/ethernet/marvell/
Dmv643xx_eth.c192 #define IS_TSO_HEADER(txq, addr) \ argument
193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
423 struct tx_queue txq[8]; member
461 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
463 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
482 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
484 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
487 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
488 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
[all …]
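
The IS_TSO_HEADER() macro at the top of the mv643xx_eth.c entry (and its twins in mvneta.c and fec_main.c below) classifies a DMA address by whether it falls inside the ring's preallocated TSO header block; such addresses come from a coherent allocation and must not be unmapped like ordinary fragments. A standalone rendering of the same range test (TSO_HEADER_SIZE and the addresses are illustrative values):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128

    /* True iff addr lies in [tso_hdrs_dma,
     * tso_hdrs_dma + ring_size * TSO_HEADER_SIZE). */
    static bool is_tso_header(uint64_t addr, uint64_t tso_hdrs_dma,
                              unsigned ring_size)
    {
        return addr >= tso_hdrs_dma &&
               addr < tso_hdrs_dma + (uint64_t)ring_size * TSO_HEADER_SIZE;
    }

    int main(void)
    {
        uint64_t base = 0x10000;
        printf("%d\n", is_tso_header(base, base, 512));             /* 1 */
        printf("%d\n", is_tso_header(base + 512 * 128, base, 512)); /* 0: one past end */
        return 0;
    }
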
Dmvneta.c279 #define IS_TSO_HEADER(txq, addr) \ argument
280 ((addr >= txq->tso_hdrs_phys) && \
281 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
558 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
560 txq->txq_get_index++; in mvneta_txq_inc_get()
561 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
562 txq->txq_get_index = 0; in mvneta_txq_inc_get()
566 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
568 txq->txq_put_index++; in mvneta_txq_inc_put()
569 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
[all …]
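
mvneta_txq_inc_get()/mvneta_txq_inc_put() are the canonical circular-ring index advance: bump the index, wrap to zero when it reaches the ring size. The same arithmetic in self-contained form (field names follow the driver, the ring size is arbitrary):

    #include <stdio.h>

    struct tx_queue {
        int size;        /* number of descriptors in the ring */
        int get_index;   /* consumer position */
        int put_index;   /* producer position */
    };

    static void txq_inc_get(struct tx_queue *q)
    {
        q->get_index++;
        if (q->get_index == q->size)
            q->get_index = 0;    /* wrap around */
    }

    static void txq_inc_put(struct tx_queue *q)
    {
        q->put_index++;
        if (q->put_index == q->size)
            q->put_index = 0;
    }

    int main(void)
    {
        struct tx_queue q = { .size = 4 };
        for (int i = 0; i < 6; i++) {
            txq_inc_put(&q);
            printf("put_index=%d\n", q.put_index);  /* 1 2 3 0 1 2 */
        }
        (void)txq_inc_get;
        return 0;
    }
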
Dmvpp2.c133 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) argument
1000 static inline int mvpp2_txq_phys(int port, int txq) in mvpp2_txq_phys() argument
1002 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; in mvpp2_txq_phys()
4029 struct mvpp2_tx_queue *txq = port->txqs[queue]; in mvpp2_egress_enable() local
4031 if (txq->descs != NULL) in mvpp2_egress_enable()
4146 struct mvpp2_tx_queue *txq) in mvpp2_txq_pend_desc_num_get() argument
4150 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_txq_pend_desc_num_get()
4158 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) in mvpp2_txq_next_desc_get() argument
4160 int tx_desc = txq->next_desc_to_proc; in mvpp2_txq_next_desc_get()
4162 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); in mvpp2_txq_next_desc_get()
[all …]
/linux-4.4.14/drivers/net/ethernet/freescale/
Dfec_main.c217 #define IS_TSO_HEADER(txq, addr) \ argument
218 ((addr >= txq->tso_hdrs_dma) && \
219 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
230 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_nextdesc() local
236 if (bdp >= txq->tx_bd_base) { in fec_enet_get_nextdesc()
237 base = txq->tx_bd_base; in fec_enet_get_nextdesc()
238 ring_size = txq->tx_ring_size; in fec_enet_get_nextdesc()
239 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_nextdesc()
261 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_prevdesc() local
267 if (bdp >= txq->tx_bd_base) { in fec_enet_get_prevdesc()
[all …]
Dgianfar.c2024 struct netdev_queue *txq; in free_skb_resources() local
2027 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
2030 netdev_tx_reset_queue(txq); in free_skb_resources()
2323 struct netdev_queue *txq; in gfar_start_xmit() local
2335 txq = netdev_get_tx_queue(dev, rq); in gfar_start_xmit()
2380 netif_tx_stop_queue(txq); in gfar_start_xmit()
2508 netdev_tx_sent_queue(txq, bytes_sent); in gfar_start_xmit()
2540 netif_tx_stop_queue(txq); in gfar_start_xmit()
2659 struct netdev_queue *txq; in gfar_clean_tx_ring() local
2675 txq = netdev_get_tx_queue(dev, tqi); in gfar_clean_tx_ring()
[all …]
/linux-4.4.14/drivers/net/ethernet/atheros/alx/
Dmain.c57 struct alx_buffer *txb = &alx->txq.bufs[entry]; in alx_free_txbuf()
132 struct alx_tx_queue *txq = &alx->txq; in alx_tpd_avail() local
134 if (txq->write_idx >= txq->read_idx) in alx_tpd_avail()
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
136 return txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
141 struct alx_tx_queue *txq = &alx->txq; in alx_clean_tx_irq() local
146 sw_read_idx = txq->read_idx; in alx_clean_tx_irq()
153 skb = txq->bufs[sw_read_idx].skb; in alx_clean_tx_irq()
165 txq->read_idx = sw_read_idx; in alx_clean_tx_irq()
376 alx->txq.read_idx = 0; in alx_init_ring_ptrs()
[all …]
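
alx_tpd_avail() computes the free descriptors in a producer/consumer ring, keeping one slot permanently unused so a full ring (0 free) is distinguishable from an empty one (size - 1 free). A standalone version with both wrap cases exercised:

    #include <stdio.h>

    /* Free descriptors in a ring where write_idx chases read_idx.
     * The trailing -1 reserves one slot so full != empty. */
    static int tpd_avail(int ring_size, int read_idx, int write_idx)
    {
        if (write_idx >= read_idx)
            return ring_size + read_idx - write_idx - 1;
        return read_idx - write_idx - 1;
    }

    int main(void)
    {
        printf("%d\n", tpd_avail(256, 0, 0));     /* 255: empty ring */
        printf("%d\n", tpd_avail(256, 10, 9));    /* 0: completely full */
        printf("%d\n", tpd_avail(256, 100, 10));  /* 89 */
        return 0;
    }
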
Dalx.h100 struct alx_tx_queue txq; member
Dhw.c378 u32 rxq, txq, val; in alx_stop_mac() local
383 txq = alx_read_mem32(hw, ALX_TXQ0); in alx_stop_mac()
384 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); in alx_stop_mac()
609 u32 mac, txq, rxq; in alx_start_mac() local
613 txq = alx_read_mem32(hw, ALX_TXQ0); in alx_start_mac()
614 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); in alx_start_mac()
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/
Dsge.c1138 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1140 netif_tx_stop_queue(txq->txq); in txq_stop()
1141 txq->q.stops++; in txq_stop()
1169 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1203 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1209 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1218 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1227 txq_stop(txq); in t4vf_eth_xmit()
1241 txq->mapping_err++; in t4vf_eth_xmit()
1256 txq_stop(txq); in t4vf_eth_xmit()
[all …]
Dcxgb4vf_main.c477 struct sge_eth_txq *txq; in fwevtq_handler() local
499 txq = container_of(tq, struct sge_eth_txq, q); in fwevtq_handler()
511 txq->q.restarts++; in fwevtq_handler()
512 netif_tx_wake_queue(txq->txq); in fwevtq_handler()
574 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues() local
577 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
584 err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, in setup_sge_queues()
605 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues() local
608 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
610 EQ_MAP(s, txq->q.abs_id) = &txq->q; in setup_sge_queues()
[all …]
Dadapter.h259 struct netdev_queue *txq; /* associated netdev TX queue */ member
/linux-4.4.14/drivers/net/ethernet/qlogic/qede/
Dqede_main.c202 struct qede_tx_queue *txq, in qede_free_tx_pkt() argument
205 u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; in qede_free_tx_pkt()
206 struct sk_buff *skb = txq->sw_tx_ring[idx].skb; in qede_free_tx_pkt()
211 bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
217 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
223 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
231 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
241 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
247 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
251 txq->sw_tx_ring[idx].skb = NULL; in qede_free_tx_pkt()
[all …]
/linux-4.4.14/net/sched/
Dsch_generic.c60 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
63 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
92 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
93 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
102 !netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
105 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
150 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
163 HARD_TX_LOCK(dev, txq, smp_processor_id()); in sch_direct_xmit()
164 if (!netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
[all …]
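
try_bulk_dequeue_skb() keeps pulling packets while a byte budget derived from the queue's dynamic queue limit (qdisc_avail_bulklimit(), i.e. dql_avail()) stays positive. A simplified model of that loop, with plain packet lengths standing in for skbs and a fixed budget in place of the adaptive DQL value:

    #include <stdio.h>

    /* Dequeue packets until the byte budget is exhausted.
     * Returns how many packets were taken; the budget may go
     * negative once, on the packet that crosses the limit. */
    static int bulk_dequeue(const int *pkt_len, int npkts, int bytelimit)
    {
        int sent = 0;
        while (sent < npkts && bytelimit > 0) {
            bytelimit -= pkt_len[sent];
            sent++;
        }
        return sent;
    }

    int main(void)
    {
        int lens[] = { 1500, 1500, 1500, 64 };
        /* a 3000-byte budget admits two full-size frames */
        printf("sent %d packets\n", bulk_dequeue(lens, 4, 3000));
        return 0;
    }
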
Dsch_teql.c150 struct netdev_queue *txq; in teql_destroy() local
153 txq = netdev_get_tx_queue(master->dev, 0); in teql_destroy()
156 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); in teql_destroy()
158 qdisc_reset(rtnl_dereference(txq->qdisc)); in teql_destroy()
220 struct net_device *dev, struct netdev_queue *txq, in __teql_resolve() argument
260 struct netdev_queue *txq) in teql_resolve() argument
265 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) in teql_resolve()
272 res = __teql_resolve(skb, skb_res, dev, txq, dst); in teql_resolve()
/linux-4.4.14/drivers/net/wireless/ath/ath5k/
Dbase.c733 struct ath5k_txq *txq, int padsize, in ath5k_txbuf_setup() argument
830 spin_lock_bh(&txq->lock); in ath5k_txbuf_setup()
831 list_add_tail(&bf->list, &txq->q); in ath5k_txbuf_setup()
832 txq->txq_len++; in ath5k_txbuf_setup()
833 if (txq->link == NULL) /* is this first packet? */ in ath5k_txbuf_setup()
834 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); in ath5k_txbuf_setup()
836 *txq->link = bf->daddr; in ath5k_txbuf_setup()
838 txq->link = &ds->ds_link; in ath5k_txbuf_setup()
839 ath5k_hw_start_tx_dma(ah, txq->qnum); in ath5k_txbuf_setup()
841 spin_unlock_bh(&txq->lock); in ath5k_txbuf_setup()
[all …]
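
ath5k_txbuf_setup() appends a buffer to the hardware's DMA chain: the first packet's address is written to the queue's TXDP register, later packets are linked in by patching the previous descriptor's ds_link word, and txq->link always remembers which word to patch next. A sketch of that threading, with plain memory standing in for the register and descriptors:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc { uint32_t ds_link; };  /* DMA address of the next descriptor */

    struct txq {
        uint32_t  txdp;   /* stands in for the hardware TXDP register */
        uint32_t *link;   /* where to store the next descriptor's address */
    };

    static void txq_append(struct txq *q, struct desc *d, uint32_t daddr)
    {
        d->ds_link = 0;
        if (q->link == NULL)
            q->txdp = daddr;    /* first packet: point hardware at it */
        else
            *q->link = daddr;   /* chain off the previous descriptor */
        q->link = &d->ds_link;  /* the next append patches this word */
    }

    int main(void)
    {
        struct txq q = { 0, NULL };
        struct desc d1, d2;
        txq_append(&q, &d1, 0x1000);
        txq_append(&q, &d2, 0x2000);
        printf("TXDP=%#" PRIx32 " d1.link=%#" PRIx32 "\n",
               q.txdp, d1.ds_link);  /* 0x1000 0x2000 */
        return 0;
    }
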
Ddebug.c836 struct ath5k_txq *txq; in read_file_queue() local
844 txq = &ah->txqs[i]; in read_file_queue()
847 "%02d: %ssetup\n", i, txq->setup ? "" : "not "); in read_file_queue()
849 if (!txq->setup) in read_file_queue()
853 spin_lock_bh(&txq->lock); in read_file_queue()
854 list_for_each_entry_safe(bf, bf0, &txq->q, list) in read_file_queue()
856 spin_unlock_bh(&txq->lock); in read_file_queue()
859 " len: %d bufs: %d\n", txq->txq_len, n); in read_file_queue()
861 " stuck: %d\n", txq->txq_stuck); in read_file_queue()
Dbase.h108 struct ath5k_txq *txq, struct ieee80211_tx_control *control);
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
Dsge.c179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
[all …]
Dadapter.h205 struct sge_txq txq[SGE_TXQ_PER_SET]; member
Dcxgb3_main.c604 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); in ring_dbs()
/linux-4.4.14/net/core/
Dnetpoll.c73 struct netdev_queue *txq) in netpoll_start_xmit() argument
92 status = netdev_start_xmit(skb, dev, txq, false); in netpoll_start_xmit()
105 while ((skb = skb_dequeue(&npinfo->txq))) { in queue_process()
107 struct netdev_queue *txq; in queue_process() local
114 txq = skb_get_tx_queue(dev, skb); in queue_process()
117 HARD_TX_LOCK(dev, txq, smp_processor_id()); in queue_process()
118 if (netif_xmit_frozen_or_stopped(txq) || in queue_process()
119 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { in queue_process()
120 skb_queue_head(&npinfo->txq, skb); in queue_process()
121 HARD_TX_UNLOCK(dev, txq); in queue_process()
[all …]
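
netpoll's queue_process() illustrates the retry discipline for a stopped queue: if HARD_TX_LOCK finds the queue frozen or the driver rejects the packet, the skb goes back at the head of the queue, not the tail, so wire order is preserved for the later retry. A toy single-threaded model of head-requeue (the "driver" refuses the first attempt on purpose):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy FIFO of packet ids; head requeue keeps wire order intact. */
    #define QLEN 8
    static int q[QLEN];
    static int head, count;

    static void push_tail(int pkt) { q[(head + count++) % QLEN] = pkt; }
    static int  pop_head(void)     { int p = q[head]; head = (head + 1) % QLEN; count--; return p; }
    static void push_head(int pkt) { head = (head + QLEN - 1) % QLEN; q[head] = pkt; count++; }

    static bool driver_busy = true;  /* pretend the first attempt fails */

    static bool try_xmit(int pkt)
    {
        if (driver_busy) { driver_busy = false; return false; }
        printf("sent packet %d\n", pkt);
        return true;
    }

    int main(void)
    {
        push_tail(1); push_tail(2);
        while (count) {
            int pkt = pop_head();
            if (!try_xmit(pkt))
                push_head(pkt);  /* requeue at head; real code defers and retries */
        }
        return 0;
    }
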
Ddev.c1906 static void netif_setup_tc(struct net_device *dev, unsigned int txq) in netif_setup_tc() argument
1912 if (tc->offset + tc->count > txq) { in netif_setup_tc()
1923 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2160 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) in netif_set_real_num_tx_queues() argument
2164 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
2172 txq); in netif_set_real_num_tx_queues()
2177 netif_setup_tc(dev, txq); in netif_set_real_num_tx_queues()
2179 if (txq < dev->real_num_tx_queues) { in netif_set_real_num_tx_queues()
2180 qdisc_reset_all_tx_gt(dev, txq); in netif_set_real_num_tx_queues()
2182 netif_reset_xps_queues_gt(dev, txq); in netif_set_real_num_tx_queues()
[all …]
Dpktgen.c3370 struct netdev_queue *txq; in pktgen_xmit() local
3439 txq = skb_get_tx_queue(odev, pkt_dev->skb); in pktgen_xmit()
3443 HARD_TX_LOCK(odev, txq, smp_processor_id()); in pktgen_xmit()
3445 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { in pktgen_xmit()
3453 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); in pktgen_xmit()
3461 if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) in pktgen_xmit()
3484 HARD_TX_UNLOCK(odev, txq); in pktgen_xmit()
Dnet-sysfs.c1338 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; in register_queue_kobjects() local
1357 txq = real_tx; in register_queue_kobjects()
1362 netdev_queue_update_kobjects(dev, txq, 0); in register_queue_kobjects()
/linux-4.4.14/drivers/atm/
Dambassador.c628 amb_txq * txq = &dev->txq; in tx_give() local
636 spin_lock_irqsave (&txq->lock, flags); in tx_give()
638 if (txq->pending < txq->maximum) { in tx_give()
639 PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr); in tx_give()
641 *txq->in.ptr = *tx; in tx_give()
642 txq->pending++; in tx_give()
643 txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit); in tx_give()
645 wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr)); in tx_give()
648 if (txq->pending > txq->high) in tx_give()
649 txq->high = txq->pending; in tx_give()
[all …]
Dfore200e.c856 struct host_txq* txq = &fore200e->host_txq; in fore200e_tx_irq() local
866 entry = &txq->host_entry[ txq->tail ]; in fore200e_tx_irq()
873 entry, txq->tail, entry->vc_map, entry->skb); in fore200e_tx_irq()
945 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); in fore200e_tx_irq()
1561 struct host_txq* txq = &fore200e->host_txq; in fore200e_send() local
1642 entry = &txq->host_entry[ txq->head ]; in fore200e_send()
1644 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { in fore200e_send()
1687 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); in fore200e_send()
1688 txq->txing++; in fore200e_send()
2268 struct host_txq* txq = &fore200e->host_txq; in fore200e_init_tx_queue() local
[all …]
Dfirestream.c1401 static int init_q(struct fs_dev *dev, struct queue *txq, int queue, in init_q() argument
1428 txq->sa = p; in init_q()
1429 txq->ea = p; in init_q()
1430 txq->offset = queue; in init_q()
1531 static void free_queue(struct fs_dev *dev, struct queue *txq) in free_queue() argument
1535 write_fs (dev, Q_SA(txq->offset), 0); in free_queue()
1536 write_fs (dev, Q_EA(txq->offset), 0); in free_queue()
1537 write_fs (dev, Q_RP(txq->offset), 0); in free_queue()
1538 write_fs (dev, Q_WP(txq->offset), 0); in free_queue()
1541 fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa); in free_queue()
[all …]
Dambassador.h634 amb_txq txq; member
/linux-4.4.14/drivers/net/wireless/iwlegacy/
Dcommon.c382 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; in il_send_cmd_sync()
2724 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) in il_txq_update_write_ptr() argument
2727 int txq_id = txq->q.id; in il_txq_update_write_ptr()
2729 if (txq->need_update == 0) in il_txq_update_write_ptr()
2747 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2755 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2756 txq->need_update = 0; in il_txq_update_write_ptr()
2766 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_unmap() local
2767 struct il_queue *q = &txq->q; in il_tx_queue_unmap()
2773 il->ops->txq_free_tfd(il, txq); in il_tx_queue_unmap()
[all …]
D3945.c289 struct il_tx_queue *txq = &il->txq[txq_id]; in il3945_tx_queue_reclaim() local
290 struct il_queue *q = &txq->q; in il3945_tx_queue_reclaim()
298 skb = txq->skbs[txq->q.read_ptr]; in il3945_tx_queue_reclaim()
300 txq->skbs[txq->q.read_ptr] = NULL; in il3945_tx_queue_reclaim()
301 il->ops->txq_free_tfd(il, txq); in il3945_tx_queue_reclaim()
306 il_wake_queue(il, txq); in il3945_tx_queue_reclaim()
319 struct il_tx_queue *txq = &il->txq[txq_id]; in il3945_hdl_tx() local
326 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { in il3945_hdl_tx()
329 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); in il3945_hdl_tx()
346 txq->time_stamp = jiffies; in il3945_hdl_tx()
[all …]
D4965-mac.c232 if (!il->txq) { in il4965_hw_nic_init()
1665 struct il_tx_queue *txq; in il4965_tx_skb() local
1766 txq = &il->txq[txq_id]; in il4965_tx_skb()
1767 q = &txq->q; in il4965_tx_skb()
1782 txq->skbs[q->write_ptr] = skb; in il4965_tx_skb()
1785 out_cmd = txq->cmd[q->write_ptr]; in il4965_tx_skb()
1786 out_meta = &txq->meta[q->write_ptr]; in il4965_tx_skb()
1853 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); in il4965_tx_skb()
1857 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, in il4965_tx_skb()
1861 txq->need_update = 1; in il4965_tx_skb()
[all …]
D4965.h75 void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
76 int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
107 void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
D3945.h227 int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
229 void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
231 int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
D3945-mac.c470 struct il_tx_queue *txq = NULL; in il3945_tx_skb() local
534 txq = &il->txq[txq_id]; in il3945_tx_skb()
535 q = &txq->q; in il3945_tx_skb()
544 txq->skbs[q->write_ptr] = skb; in il3945_tx_skb()
547 out_cmd = txq->cmd[idx]; in il3945_tx_skb()
548 out_meta = &txq->meta[idx]; in il3945_tx_skb()
616 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); in il3945_tx_skb()
620 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, in il3945_tx_skb()
624 txq->need_update = 1; in il3945_tx_skb()
627 txq->need_update = 0; in il3945_tx_skb()
[all …]
Dcommon.h1271 struct il_tx_queue *txq; member
1548 struct il_tx_queue *txq,
1551 struct il_tx_queue *txq, dma_addr_t addr,
1553 void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
1554 int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
1765 void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
2269 il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) in il_set_swq_id() argument
2274 txq->swq_id = (hwq << 2) | ac; in il_set_swq_id()
2291 il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) in il_wake_queue() argument
2293 u8 queue = txq->swq_id; in il_wake_queue()
[all …]
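
il_set_swq_id() packs the access category into the low two bits of swq_id and the hardware queue number above them; debug.c below unpacks with "& 3" and "(>> 2) & 0x1f". A round-trip check of that encoding:

    #include <assert.h>
    #include <stdio.h>

    static unsigned char set_swq_id(unsigned hwq, unsigned ac)
    {
        return (hwq << 2) | ac;   /* ac in bits 1:0, hwq in bits 6:2 */
    }

    int main(void)
    {
        unsigned char id = set_swq_id(9, 2);
        unsigned ac  = id & 3;            /* low two bits */
        unsigned hwq = (id >> 2) & 0x1f;  /* five bits of hardware queue */
        assert(ac == 2 && hwq == 9);
        printf("swq_id=0x%02x ac=%u hwq=%u\n", (unsigned)id, ac, hwq);
        return 0;
    }
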
Ddebug.c848 struct il_tx_queue *txq; in il_dbgfs_tx_queue_read() local
857 if (!il->txq) { in il_dbgfs_tx_queue_read()
866 txq = &il->txq[cnt]; in il_dbgfs_tx_queue_read()
867 q = &txq->q; in il_dbgfs_tx_queue_read()
874 txq->swq_id, txq->swq_id & 3, in il_dbgfs_tx_queue_read()
875 (txq->swq_id >> 2) & 0x1f); in il_dbgfs_tx_queue_read()
D4965.c1549 il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq, in il4965_txq_update_byte_cnt_tbl() argument
1553 int txq_id = txq->q.id; in il4965_txq_update_byte_cnt_tbl()
1554 int write_ptr = txq->q.write_ptr; in il4965_txq_update_byte_cnt_tbl()
/linux-4.4.14/drivers/net/ethernet/brocade/bna/
Dbna_tx_rx.c2878 struct bna_txq *txq; in bna_tx_sm_started_entry() local
2881 list_for_each_entry(txq, &tx->txq_q, qe) { in bna_tx_sm_started_entry()
2882 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
2884 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry()
3097 struct bna_txq *txq = NULL; in bna_bfi_tx_enet_start() local
3107 txq = txq ? list_next_entry(txq, qe) in bna_bfi_tx_enet_start()
3109 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start()
3110 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start()
3113 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start()
3115 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start()
[all …]
Dbna_types.h434 struct bna_txq *txq; member
523 struct bna_txq *txq; /* BFI_MAX_TXQ entries */ member
Dbnad_ethtool.c839 bnad->tx_info[i].tcb[j]->txq) { in bnad_per_q_stats_fill()
841 buf[bi++] = tcb->txq->tx_packets; in bnad_per_q_stats_fill()
842 buf[bi++] = tcb->txq->tx_bytes; in bnad_per_q_stats_fill()
Dbnad.c223 tcb->txq->tx_packets += sent_packets; in bnad_txcmpl_process()
224 tcb->txq->tx_bytes += sent_bytes; in bnad_txcmpl_process()
1017 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_setup()
1027 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_destroy()
2431 bnad->tx_info[i].tcb[j]->txq->tx_packets; in bnad_netdev_qstats_fill()
2433 bnad->tx_info[i].tcb[j]->txq->tx_bytes; in bnad_netdev_qstats_fill()
/linux-4.4.14/drivers/net/
Difb.c68 struct netdev_queue *txq; in ifb_ri_tasklet() local
71 txq = netdev_get_tx_queue(txp->dev, txp->txqnum); in ifb_ri_tasklet()
74 if (!__netif_tx_trylock(txq)) in ifb_ri_tasklet()
77 __netif_tx_unlock(txq); in ifb_ri_tasklet()
113 if (__netif_tx_trylock(txq)) { in ifb_ri_tasklet()
117 if (netif_tx_queue_stopped(txq)) in ifb_ri_tasklet()
118 netif_tx_wake_queue(txq); in ifb_ri_tasklet()
120 __netif_tx_unlock(txq); in ifb_ri_tasklet()
123 __netif_tx_unlock(txq); in ifb_ri_tasklet()
Dtun.c445 u32 txq = 0; in tun_select_queue() local
451 txq = skb_get_hash(skb); in tun_select_queue()
452 if (txq) { in tun_select_queue()
453 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); in tun_select_queue()
455 tun_flow_save_rps_rxhash(e, txq); in tun_select_queue()
456 txq = e->queue_index; in tun_select_queue()
459 txq = ((u64)txq * numqueues) >> 32; in tun_select_queue()
461 txq = skb_get_rx_queue(skb); in tun_select_queue()
462 while (unlikely(txq >= numqueues)) in tun_select_queue()
463 txq -= numqueues; in tun_select_queue()
[all …]
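
tun_select_queue() maps the 32-bit flow hash onto the live queue count with ((u64)txq * numqueues) >> 32, which scales the hash into range without a divide or modulo. The same trick standalone:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a 32-bit hash into [0, nqueues) with one multiply. */
    static uint32_t pick_queue(uint32_t hash, uint32_t nqueues)
    {
        return ((uint64_t)hash * nqueues) >> 32;
    }

    int main(void)
    {
        uint32_t hashes[] = { 0x00000000u, 0x7fffffffu, 0xffffffffu };
        for (int i = 0; i < 3; i++)
            printf("hash %#010x -> queue %u of 8\n",
                   (unsigned)hashes[i], (unsigned)pick_queue(hashes[i], 8));
        return 0;   /* queues 0, 3, 7 */
    }
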
Dvirtio_net.c169 static int txq2vq(int txq) in txq2vq() argument
171 return txq * 2 + 1; in txq2vq()
921 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); in start_xmit() local
970 if (kick || netif_xmit_stopped(txq)) in start_xmit()
Dmacvlan.c779 struct netdev_queue *txq, in macvlan_set_lockdep_class_one() argument
782 lockdep_set_class(&txq->_xmit_lock, in macvlan_set_lockdep_class_one()
/linux-4.4.14/drivers/bluetooth/
Dhci_ath.c49 struct sk_buff_head txq; member
108 skb_queue_head_init(&ath->txq); in ath_open()
124 skb_queue_purge(&ath->txq); in ath_close()
142 skb_queue_purge(&ath->txq); in ath_flush()
228 skb_queue_tail(&ath->txq, skb); in ath_enqueue()
240 return skb_dequeue(&ath->txq); in ath_dequeue()
Dhci_h4.c52 struct sk_buff_head txq; member
66 skb_queue_head_init(&h4->txq); in h4_open()
79 skb_queue_purge(&h4->txq); in h4_flush()
93 skb_queue_purge(&h4->txq); in h4_close()
112 skb_queue_tail(&h4->txq, skb); in h4_enqueue()
146 return skb_dequeue(&h4->txq); in h4_dequeue()
Dhci_ll.c83 struct sk_buff_head txq; member
115 skb_queue_tail(&ll->txq, skb); in send_hcill_cmd()
131 skb_queue_head_init(&ll->txq); in ll_open()
150 skb_queue_purge(&ll->txq); in ll_flush()
163 skb_queue_purge(&ll->txq); in ll_close()
186 skb_queue_tail(&ll->txq, skb); in __ll_do_awake()
319 skb_queue_tail(&ll->txq, skb); in ll_enqueue()
505 return skb_dequeue(&ll->txq); in ll_dequeue()
Dbtsdio.c63 struct sk_buff_head txq; member
115 while ((skb = skb_dequeue(&data->txq))) { in btsdio_work()
119 skb_queue_head(&data->txq, skb); in btsdio_work()
244 skb_queue_purge(&data->txq); in btsdio_flush()
272 skb_queue_tail(&data->txq, skb); in btsdio_send_frame()
302 skb_queue_head_init(&data->txq); in btsdio_probe()
Dhci_qca.c78 struct sk_buff_head txq; member
220 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
397 skb_queue_head_init(&qca->txq); in qca_open()
511 skb_queue_purge(&qca->txq); in qca_flush()
526 skb_queue_purge(&qca->txq); in qca_close()
645 skb_queue_tail(&qca->txq, skb); in device_woke_up()
687 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
697 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
819 return skb_dequeue(&qca->txq); in qca_dequeue()
878 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
Ddtl1_cs.c75 struct sk_buff_head txq; member
156 skb = skb_dequeue(&(info->txq)); in dtl1_write_wakeup()
168 skb_queue_head(&(info->txq), skb); in dtl1_write_wakeup()
369 skb_queue_purge(&(info->txq)); in dtl1_hci_flush()
420 skb_queue_tail(&(info->txq), s); in dtl1_hci_send_frame()
442 skb_queue_head_init(&(info->txq)); in dtl1_open()
Dbtuart_cs.c72 struct sk_buff_head txq; member
152 skb = skb_dequeue(&(info->txq)); in btuart_write_wakeup()
164 skb_queue_head(&(info->txq), skb); in btuart_write_wakeup()
403 skb_queue_purge(&(info->txq)); in btuart_hci_flush()
441 skb_queue_tail(&(info->txq), skb); in btuart_hci_send_frame()
461 skb_queue_head_init(&(info->txq)); in btuart_open()
Dbluecard_cs.c72 struct sk_buff_head txq; member
260 skb = skb_dequeue(&(info->txq)); in bluecard_write_wakeup()
329 skb_queue_head(&(info->txq), skb); in bluecard_write_wakeup()
601 skb_queue_tail(&(info->txq), skb); in bluecard_hci_set_baud_rate()
618 skb_queue_purge(&(info->txq)); in bluecard_hci_flush()
677 skb_queue_tail(&(info->txq), skb); in bluecard_hci_send_frame()
701 skb_queue_head_init(&(info->txq)); in bluecard_open()
784 skb_queue_purge(&(info->txq)); in bluecard_open()
Dhci_bcm.c73 struct sk_buff_head txq; member
269 skb_queue_tail(&bcm->txq, skb); in bcm_set_diag()
286 skb_queue_head_init(&bcm->txq); in bcm_open()
339 skb_queue_purge(&bcm->txq); in bcm_close()
353 skb_queue_purge(&bcm->txq); in bcm_flush()
476 skb_queue_tail(&bcm->txq, skb); in bcm_enqueue()
495 skb = skb_dequeue(&bcm->txq); in bcm_dequeue()
Dhci_intel.c84 struct sk_buff_head txq; member
194 skb_queue_head(&intel->txq, skb); in intel_lpm_suspend()
238 skb_queue_head(&intel->txq, skb); in intel_lpm_resume()
278 skb_queue_head(&intel->txq, skb); in intel_lpm_host_wake()
408 skb_queue_head_init(&intel->txq); in intel_open()
431 skb_queue_purge(&intel->txq); in intel_close()
445 skb_queue_purge(&intel->txq); in intel_flush()
524 skb_queue_tail(&intel->txq, skb); in intel_set_baudrate()
1114 skb_queue_tail(&intel->txq, skb); in intel_enqueue()
1124 skb = skb_dequeue(&intel->txq); in intel_dequeue()
Dbt3c_cs.c77 struct sk_buff_head txq; member
196 skb = skb_dequeue(&(info->txq)); in bt3c_write_wakeup()
389 skb_queue_purge(&(info->txq)); in bt3c_hci_flush()
428 skb_queue_tail(&(info->txq), skb); in bt3c_hci_send_frame()
542 skb_queue_head_init(&(info->txq)); in bt3c_open()
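
Every Bluetooth driver in this directory keeps the identical sk_buff FIFO lifecycle: skb_queue_head_init() at open, skb_queue_tail() to enqueue a frame, skb_dequeue() in the write/wakeup path, and skb_queue_purge() on flush and close. The toy list below mirrors that contract in plain C; unlike the real helpers it is single-threaded (the kernel versions take the queue's internal spinlock):

    #include <stdio.h>
    #include <stdlib.h>

    struct skb { int id; struct skb *next; };
    struct skb_head { struct skb *first, *last; };

    static void skb_queue_head_init(struct skb_head *q) { q->first = q->last = NULL; }

    static void skb_queue_tail(struct skb_head *q, struct skb *s)  /* enqueue path */
    {
        s->next = NULL;
        if (q->last) q->last->next = s; else q->first = s;
        q->last = s;
    }

    static struct skb *skb_dequeue(struct skb_head *q)             /* transmit path */
    {
        struct skb *s = q->first;
        if (s && !(q->first = s->next))
            q->last = NULL;
        return s;
    }

    static void skb_queue_purge(struct skb_head *q)                /* flush/close */
    {
        struct skb *s;
        while ((s = skb_dequeue(q)))
            free(s);
    }

    int main(void)
    {
        struct skb_head txq;
        struct skb *s = malloc(sizeof(*s));
        s->id = 1;
        skb_queue_head_init(&txq);                        /* open */
        skb_queue_tail(&txq, s);                          /* send_frame */
        printf("dequeued %d\n", skb_dequeue(&txq)->id);   /* write wakeup */
        free(s);
        skb_queue_purge(&txq);                            /* close */
        return 0;
    }
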
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
Dsge.c1087 netif_tx_stop_queue(q->txq); in eth_txq_stop()
2384 struct sge_ofld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb() local
2387 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
2396 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && in sge_tx_timer_cb()
2397 __netif_tx_trylock(q->txq)) { in sge_tx_timer_cb()
2408 __netif_tx_unlock(q->txq); in sge_tx_timer_cb()
2637 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
2647 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
2649 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2651 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
[all …]
Dcxgb4_main.c312 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable() local
318 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
325 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
341 txq->dcb_prio = value; in dcb_tx_queue_prio_enable()
624 struct sge_txq *txq; in fwevtq_handler() local
626 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
627 txq->restarts++; in fwevtq_handler()
628 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { in fwevtq_handler()
631 eq = container_of(txq, struct sge_eth_txq, q); in fwevtq_handler()
632 netif_tx_wake_queue(eq->txq); in fwevtq_handler()
[all …]
Dcxgb4.h606 struct netdev_queue *txq; /* associated netdev TX queue */ member
1107 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1110 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
1113 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
1371 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
Dt4_hw.c6361 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, in t4_cfg_pfvf() argument
6377 FW_PFVF_CMD_NEQ_V(txq)); in t4_cfg_pfvf()
/linux-4.4.14/include/linux/
Dnetdevice.h2588 void netif_schedule_queue(struct netdev_queue *txq);
2619 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues() local
2620 netif_tx_start_queue(txq); in netif_tx_start_all_queues()
2643 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues() local
2644 netif_tx_wake_queue(txq); in netif_tx_wake_all_queues()
2873 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue() local
2875 netif_tx_start_queue(txq); in netif_start_subqueue()
2887 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue() local
2888 netif_tx_stop_queue(txq); in netif_stop_subqueue()
2901 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped() local
[all …]
Dnetpoll.h41 struct sk_buff_head txq; member
Dti_wilink_st.h155 struct sk_buff_head txq, tx_waitq; member
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/
Dhtc_pipe.c106 if (list_empty(&ep->txq)) in get_htc_packet_credit_based()
110 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet_credit_based()
114 __func__, packet, get_queue_depth(&ep->txq)); in get_htc_packet_credit_based()
159 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet_credit_based()
183 if (list_empty(&ep->txq)) in get_htc_packet()
186 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet()
191 __func__, packet, get_queue_depth(&ep->txq)); in get_htc_packet()
303 struct list_head *txq) in htc_try_send() argument
313 __func__, txq, in htc_try_send()
314 (txq == NULL) ? 0 : get_queue_depth(txq)); in htc_try_send()
[all …]
Dhtc_mbox.c426 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); in htc_tx_comp_update()
439 struct list_head *txq) in htc_tx_complete() argument
441 if (list_empty(txq)) in htc_tx_complete()
446 endpoint->eid, get_queue_depth(txq)); in htc_tx_complete()
448 ath6kl_tx_complete(endpoint->target, txq); in htc_tx_complete()
611 if (list_empty(&endpoint->txq)) in ath6kl_htc_tx_pkts_get()
613 packet = list_first_entry(&endpoint->txq, struct htc_packet, in ath6kl_htc_tx_pkts_get()
618 packet, get_queue_depth(&endpoint->txq)); in ath6kl_htc_tx_pkts_get()
628 packet = list_first_entry(&endpoint->txq, struct htc_packet, in ath6kl_htc_tx_pkts_get()
861 struct list_head txq; in ath6kl_htc_tx_from_queue() local
[all …]
Dhtc.h511 struct list_head txq; member
Ddebug.c272 get_queue_depth(&ep_dist->htc_ep->txq)); in dump_cred_dist()
744 get_queue_depth(&ep_list->htc_ep->txq)); in read_file_credit_dist_stats()
/linux-4.4.14/include/net/
Dsch_generic.h128 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq) in qdisc_avail_bulklimit() argument
132 return dql_avail(&txq->dql); in qdisc_avail_bulklimit()
438 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_all_tx_empty() local
439 const struct Qdisc *q = rcu_dereference(txq->qdisc); in qdisc_all_tx_empty()
456 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_tx_changing() local
457 if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) in qdisc_tx_changing()
469 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_tx_is_noop() local
470 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) in qdisc_tx_is_noop()
Dpkt_sched.h102 struct net_device *dev, struct netdev_queue *txq,
Dmac80211.h1376 struct ieee80211_txq *txq; member
1702 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; member
3500 struct ieee80211_txq *txq);
5469 struct ieee80211_txq *txq);
/linux-4.4.14/drivers/net/wireless/
Dmwl8k.c248 struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES]; member
1443 struct mwl8k_tx_queue *txq = priv->txq + index; in mwl8k_txq_init() local
1447 txq->len = 0; in mwl8k_txq_init()
1448 txq->head = 0; in mwl8k_txq_init()
1449 txq->tail = 0; in mwl8k_txq_init()
1453 txq->txd = pci_zalloc_consistent(priv->pdev, size, &txq->txd_dma); in mwl8k_txq_init()
1454 if (txq->txd == NULL) { in mwl8k_txq_init()
1459 txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); in mwl8k_txq_init()
1460 if (txq->skb == NULL) { in mwl8k_txq_init()
1461 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); in mwl8k_txq_init()
[all …]
Dairo.c1226 struct sk_buff_head txq;// tx queue used by mpi350 code member
1931 npacks = skb_queue_len (&ai->txq); in mpi_start_xmit()
1939 skb_queue_tail (&ai->txq, skb); in mpi_start_xmit()
1944 skb_queue_tail (&ai->txq, skb); in mpi_start_xmit()
1974 if ((skb = skb_dequeue(&ai->txq)) == NULL) { in mpi_send_packet()
2401 if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) { in stop_airo_card()
2403 for (;(skb = skb_dequeue(&ai->txq));) in stop_airo_card()
2812 skb_queue_head_init (&ai->txq); in _init_airo_card()
3440 if (!skb_queue_empty(&ai->txq)) { in airo_handle_tx()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
Den_tx.c272 netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); in mlx5e_sq_xmit()
275 netif_tx_stop_queue(sq->txq); in mlx5e_sq_xmit()
279 if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { in mlx5e_sq_xmit()
392 netdev_tx_completed_queue(sq->txq, npkts, nbytes); in mlx5e_poll_tx_cq()
394 if (netif_tx_queue_stopped(sq->txq) && in mlx5e_poll_tx_cq()
397 netif_tx_wake_queue(sq->txq); in mlx5e_poll_tx_cq()
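
en_tx.c pairs netdev_tx_sent_queue() on the transmit path with netdev_tx_completed_queue() on the completion path; the byte-queue-limits core uses the running difference to decide when to stop and restart the queue. A bare-bones model of that accounting with a fixed threshold (real DQL adapts the limit at runtime):

    #include <stdbool.h>
    #include <stdio.h>

    static long inflight_bytes;
    static bool queue_stopped;
    #define BQL_LIMIT 100000L  /* invented budget; real DQL is adaptive */

    static void tx_sent(int bytes)       /* xmit path */
    {
        inflight_bytes += bytes;
        if (inflight_bytes >= BQL_LIMIT)
            queue_stopped = true;        /* stop before overfilling */
    }

    static void tx_completed(int bytes)  /* IRQ/poll completion path */
    {
        inflight_bytes -= bytes;
        if (queue_stopped && inflight_bytes < BQL_LIMIT) {
            queue_stopped = false;
            printf("queue restarted, %ld bytes in flight\n", inflight_bytes);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 80; i++)
            tx_sent(1500);
        tx_completed(1500 * 40);
        return 0;
    }
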
Den_main.c567 sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); in mlx5e_create_sq()
700 netdev_tx_reset_queue(sq->txq); in mlx5e_open_sq()
701 netif_tx_start_queue(sq->txq); in mlx5e_open_sq()
713 static inline void netif_tx_disable_queue(struct netdev_queue *txq) in netif_tx_disable_queue() argument
715 __netif_tx_lock_bh(txq); in netif_tx_disable_queue()
716 netif_tx_stop_queue(txq); in netif_tx_disable_queue()
717 __netif_tx_unlock_bh(txq); in netif_tx_disable_queue()
724 netif_tx_disable_queue(sq->txq); in mlx5e_close_sq()
Den.h378 struct netdev_queue *txq; member
/linux-4.4.14/drivers/net/ethernet/samsung/sxgbe/
Dsxgbe_main.c337 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); in sxgbe_clear_descriptors()
573 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
582 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
606 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
642 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in dma_free_tx_skbufs()
658 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in free_dma_desc_resources()
672 priv->txq[queue_num] = devm_kmalloc(priv->device, in txring_mem_alloc()
674 if (!priv->txq[queue_num]) in txring_mem_alloc()
808 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in sxgbe_tx_all_clean()
827 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; in sxgbe_restart_tx_queue()
[all …]
Dsxgbe_platform.c130 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); in sxgbe_platform_probe()
131 if (priv->txq[i]->irq_no <= 0) { in sxgbe_platform_probe()
163 irq_dispose_mapping(priv->txq[i]->irq_no); in sxgbe_platform_probe()
Dsxgbe_common.h458 struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES]; member
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/
Di40evf_virtchnl.c256 vqpi->txq.vsi_id = vqci->vsi_id; in i40evf_configure_queues()
257 vqpi->txq.queue_id = i; in i40evf_configure_queues()
258 vqpi->txq.ring_len = adapter->tx_rings[i]->count; in i40evf_configure_queues()
259 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; in i40evf_configure_queues()
260 vqpi->txq.headwb_enabled = 1; in i40evf_configure_queues()
261 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + in i40evf_configure_queues()
262 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); in i40evf_configure_queues()
Di40e_virtchnl.h214 struct i40e_virtchnl_txq_info txq; member
/linux-4.4.14/drivers/net/usb/
Dusbnet.c782 temp = unlink_urbs(dev, &dev->txq) + in usbnet_terminate_urbs()
787 wait_skb_queue_empty(&dev->txq); in usbnet_terminate_urbs()
1102 unlink_urbs (dev, &dev->txq); in usbnet_deferred_kevent()
1248 (void) defer_bh(dev, skb, &dev->txq, tx_done); in tx_complete()
1257 unlink_urbs (dev, &dev->txq); in usbnet_tx_timeout()
1382 spin_lock_irqsave(&dev->txq.lock, flags); in usbnet_start_xmit()
1385 spin_unlock_irqrestore(&dev->txq.lock, flags); in usbnet_start_xmit()
1397 spin_unlock_irqrestore(&dev->txq.lock, flags); in usbnet_start_xmit()
1416 __usbnet_queue_skb(&dev->txq, skb, tx_start); in usbnet_start_xmit()
1417 if (dev->txq.qlen >= TX_QLEN (dev)) in usbnet_start_xmit()
[all …]
Dlan78xx.c245 struct sk_buff_head txq; member
1992 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); in lan78xx_terminate_urbs()
1996 !skb_queue_empty(&dev->txq) && in lan78xx_terminate_urbs()
2155 defer_bh(dev, skb, &dev->txq, tx_done); in tx_complete()
2641 spin_lock_irqsave(&dev->txq.lock, flags); in lan78xx_tx_bh()
2644 spin_unlock_irqrestore(&dev->txq.lock, flags); in lan78xx_tx_bh()
2664 spin_unlock_irqrestore(&dev->txq.lock, flags); in lan78xx_tx_bh()
2674 lan78xx_queue_skb(&dev->txq, skb, tx_start); in lan78xx_tx_bh()
2675 if (skb_queue_len(&dev->txq) >= dev->tx_qlen) in lan78xx_tx_bh()
2690 spin_unlock_irqrestore(&dev->txq.lock, flags); in lan78xx_tx_bh()
[all …]
/linux-4.4.14/drivers/net/wireless/mwifiex/
Dtxrx.c287 struct netdev_queue *txq; in mwifiex_write_data_complete() local
323 txq = netdev_get_tx_queue(priv->netdev, index); in mwifiex_write_data_complete()
324 if (netif_tx_queue_stopped(txq)) { in mwifiex_write_data_complete()
325 netif_tx_wake_queue(txq); in mwifiex_write_data_complete()
Dinit.c335 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); in mwifiex_wake_up_net_dev_queue() local
337 if (netif_tx_queue_stopped(txq)) in mwifiex_wake_up_net_dev_queue()
338 netif_tx_wake_queue(txq); in mwifiex_wake_up_net_dev_queue()
356 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); in mwifiex_stop_net_dev_queue() local
358 if (!netif_tx_queue_stopped(txq)) in mwifiex_stop_net_dev_queue()
359 netif_tx_stop_queue(txq); in mwifiex_stop_net_dev_queue()
Dmain.c726 struct netdev_queue *txq; in mwifiex_queue_tx_pkt() local
730 txq = netdev_get_tx_queue(priv->netdev, index); in mwifiex_queue_tx_pkt()
731 if (!netif_tx_queue_stopped(txq)) { in mwifiex_queue_tx_pkt()
732 netif_tx_stop_queue(txq); in mwifiex_queue_tx_pkt()
985 struct netdev_queue *txq; in mwifiex_drv_info_dump() local
1054 txq = netdev_get_tx_queue(priv->netdev, idx); in mwifiex_drv_info_dump()
1056 netif_tx_queue_stopped(txq) ? in mwifiex_drv_info_dump()
Ddebugfs.c81 struct netdev_queue *txq; in mwifiex_info_read() local
140 txq = netdev_get_tx_queue(netdev, i); in mwifiex_info_read()
141 p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ? in mwifiex_info_read()
/linux-4.4.14/net/irda/
Dirlap_event.c192 if (skb_queue_empty(&self->txq) || self->remote_busy) { in irlap_start_poll_timer()
262 skb_queue_len(&self->txq)); in irlap_do_event()
264 if (!skb_queue_empty(&self->txq)) { in irlap_do_event()
284 while ((skb = skb_dequeue(&self->txq)) != NULL) { in irlap_do_event()
1005 skb_next = skb_peek(&self->txq); in irlap_state_xmit_p()
1031 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1055 nextfit = !skb_queue_empty(&self->txq); in irlap_state_xmit_p()
1082 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1768 skb_next = skb_peek(&self->txq); in irlap_state_xmit_s()
1782 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_s()
[all …]
Dirlap.c135 skb_queue_head_init(&self->txq); in irlap_open()
350 skb_queue_tail(&self->txq, skb); in irlap_data_request()
359 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy)) in irlap_data_request()
424 if (!skb_queue_empty(&self->txq)) { in irlap_disconnect_request()
825 while ((skb = skb_dequeue(&self->txq)) != NULL) in irlap_flush_all_queues()
1134 skb_queue_len(&self->txq)); in irlap_seq_show()
Dirlap_frame.c1011 while (!skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
1015 skb = skb_dequeue( &self->txq); in irlap_resend_rejected_frames()
1023 !skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
/linux-4.4.14/drivers/net/wireless/ipw2x00/
Dipw2100.c2820 struct ipw2100_bd_queue *txq = &priv->tx_queue; in __ipw2100_tx_process() local
2834 tbd = &txq->drv[packet->index]; in __ipw2100_tx_process()
2841 e = txq->oldest; in __ipw2100_tx_process()
2848 e = txq->oldest + frag_num; in __ipw2100_tx_process()
2849 e %= txq->entries; in __ipw2100_tx_process()
2866 if (w != txq->next) in __ipw2100_tx_process()
2902 i = txq->oldest; in __ipw2100_tx_process()
2904 &txq->drv[i], in __ipw2100_tx_process()
2905 (u32) (txq->nic + i * sizeof(struct ipw2100_bd)), in __ipw2100_tx_process()
2906 txq->drv[i].host_addr, txq->drv[i].buf_length); in __ipw2100_tx_process()
[all …]
Dipw2200.c213 struct clx2_tx_queue *txq, int qindex);
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); in ipw_irq_tasklet()
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); in ipw_irq_tasklet()
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); in ipw_irq_tasklet()
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); in ipw_irq_tasklet()
3824 struct clx2_tx_queue *txq) in ipw_queue_tx_free_tfd() argument
3826 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; in ipw_queue_tx_free_tfd()
3848 if (txq->txb[txq->q.last_used]) { in ipw_queue_tx_free_tfd()
3849 libipw_txb_free(txq->txb[txq->q.last_used]); in ipw_queue_tx_free_tfd()
3850 txq->txb[txq->q.last_used] = NULL; in ipw_queue_tx_free_tfd()
[all …]
Dipw2200.h1207 struct clx2_tx_queue txq[4]; member
/linux-4.4.14/net/mac80211/
Dsta_info.c111 if (sta->sta.txq[0]) { in __cleanup_single_sta()
112 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in __cleanup_single_sta()
113 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); in __cleanup_single_sta()
117 atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]); in __cleanup_single_sta()
249 if (sta->sta.txq[0]) in sta_info_free()
250 kfree(to_txq_info(sta->sta.txq[0])); in sta_info_free()
351 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); in sta_info_alloc()
355 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in sta_info_alloc()
356 struct txq_info *txq = txq_data + i * size; in sta_info_alloc() local
358 ieee80211_init_tx_queue(sdata, sta, txq, i); in sta_info_alloc()
[all …]
Dagg-tx.c195 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_stop_txq() local
198 if (!txq) in ieee80211_agg_stop_txq()
201 txqi = to_txq_info(txq); in ieee80211_agg_stop_txq()
212 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_start_txq() local
215 if (!txq) in ieee80211_agg_start_txq()
218 txqi = to_txq_info(txq); in ieee80211_agg_start_txq()
Dtx.c845 if (!tx->sta->sta.txq[0]) in ieee80211_tx_h_sequence()
1086 } else if (!tx->sta->sta.txq[tid]) { in ieee80211_tx_prep_agg()
1242 struct ieee80211_txq *txq = NULL; in ieee80211_drv_tx() local
1255 txq = pubsta->txq[tid]; in ieee80211_drv_tx()
1257 txq = vif->txq; in ieee80211_drv_tx()
1260 if (!txq) in ieee80211_drv_tx()
1263 ac = txq->ac; in ieee80211_drv_tx()
1264 txqi = to_txq_info(txq); in ieee80211_drv_tx()
1279 struct ieee80211_txq *txq) in ieee80211_tx_dequeue() argument
1282 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif); in ieee80211_tx_dequeue()
[all …]
Ddriver-ops.h1158 struct txq_info *txq) in drv_wake_tx_queue() argument
1160 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); in drv_wake_tx_queue()
1165 trace_drv_wake_tx_queue(local, sdata, txq); in drv_wake_tx_queue()
1166 local->ops->wake_tx_queue(&local->hw, &txq->txq); in drv_wake_tx_queue()
Dtdls.c268 struct ieee80211_tx_queue_params *txq; in ieee80211_tdls_add_wmm_param_ie() local
290 txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)]; in ieee80211_tdls_add_wmm_param_ie()
291 wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, in ieee80211_tdls_add_wmm_param_ie()
292 txq->acm, i); in ieee80211_tdls_add_wmm_param_ie()
293 wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); in ieee80211_tdls_add_wmm_param_ie()
294 wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); in ieee80211_tdls_add_wmm_param_ie()
Dtrace.h2350 struct txq_info *txq),
2352 TP_ARGS(local, sdata, txq),
2363 struct ieee80211_sta *sta = txq->txq.sta;
2368 __entry->ac = txq->txq.ac;
2369 __entry->tid = txq->txq.tid;
Dutil.c3334 txqi->txq.vif = &sdata->vif; in ieee80211_init_tx_queue()
3337 txqi->txq.sta = &sta->sta; in ieee80211_init_tx_queue()
3338 sta->sta.txq[tid] = &txqi->txq; in ieee80211_init_tx_queue()
3339 txqi->txq.tid = tid; in ieee80211_init_tx_queue()
3340 txqi->txq.ac = ieee802_1d_to_ac[tid & 7]; in ieee80211_init_tx_queue()
3342 sdata->vif.txq = &txqi->txq; in ieee80211_init_tx_queue()
3343 txqi->txq.tid = 0; in ieee80211_init_tx_queue()
3344 txqi->txq.ac = IEEE80211_AC_BE; in ieee80211_init_tx_queue()
Dieee80211_i.h813 struct ieee80211_txq txq; member
1456 static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) in to_txq_info() argument
1458 return container_of(txq, struct txq_info, txq); in to_txq_info()
1921 struct txq_info *txq, int tid);
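
to_txq_info() in ieee80211_i.h recovers mac80211's private txq_info from the driver-visible ieee80211_txq embedded inside it, the standard container_of() pattern. A freestanding round trip (the structs are stripped-down stand-ins, and this container_of omits the kernel's type check):

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ieee80211_txq { int ac, tid; };

    struct txq_info {
        int flags;
        struct ieee80211_txq txq;   /* embedded public part */
    };

    static struct txq_info *to_txq_info(struct ieee80211_txq *txq)
    {
        return container_of(txq, struct txq_info, txq);
    }

    int main(void)
    {
        struct txq_info info = { .flags = 1 };
        assert(to_txq_info(&info.txq) == &info);
        return 0;
    }
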
Diface.c977 if (sdata->vif.txq) { in ieee80211_do_stop()
978 struct txq_info *txqi = to_txq_info(sdata->vif.txq); in ieee80211_do_stop()
984 atomic_set(&sdata->txqs_len[txqi->txq.ac], 0); in ieee80211_do_stop()
Drx.c1242 if (!sta->sta.txq[0]) in sta_ps_start()
1245 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { in sta_ps_start()
1246 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); in sta_ps_start()
/linux-4.4.14/drivers/net/ethernet/sun/
Dsunvnet.c639 struct netdev_queue *txq; in vnet_ack() local
672 txq = netdev_get_tx_queue(dev, port->q_index); in vnet_ack()
673 if (unlikely(netif_tx_queue_stopped(txq) && in vnet_ack()
707 struct netdev_queue *txq; in maybe_tx_wakeup() local
709 txq = netdev_get_tx_queue(port->vp->dev, port->q_index); in maybe_tx_wakeup()
710 __netif_tx_lock(txq, smp_processor_id()); in maybe_tx_wakeup()
711 if (likely(netif_tx_queue_stopped(txq))) { in maybe_tx_wakeup()
715 netif_tx_wake_queue(txq); in maybe_tx_wakeup()
717 __netif_tx_unlock(txq); in maybe_tx_wakeup()
1182 struct netdev_queue *txq; in vnet_handle_offloads() local
[all …]
Dsungem.c704 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); in gem_tx() local
706 __netif_tx_lock(txq, smp_processor_id()); in gem_tx()
710 __netif_tx_unlock(txq); in gem_tx()
892 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); in gem_poll() local
900 __netif_tx_lock(txq, smp_processor_id()); in gem_poll()
902 __netif_tx_unlock(txq); in gem_poll()
1341 struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); in gem_set_link_modes() local
1371 __netif_tx_lock(txq, smp_processor_id()); in gem_set_link_modes()
1433 __netif_tx_unlock(txq); in gem_set_link_modes()
Dniu.c3604 struct netdev_queue *txq; in niu_tx_work() local
3610 txq = netdev_get_tx_queue(np->dev, index); in niu_tx_work()
3634 if (unlikely(netif_tx_queue_stopped(txq) && in niu_tx_work()
3636 __netif_tx_lock(txq, smp_processor_id()); in niu_tx_work()
3637 if (netif_tx_queue_stopped(txq) && in niu_tx_work()
3639 netif_tx_wake_queue(txq); in niu_tx_work()
3640 __netif_tx_unlock(txq); in niu_tx_work()
6634 struct netdev_queue *txq; in niu_start_xmit() local
6644 txq = netdev_get_tx_queue(dev, i); in niu_start_xmit()
6647 netif_tx_stop_queue(txq); in niu_start_xmit()
[all …]
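
niu_tx_work() (like bnx2_tx_int() and tg3_tx() below) wakes a stopped queue with a check, lock, re-check sequence: a cheap unlocked test that the queue is stopped and space is back, then the same test repeated under __netif_tx_lock so a racing transmit cannot trigger a spurious wake. A pthread rendering of the shape (thresholds invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool queue_stopped = true;
    static int  ring_free = 64;
    #define WAKE_THRESH 32

    /* Completion path: wake the stack only if the state still calls
     * for it after taking the lock. */
    static void maybe_wake(void)
    {
        if (queue_stopped && ring_free > WAKE_THRESH) {      /* unlocked peek */
            pthread_mutex_lock(&txq_lock);
            if (queue_stopped && ring_free > WAKE_THRESH) {  /* re-check */
                queue_stopped = false;
                printf("queue woken\n");
            }
            pthread_mutex_unlock(&txq_lock);
        }
    }

    int main(void) { maybe_wake(); return 0; }
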
/linux-4.4.14/drivers/misc/ti-st/
Dst_core.c219 skb_queue_tail(&st_gdata->txq, waiting_skb); in st_wakeup_ack()
409 return skb_dequeue(&st_gdata->txq); in st_int_dequeue()
431 skb_queue_tail(&st_gdata->txq, skb); in st_int_enqueue()
784 skb_queue_purge(&st_gdata->txq); in st_tty_close()
880 skb_queue_head_init(&st_gdata->txq); in st_core_init()
912 skb_queue_purge(&st_gdata->txq); in st_core_exit()
/linux-4.4.14/drivers/net/ethernet/emulex/benet/
Dbe_main.c867 struct be_queue_info *txq = &txo->q; in be_tx_setup_wrb_hdr() local
868 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); in be_tx_setup_wrb_hdr()
876 atomic_add(num_frags, &txq->used); in be_tx_setup_wrb_hdr()
886 struct be_queue_info *txq = &txo->q; in be_tx_setup_wrb_frag() local
888 wrb = queue_head_node(txq); in be_tx_setup_wrb_frag()
890 queue_head_inc(txq); in be_tx_setup_wrb_frag()
903 struct be_queue_info *txq = &txo->q; in be_xmit_restore() local
906 txq->head = head; in be_xmit_restore()
909 queue_head_inc(txq); in be_xmit_restore()
911 wrb = queue_head_node(txq); in be_xmit_restore()
[all …]
Dbe_cmds.c1301 struct be_queue_info *txq = &txo->q; in be_cmd_txq_create() local
1303 struct be_dma_mem *q_mem = &txq->dma_mem; in be_cmd_txq_create()
1325 req->queue_size = be_encoded_q_len(txq->len); in be_cmd_txq_create()
1333 txq->id = le16_to_cpu(resp->cid); in be_cmd_txq_create()
1338 txq->created = true; in be_cmd_txq_create()
/linux-4.4.14/net/ieee802154/6lowpan/
Dcore.c65 struct netdev_queue *txq, in lowpan_set_lockdep_class_one() argument
68 lockdep_set_class(&txq->_xmit_lock, in lowpan_set_lockdep_class_one()
/linux-4.4.14/include/net/irda/
Dirlap.h148 struct sk_buff_head txq; /* Frames to be transmitted */ member
266 #define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
Dirlan_common.h136 struct sk_buff_head txq; /* Transmit control queue */ member
/linux-4.4.14/drivers/net/ethernet/
Dlantiq_etop.c186 struct netdev_queue *txq = in ltq_etop_poll_tx() local
202 if (netif_tx_queue_stopped(txq)) in ltq_etop_poll_tx()
203 netif_tx_start_queue(txq); in ltq_etop_poll_tx()
521 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); in ltq_etop_tx() local
534 netif_tx_stop_queue(txq); in ltq_etop_tx()
555 netif_tx_stop_queue(txq); in ltq_etop_tx()
/linux-4.4.14/drivers/net/ethernet/micrel/
Dks8851.c131 struct sk_buff_head txq; member
769 bool last = skb_queue_empty(&ks->txq); in ks8851_tx_work()
774 txb = skb_dequeue(&ks->txq); in ks8851_tx_work()
775 last = skb_queue_empty(&ks->txq); in ks8851_tx_work()
905 while (!skb_queue_empty(&ks->txq)) { in ks8851_net_stop()
906 struct sk_buff *txb = skb_dequeue(&ks->txq); in ks8851_net_stop()
947 skb_queue_tail(&ks->txq, skb); in ks8851_start_xmit()
1512 skb_queue_head_init(&ks->txq); in ks8851_probe()
/linux-4.4.14/drivers/net/ethernet/broadcom/
Dbcmsysport.c735 struct netdev_queue *txq; in __bcm_sysport_tx_reclaim() local
738 txq = netdev_get_tx_queue(ndev, ring->index); in __bcm_sysport_tx_reclaim()
770 if (netif_tx_queue_stopped(txq) && pkts_compl) in __bcm_sysport_tx_reclaim()
771 netif_tx_wake_queue(txq); in __bcm_sysport_tx_reclaim()
1015 struct netdev_queue *txq; in bcm_sysport_xmit() local
1025 txq = netdev_get_tx_queue(dev, queue); in bcm_sysport_xmit()
1031 netif_tx_stop_queue(txq); in bcm_sysport_xmit()
1105 netif_tx_stop_queue(txq); in bcm_sysport_xmit()
1754 u32 txq, rxq; in bcm_sysport_probe() local
1761 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) in bcm_sysport_probe()
[all …]
Dbnx2.c2852 struct netdev_queue *txq; in bnx2_tx_int() local
2855 txq = netdev_get_tx_queue(bp->dev, index); in bnx2_tx_int()
2917 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); in bnx2_tx_int()
2928 if (unlikely(netif_tx_queue_stopped(txq)) && in bnx2_tx_int()
2930 __netif_tx_lock(txq, smp_processor_id()); in bnx2_tx_int()
2931 if ((netif_tx_queue_stopped(txq)) && in bnx2_tx_int()
2933 netif_tx_wake_queue(txq); in bnx2_tx_int()
2934 __netif_tx_unlock(txq); in bnx2_tx_int()
6592 struct netdev_queue *txq; in bnx2_start_xmit() local
6598 txq = netdev_get_tx_queue(dev, i); in bnx2_start_xmit()
[all …]
Dtg3.c6523 struct netdev_queue *txq; in tg3_tx() local
6530 txq = netdev_get_tx_queue(tp->dev, index); in tg3_tx()
6597 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); in tg3_tx()
6608 if (unlikely(netif_tx_queue_stopped(txq) && in tg3_tx()
6610 __netif_tx_lock(txq, smp_processor_id()); in tg3_tx()
6611 if (netif_tx_queue_stopped(txq) && in tg3_tx()
6613 netif_tx_wake_queue(txq); in tg3_tx()
6614 __netif_tx_unlock(txq); in tg3_tx()
7850 struct netdev_queue *txq, struct sk_buff *skb) in tg3_tso_bug() argument
7857 netif_tx_stop_queue(txq); in tg3_tso_bug()
[all …]
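bnx2_tx_int() and tg3_tx() above both feed byte queue limits: netdev_tx_sent_queue() on the xmit side, netdev_tx_completed_queue() once per reclaim interval, and (as the xgbe hits further down show) netdev_tx_reset_queue() on stop so the next open starts clean. A sketch of the two accounting calls; the pkts/bytes tallies are assumed to come from walking the completion ring:

#include <linux/netdevice.h>

static void my_post_frame(struct netdev_queue *txq, struct sk_buff *skb)
{
	/* ... descriptor setup ... */
	netdev_tx_sent_queue(txq, skb->len);	/* BQL: bytes queued */
}

static void my_reclaim(struct netdev_queue *txq,
		       unsigned int pkts_compl, unsigned int bytes_compl)
{
	/* BQL: amounts freed this interval, not cumulative totals */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
}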
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/
Dbnx2x_vfpf.c654 req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; in bnx2x_vfpf_setup_q()
655 req->txq.vf_sb = fp_idx; in bnx2x_vfpf_setup_q()
656 req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; in bnx2x_vfpf_setup_q()
657 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; in bnx2x_vfpf_setup_q()
658 req->txq.flags = flags; in bnx2x_vfpf_setup_q()
659 req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; in bnx2x_vfpf_setup_q()
1564 q->sb_idx = setup_q->txq.vf_sb; in bnx2x_vf_mbx_setup_q()
1567 init_p->tx.hc_rate = setup_q->txq.hc_rate; in bnx2x_vf_mbx_setup_q()
1568 init_p->tx.sb_cq_index = setup_q->txq.sb_index; in bnx2x_vf_mbx_setup_q()
1570 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, in bnx2x_vf_mbx_setup_q()
[all …]
Dbnx2x_cmn.c278 struct netdev_queue *txq; in bnx2x_tx_int() local
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); in bnx2x_tx_int()
322 if (unlikely(netif_tx_queue_stopped(txq))) { in bnx2x_tx_int()
333 __netif_tx_lock(txq, smp_processor_id()); in bnx2x_tx_int()
335 if ((netif_tx_queue_stopped(txq)) && in bnx2x_tx_int()
338 netif_tx_wake_queue(txq); in bnx2x_tx_int()
340 __netif_tx_unlock(txq); in bnx2x_tx_int()
3786 struct netdev_queue *txq; in bnx2x_start_xmit() local
3811 txq = netdev_get_tx_queue(dev, txq_index); in bnx2x_start_xmit()
[all …]
Dbnx2x_vfpf.h288 } txq; member
Dbnx2x_ethtool.c2451 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, in bnx2x_run_loopback() local
2518 netdev_tx_sent_queue(txq, skb->len); in bnx2x_run_loopback()
/linux-4.4.14/net/caif/
Dcaif_dev.c170 struct netdev_queue *txq; in transmit() local
188 txq = netdev_get_tx_queue(skb->dev, 0); in transmit()
189 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); in transmit()
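caif's transmit() peeks at the root qdisc backlog to decide on flow control. A minimal sketch of that read, assuming queue 0 and BH-RCU context as in the hit above:

#include <linux/netdevice.h>
#include <net/sch_generic.h>

static int my_qdisc_backlog(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	int qlen;

	rcu_read_lock_bh();
	qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
	rcu_read_unlock_bh();
	return qlen;
}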
/linux-4.4.14/drivers/net/ethernet/amd/xgbe/
Dxgbe-drv.c921 struct netdev_queue *txq; in xgbe_stop() local
947 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_stop()
948 netdev_tx_reset_queue(txq); in xgbe_stop()
1399 struct netdev_queue *txq; in xgbe_xmit() local
1405 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_xmit()
1444 netdev_tx_sent_queue(txq, packet->tx_bytes); in xgbe_xmit()
1811 struct netdev_queue *txq; in xgbe_tx_poll() local
1827 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_tx_poll()
1860 netdev_tx_completed_queue(txq, tx_packets, tx_bytes); in xgbe_tx_poll()
1865 netif_tx_wake_queue(txq); in xgbe_tx_poll()
/linux-4.4.14/Documentation/devicetree/bindings/net/
Dbrcm,systemport.txt18 - systemport,num-txq: number of HW transmit queues, an integer
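The binding above is consumed at probe time (see the of_property_read_u32() hit in bcm_sysport_probe() earlier in this listing). A hedged sketch of that consumption; the fallback value here is purely illustrative:

#include <linux/of.h>

static u32 my_num_txq(struct device_node *dn)
{
	u32 txq;

	/* absent property -> driver default */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = 1;	/* illustrative fallback */
	return txq;
}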
/linux-4.4.14/net/irda/irlan/
Dirlan_common.c235 skb_queue_head_init(&self->client.txq); in irlan_open()
276 skb_queue_purge(&self->client.txq); in __irlan_close()
577 skb = skb_dequeue(&self->client.txq); in irlan_run_ctrl_tx_queue()
605 skb_queue_tail(&self->client.txq, skb); in irlan_ctrl_data_request()
Dirlan_eth.c148 skb_queue_purge(&self->client.txq); in irlan_eth_close()
Dirlan_client.c229 while ((skb = skb_dequeue(&self->client.txq)) != NULL) { in irlan_client_ctrl_disconnect_indication()
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
Di40e_virtchnl.h214 struct i40e_virtchnl_txq_info txq; member
Di40e_virtchnl_pf.c1328 vsi_queue_id = qpi->txq.queue_id; in i40e_vc_config_queues_msg()
1329 if ((qpi->txq.vsi_id != vsi_id) || in i40e_vc_config_queues_msg()
1340 &qpi->txq)) { in i40e_vc_config_queues_msg()
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/
Dnicvf_main.c643 struct netdev_queue *txq; in nicvf_cq_intr_handler() local
710 txq = netdev_get_tx_queue(netdev, in nicvf_cq_intr_handler()
713 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { in nicvf_cq_intr_handler()
714 netif_tx_start_queue(txq); in nicvf_cq_intr_handler()
1012 struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid); in nicvf_xmit() local
1020 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { in nicvf_xmit()
1021 netif_tx_stop_queue(txq); in nicvf_xmit()
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/
Docteon_network.h44 int txq; member
Dlio_main.c262 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, in octeon_report_tx_completion_to_bql() argument
265 struct netdev_queue *netdev_queue = txq; in octeon_report_tx_completion_to_bql()
304 struct netdev_queue *txq; in octeon_report_sent_bytes_to_bql() local
323 txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); in octeon_report_sent_bytes_to_bql()
324 netdev_tx_sent_queue(txq, skb->len); in octeon_report_sent_bytes_to_bql()
762 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) in check_txq_status()
764 wake_q(lio->netdev, lio->txq); in check_txq_status()
1342 iq = lio->txq; in check_txq_state()
2688 iq_no = lio->txq; in liquidio_xmit()
2729 if (octnet_iq_is_full(oct, lio->txq)) { in liquidio_xmit()
[all …]
Docteon_main.h53 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
/linux-4.4.14/drivers/net/wan/
Ddlci.c196 struct netdev_queue *txq = skb_get_tx_queue(dev, skb); in dlci_transmit() local
197 netdev_start_xmit(skb, dlp->slave, txq, false); in dlci_transmit()
Dfarsync.c443 struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */ member
1366 skb = port->txq[port->txqs]; in do_bottom_half_tx()
2336 port->txq[port->txqe] = skb; in fst_start_xmit()
/linux-4.4.14/drivers/net/hamradio/
Dbpqether.c128 struct netdev_queue *txq, in bpq_set_lockdep_class_one() argument
131 lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); in bpq_set_lockdep_class_one()
/linux-4.4.14/drivers/net/ethernet/broadcom/genet/
Dbcmgenet.c1172 struct netdev_queue *txq; in __bcmgenet_tx_reclaim() local
1224 txq = netdev_get_tx_queue(dev, ring->queue); in __bcmgenet_tx_reclaim()
1225 if (netif_tx_queue_stopped(txq)) in __bcmgenet_tx_reclaim()
1226 netif_tx_wake_queue(txq); in __bcmgenet_tx_reclaim()
1429 struct netdev_queue *txq; in bcmgenet_xmit() local
1451 txq = netdev_get_tx_queue(dev, ring->queue); in bcmgenet_xmit()
1455 netif_tx_stop_queue(txq); in bcmgenet_xmit()
1507 netif_tx_stop_queue(txq); in bcmgenet_xmit()
1509 if (!skb->xmit_more || netif_xmit_stopped(txq)) in bcmgenet_xmit()
2968 struct netdev_queue *txq; in bcmgenet_dump_tx_queue() local
[all …]
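bcmgenet_xmit() (like enic_hard_start_xmit() further down) gates the hardware doorbell on skb->xmit_more: while the stack has more frames queued for the same ring, the register write is deferred until the last one, unless the queue has meanwhile stopped. A sketch, with my_ring_doorbell() as a stand-in for the device write:

#include <linux/netdevice.h>

static void my_ring_doorbell(void);	/* hypothetical device poke */

static void my_maybe_kick(struct sk_buff *skb, struct netdev_queue *txq)
{
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		my_ring_doorbell();	/* flush the posted descriptors */
}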
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
Dsdio.c444 struct pktq txq; /* Queue length used for flow-control */ member
2383 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)); in brcmf_sdio_sendfromq()
2387 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, in brcmf_sdio_sendfromq()
2419 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { in brcmf_sdio_sendfromq()
2538 brcmu_pktq_flush(&bus->txq, true, NULL, NULL); in brcmf_sdio_bus_stop()
2727 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && in brcmf_sdio_dpc()
2750 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && in brcmf_sdio_dpc()
2762 return &bus->txq; in brcmf_sdio_bus_gettxq()
2827 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); in brcmf_sdio_bus_txdata()
2834 if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) { in brcmf_sdio_bus_txdata()
[all …]
Dfwsignal.c824 struct pktq *txq; in brcmf_fws_bus_txq_cleanup() local
829 txq = brcmf_bus_gettxq(fws->drvr->bus_if); in brcmf_fws_bus_txq_cleanup()
830 if (IS_ERR(txq)) { in brcmf_fws_bus_txq_cleanup()
835 for (prec = 0; prec < txq->num_prec; prec++) { in brcmf_fws_bus_txq_cleanup()
836 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx); in brcmf_fws_bus_txq_cleanup()
843 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx); in brcmf_fws_bus_txq_cleanup()
/linux-4.4.14/drivers/net/ethernet/sfc/
Dptp.c273 struct sk_buff_head txq; member
1140 skb_queue_purge(&efx->ptp_data->txq); in efx_ptp_stop()
1193 while ((skb = skb_dequeue(&ptp_data->txq))) in efx_ptp_worker()
1237 skb_queue_head_init(&ptp->txq); in efx_ptp_probe()
1326 skb_queue_purge(&efx->ptp_data->txq); in efx_ptp_remove()
1463 skb_queue_tail(&ptp->txq, skb); in efx_ptp_tx()
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
Dbnxt.c169 struct netdev_queue *txq; in bnxt_start_xmit() local
188 txq = netdev_get_tx_queue(dev, i); in bnxt_start_xmit()
193 netif_tx_stop_queue(txq); in bnxt_start_xmit()
269 netdev_tx_sent_queue(txq, skb->len); in bnxt_start_xmit()
365 netdev_tx_sent_queue(txq, skb->len); in bnxt_start_xmit()
381 netif_tx_stop_queue(txq); in bnxt_start_xmit()
390 netif_tx_wake_queue(txq); in bnxt_start_xmit()
422 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); in bnxt_tx_int() local
464 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); in bnxt_tx_int()
474 if (unlikely(netif_tx_queue_stopped(txq)) && in bnxt_tx_int()
[all …]
/linux-4.4.14/include/linux/usb/
Dusbnet.h58 struct sk_buff_head txq; member
/linux-4.4.14/drivers/net/ethernet/qlogic/qlcnic/
Dqlcnic_hw.c403 __netif_tx_lock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
408 netif_tx_stop_queue(tx_ring->txq); in qlcnic_send_cmd_descs()
412 netif_tx_wake_queue(tx_ring->txq); in qlcnic_send_cmd_descs()
415 __netif_tx_unlock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
439 __netif_tx_unlock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
Dqlcnic_io.c692 netif_tx_stop_queue(tx_ring->txq); in qlcnic_xmit_frame()
694 netif_tx_start_queue(tx_ring->txq); in qlcnic_xmit_frame()
927 if (netif_tx_queue_stopped(tx_ring->txq) && in qlcnic_process_cmd_ring()
930 netif_tx_wake_queue(tx_ring->txq); in qlcnic_process_cmd_ring()
Dqlcnic.h638 struct netdev_queue *txq; member
Dqlcnic_main.c2410 tx_ring->txq = netdev_get_tx_queue(netdev, ring); in qlcnic_alloc_tx_rings()
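qlcnic_send_cmd_descs() posts control descriptors from process context, so it uses the _bh queue-lock variant to exclude both the xmit path and softirq completion while it owns the ring. A loose sketch of that shape; the signature and -EBUSY handling are assumptions, not the driver's exact code:

#include <linux/netdevice.h>

static int my_send_cmds(struct netdev_queue *txq, u32 needed, u32 avail)
{
	__netif_tx_lock_bh(txq);
	if (avail < needed) {
		netif_tx_stop_queue(txq);	/* back-pressure the stack */
		__netif_tx_unlock_bh(txq);
		return -EBUSY;
	}
	/* ... write command descriptors, ring doorbell ... */
	__netif_tx_unlock_bh(txq);
	return 0;
}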
/linux-4.4.14/drivers/net/ethernet/neterion/vxge/
Dvxge-main.c109 if (__netif_tx_trylock(fifo->txq)) { in VXGE_COMPLETE_VPATH_TX()
112 __netif_tx_unlock(fifo->txq); in VXGE_COMPLETE_VPATH_TX()
621 if (netif_tx_queue_stopped(fifo->txq)) in vxge_xmit_compl()
622 netif_tx_wake_queue(fifo->txq); in vxge_xmit_compl()
863 if (netif_tx_queue_stopped(fifo->txq)) in vxge_xmit()
878 netif_tx_stop_queue(fifo->txq); in vxge_xmit()
991 netif_tx_stop_queue(fifo->txq); in vxge_xmit()
1573 if (netif_tx_queue_stopped(vpath->fifo.txq)) in vxge_reset_vpath()
1574 netif_tx_wake_queue(vpath->fifo.txq); in vxge_reset_vpath()
2082 vpath->fifo.txq = in vxge_open_vpaths()
[all …]
Dvxge-main.h240 struct netdev_queue *txq; member
/linux-4.4.14/drivers/scsi/lpfc/
Dlpfc_sli.h206 struct list_head txq; member
Dlpfc_sli.c1030 if (!list_empty(&pring->txq)) in __lpfc_sli_release_iocbq_s4()
1347 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); in lpfc_sli_ringtx_get()
1618 (!list_empty(&pring->txq)) && in lpfc_sli_resume_iocb()
3543 list_splice_init(&pring->txq, &completions); in lpfc_sli_abort_iocb_ring()
3554 list_splice_init(&pring->txq, &completions); in lpfc_sli_abort_iocb_ring()
3611 LIST_HEAD(txq); in lpfc_sli_flush_fcp_rings()
3629 list_splice_init(&pring->txq, &txq); in lpfc_sli_flush_fcp_rings()
3637 lpfc_sli_cancel_iocbs(phba, &txq, in lpfc_sli_flush_fcp_rings()
3650 list_splice_init(&pring->txq, &txq); in lpfc_sli_flush_fcp_rings()
3658 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, in lpfc_sli_flush_fcp_rings()
[all …]
Dlpfc_hbadisc.c705 (!list_empty(&pring->txq))) in lpfc_work_done()
4449 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, in lpfc_no_rpi()
5205 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { in lpfc_free_tx()
Dlpfc_bsg.c5327 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, in lpfc_bsg_timeout()
5364 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, in lpfc_bsg_timeout()
Dlpfc_nportdisc.c252 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { in lpfc_els_abort()
Dlpfc_els.c7028 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { in lpfc_els_flush_cmd()
8845 if (!(list_empty(&pring->txq))) in lpfc_sli4_els_xri_aborted()
Dlpfc_scsi.c601 if (!list_empty(&pring->txq)) in lpfc_sli4_fcp_xri_aborted()
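lpfc's txq is a list_head of pending IOCBs, not a netdev queue, and the recurring idiom above is splice-and-drain: move everything to a local list under the ring lock, then complete the entries with the lock dropped. A sketch with hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_iocb {
	struct list_head list;
};

static void my_complete(struct my_iocb *iocb);	/* hypothetical */

static void my_flush_txq(spinlock_t *lock, struct list_head *txq)
{
	LIST_HEAD(completions);
	struct my_iocb *iocb, *next;

	spin_lock_irq(lock);
	list_splice_init(txq, &completions);	/* txq is empty afterwards */
	spin_unlock_irq(lock);

	list_for_each_entry_safe(iocb, next, &completions, list)
		my_complete(iocb);		/* no lock held here */
}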
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
Dehea_main.c822 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes() local
873 if (unlikely(netif_tx_queue_stopped(txq) && in ehea_proc_cqes()
875 __netif_tx_lock(txq, smp_processor_id()); in ehea_proc_cqes()
876 if (netif_tx_queue_stopped(txq) && in ehea_proc_cqes()
878 netif_tx_wake_queue(txq); in ehea_proc_cqes()
879 __netif_tx_unlock(txq); in ehea_proc_cqes()
2058 struct netdev_queue *txq; in ehea_start_xmit() local
2061 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); in ehea_start_xmit()
2111 netif_tx_stop_queue(txq); in ehea_start_xmit()
2119 netif_tx_stop_queue(txq); in ehea_start_xmit()
/linux-4.4.14/drivers/net/team/
Dteam.c1548 struct netdev_queue *txq, in team_set_lockdep_class_one() argument
1551 lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key); in team_set_lockdep_class_one()
1681 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; in team_select_queue() local
1688 if (unlikely(txq >= dev->real_num_tx_queues)) { in team_select_queue()
1690 txq -= dev->real_num_tx_queues; in team_select_queue()
1691 } while (txq >= dev->real_num_tx_queues); in team_select_queue()
1693 return txq; in team_select_queue()
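team_select_queue() (and bond_select_queue() below) reuse the recorded receive queue as the transmit queue, folding it into range by repeated subtraction when the device has fewer tx queues. A sketch against the 4.4 ndo_select_queue signature:

#include <linux/netdevice.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}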
/linux-4.4.14/net/8021q/
Dvlan_dev.c481 struct netdev_queue *txq, in vlan_dev_set_lockdep_one() argument
484 lockdep_set_class_and_subclass(&txq->_xmit_lock, in vlan_dev_set_lockdep_one()
/linux-4.4.14/drivers/net/ipvlan/
Dipvlan_main.c100 struct netdev_queue *txq, in ipvlan_set_lockdep_class_one() argument
103 lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key); in ipvlan_set_lockdep_class_one()
/linux-4.4.14/net/batman-adv/
Dsoft-interface.c704 struct netdev_queue *txq, in batadv_set_lockdep_class_one() argument
707 lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); in batadv_set_lockdep_class_one()
/linux-4.4.14/drivers/net/ethernet/qlogic/netxen/
Dnetxen_nic_hw.c593 __netif_tx_lock_bh(tx_ring->txq); in netxen_send_cmd_descs()
599 netif_tx_stop_queue(tx_ring->txq); in netxen_send_cmd_descs()
603 netif_tx_wake_queue(tx_ring->txq); in netxen_send_cmd_descs()
605 __netif_tx_unlock_bh(tx_ring->txq); in netxen_send_cmd_descs()
629 __netif_tx_unlock_bh(tx_ring->txq); in netxen_send_cmd_descs()
Dnetxen_nic.h664 struct netdev_queue *txq; member
Dnetxen_nic_init.c213 tx_ring->txq = netdev_get_tx_queue(netdev, 0); in netxen_alloc_sw_resources()
/linux-4.4.14/drivers/net/ethernet/nxp/
Dlpc_eth.c1017 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0); in lpc_eth_poll() local
1019 __netif_tx_lock(txq, smp_processor_id()); in lpc_eth_poll()
1021 __netif_tx_unlock(txq); in lpc_eth_poll()
/linux-4.4.14/drivers/net/bonding/
Dbond_main.c3965 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; in bond_select_queue() local
3970 if (unlikely(txq >= dev->real_num_tx_queues)) { in bond_select_queue()
3972 txq -= dev->real_num_tx_queues; in bond_select_queue()
3973 } while (txq >= dev->real_num_tx_queues); in bond_select_queue()
3975 return txq; in bond_select_queue()
4575 struct netdev_queue *txq, in bond_set_lockdep_class_one() argument
4578 lockdep_set_class(&txq->_xmit_lock, in bond_set_lockdep_class_one()
/linux-4.4.14/drivers/net/ethernet/cisco/enic/
Denic_main.c636 struct netdev_queue *txq; in enic_hard_start_xmit() local
645 txq = netdev_get_tx_queue(netdev, txq_map); in enic_hard_start_xmit()
663 netif_tx_stop_queue(txq); in enic_hard_start_xmit()
673 netif_tx_stop_queue(txq); in enic_hard_start_xmit()
674 if (!skb->xmit_more || netif_xmit_stopped(txq)) in enic_hard_start_xmit()
/linux-4.4.14/Documentation/DocBook/
D80211.xml.db41 API-struct-ieee80211-txq-params
Dnetworking.xml.db408 API-netdev-txq-bql-enqueue-prefetchw
409 API-netdev-txq-bql-complete-prefetchw
/linux-4.4.14/net/bluetooth/
D6lowpan.c634 struct netdev_queue *txq, in bt_set_lockdep_class_one() argument
637 lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key); in bt_set_lockdep_class_one()
/linux-4.4.14/net/dsa/
Dslave.c1093 struct netdev_queue *txq, in dsa_slave_set_lockdep_class_one() argument
1096 lockdep_set_class(&txq->_xmit_lock, in dsa_slave_set_lockdep_class_one()
/linux-4.4.14/net/netrom/
Daf_netrom.c79 struct netdev_queue *txq, in nr_set_lockdep_one() argument
82 lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); in nr_set_lockdep_one()
/linux-4.4.14/net/packet/
Daf_packet.c253 struct netdev_queue *txq; in packet_direct_xmit() local
265 txq = skb_get_tx_queue(dev, skb); in packet_direct_xmit()
269 HARD_TX_LOCK(dev, txq, smp_processor_id()); in packet_direct_xmit()
270 if (!netif_xmit_frozen_or_drv_stopped(txq)) in packet_direct_xmit()
271 ret = netdev_start_xmit(skb, dev, txq, false); in packet_direct_xmit()
272 HARD_TX_UNLOCK(dev, txq); in packet_direct_xmit()
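packet_direct_xmit() is the qdisc-bypass transmit path: resolve the skb's mapped queue, take the driver xmit lock, and call into the driver only if the queue is neither frozen nor stopped. A trimmed sketch of the same sequence (drop accounting and refcount handling omitted):

#include <linux/netdevice.h>

static netdev_tx_t my_direct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
	netdev_tx_t ret = NETDEV_TX_BUSY;

	local_bh_disable();
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);
	local_bh_enable();
	return ret;
}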
/linux-4.4.14/drivers/net/ethernet/atheros/atl1c/
Datl1c_main.c1200 u32 mac, txq, rxq; in atl1c_start_mac() local
1206 AT_READ_REG(hw, REG_TXQ_CTRL, &txq); in atl1c_start_mac()
1210 txq |= TXQ_CTRL_EN; in atl1c_start_mac()
1224 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); in atl1c_start_mac()
/linux-4.4.14/net/rose/
Daf_rose.c80 struct netdev_queue *txq, in rose_set_lockdep_one() argument
83 lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); in rose_set_lockdep_one()
/linux-4.4.14/drivers/tty/
Dn_gsm.c2035 struct gsm_msg *txq, *ntxq; in gsm_cleanup_mux() local
2074 list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) in gsm_cleanup_mux()
2075 kfree(txq); in gsm_cleanup_mux()
/linux-4.4.14/drivers/net/wireless/ath/wil6210/
Ddebugfs.c1231 struct netdev_queue *txq = netdev_get_tx_queue(ndev, i); in wil_info_debugfs_show() local
1232 unsigned long state = txq->state; in wil_info_debugfs_show()
/linux-4.4.14/drivers/net/wireless/hostap/
Dhostap_hw.c3070 struct netdev_queue *txq, in prism2_set_lockdep_class_one() argument
3073 lockdep_set_class(&txq->_xmit_lock, in prism2_set_lockdep_class_one()
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
Dixgbe_main.c7454 int txq; in ixgbe_select_queue() local
7479 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : in ixgbe_select_queue()
7482 while (txq >= f->indices) in ixgbe_select_queue()
7483 txq -= f->indices; in ixgbe_select_queue()
7485 return txq + f->offset; in ixgbe_select_queue()
/linux-4.4.14/drivers/net/vmxnet3/
Dvmxnet3_drv.c1811 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx() local
1812 vmxnet3_tq_tx_complete(txq, adapter); in vmxnet3_msix_tx()
/linux-4.4.14/Documentation/scsi/
DChangeLog.lpfc251 lpfc_els_abort to reset txq and txcmplq iterator after a