Searched refs:txq (Results 1 – 182 of 182) sorted by relevance

/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/
tx.c
149 struct iwl_txq *txq = (void *)data; in iwl_pcie_txq_stuck_timer() local
150 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; in iwl_pcie_txq_stuck_timer()
153 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); in iwl_pcie_txq_stuck_timer()
157 spin_lock(&txq->lock); in iwl_pcie_txq_stuck_timer()
159 if (txq->q.read_ptr == txq->q.write_ptr) { in iwl_pcie_txq_stuck_timer()
160 spin_unlock(&txq->lock); in iwl_pcie_txq_stuck_timer()
163 spin_unlock(&txq->lock); in iwl_pcie_txq_stuck_timer()
165 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, in iwl_pcie_txq_stuck_timer()
166 jiffies_to_msecs(txq->wd_timeout)); in iwl_pcie_txq_stuck_timer()
168 txq->q.read_ptr, txq->q.write_ptr); in iwl_pcie_txq_stuck_timer()
[all …]
internal.h
244 iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) in iwl_pcie_get_scratchbuf_dma() argument
246 return txq->scratchbufs_dma + in iwl_pcie_get_scratchbuf_dma()
296 struct iwl_txq *txq; member
445 struct iwl_txq *txq) in iwl_wake_queue() argument
449 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_wake_queue()
450 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); in iwl_wake_queue()
451 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); in iwl_wake_queue()
456 struct iwl_txq *txq) in iwl_stop_queue() argument
460 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_stop_queue()
461 iwl_op_mode_queue_full(trans->op_mode, txq->q.id); in iwl_stop_queue()
[all …]
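
The iwl_stop_queue()/iwl_wake_queue() hits above show a common flow-control idiom: a shared bitmap records which queues are stopped, and test_and_set_bit()/test_and_clear_bit() make the transition atomic so the op mode is notified exactly once per edge. A minimal sketch of the idiom (illustrative names, not the driver's code):

    #include <linux/bitops.h>
    #include <linux/printk.h>

    #define EXAMPLE_NUM_QUEUES 32

    static unsigned long example_queue_stopped[BITS_TO_LONGS(EXAMPLE_NUM_QUEUES)];

    static void example_stop_queue(int qid)
    {
            /* The old bit is returned: notify only on the 0 -> 1 edge. */
            if (!test_and_set_bit(qid, example_queue_stopped))
                    pr_debug("queue %d now full\n", qid);
    }

    static void example_wake_queue(int qid)
    {
            /* Likewise, notify only on the 1 -> 0 edge. */
            if (test_and_clear_bit(qid, example_queue_stopped))
                    pr_debug("queue %d woken\n", qid);
    }
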
trans.c
1521 struct iwl_txq *txq = &trans_pcie->txq[queue]; in iwl_trans_pcie_freeze_txq_timer() local
1524 spin_lock_bh(&txq->lock); in iwl_trans_pcie_freeze_txq_timer()
1528 if (txq->frozen == freeze) in iwl_trans_pcie_freeze_txq_timer()
1534 txq->frozen = freeze; in iwl_trans_pcie_freeze_txq_timer()
1536 if (txq->q.read_ptr == txq->q.write_ptr) in iwl_trans_pcie_freeze_txq_timer()
1541 txq->stuck_timer.expires))) { in iwl_trans_pcie_freeze_txq_timer()
1549 txq->frozen_expiry_remainder = in iwl_trans_pcie_freeze_txq_timer()
1550 txq->stuck_timer.expires - now; in iwl_trans_pcie_freeze_txq_timer()
1551 del_timer(&txq->stuck_timer); in iwl_trans_pcie_freeze_txq_timer()
1559 mod_timer(&txq->stuck_timer, in iwl_trans_pcie_freeze_txq_timer()
[all …]
rx.c
574 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; in iwl_pcie_rx_handle_rb() local
635 cmd_index = get_cmd_index(&txq->q, index); in iwl_pcie_rx_handle_rb()
638 cmd = txq->entries[cmd_index].cmd; in iwl_pcie_rx_handle_rb()
645 kzfree(txq->entries[cmd_index].free_buf); in iwl_pcie_rx_handle_rb()
646 txq->entries[cmd_index].free_buf = NULL; in iwl_pcie_rx_handle_rb()
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
xmit.c
50 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
53 int tx_flags, struct ath_txq *txq);
55 struct ath_txq *txq, struct list_head *bf_q,
57 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct ath_txq *txq,
80 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_lock() argument
81 __acquires(&txq->axq_lock) in ath_txq_lock()
83 spin_lock_bh(&txq->axq_lock); in ath_txq_lock()
86 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_unlock() argument
87 __releases(&txq->axq_lock) in ath_txq_unlock()
[all …]
debug_sta.c
30 struct ath_txq *txq; in read_file_node_aggr() local
56 txq = ac->txq; in read_file_node_aggr()
57 ath_txq_lock(sc, txq); in read_file_node_aggr()
61 ath_txq_unlock(sc, txq); in read_file_node_aggr()
71 txq = tid->ac->txq; in read_file_node_aggr()
72 ath_txq_lock(sc, txq); in read_file_node_aggr()
85 ath_txq_unlock(sc, txq); in read_file_node_aggr()
link.c
27 struct ath_txq *txq; in ath_tx_complete_poll_work() local
39 txq = sc->tx.txq_map[i]; in ath_tx_complete_poll_work()
41 ath_txq_lock(sc, txq); in ath_tx_complete_poll_work()
42 if (txq->axq_depth) { in ath_tx_complete_poll_work()
43 if (txq->axq_tx_inprogress) { in ath_tx_complete_poll_work()
45 ath_txq_unlock(sc, txq); in ath_tx_complete_poll_work()
48 txq->axq_tx_inprogress = true; in ath_tx_complete_poll_work()
51 ath_txq_unlock(sc, txq); in ath_tx_complete_poll_work()
178 txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath_paprd_send_frame()
ath9k.h
176 struct ath_txq *txq; member
186 s8 txq; member
283 struct ath_txq *txq; member
300 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; member
555 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
556 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
557 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
558 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
560 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
563 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
debug.c
624 static void print_queue(struct ath_softc *sc, struct ath_txq *txq, in print_queue() argument
627 ath_txq_lock(sc, txq); in print_queue()
629 seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum); in print_queue()
630 seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth); in print_queue()
631 seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth); in print_queue()
632 seq_printf(file, "%s: %3d ", "pending", txq->pending_frames); in print_queue()
633 seq_printf(file, "%s: %d\n", "stopped", txq->stopped); in print_queue()
635 ath_txq_unlock(sc, txq); in print_queue()
642 struct ath_txq *txq; in read_file_queues() local
649 txq = sc->tx.txq_map[i]; in read_file_queues()
[all …]
debug.h
254 struct ath_tx_status *ts, struct ath_txq *txq,
291 struct ath_txq *txq, in ath_debug_stat_tx() argument
main.c
57 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, in ath9k_has_pending_frames() argument
62 spin_lock_bh(&txq->axq_lock); in ath9k_has_pending_frames()
64 if (txq->axq_depth) { in ath9k_has_pending_frames()
72 if (txq->mac80211_qnum >= 0) { in ath9k_has_pending_frames()
75 list = &sc->cur_chan->acq[txq->mac80211_qnum]; in ath9k_has_pending_frames()
80 spin_unlock_bh(&txq->axq_lock); in ath9k_has_pending_frames()
806 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; in ath9k_tx()
813 TX_STAT_INC(txctl.txq->axq_qnum, txfailed); in ath9k_tx()
1609 struct ath_txq *txq; in ath9k_conf_tx() local
1616 txq = sc->tx.txq_map[queue]; in ath9k_conf_tx()
[all …]
gpio.c
409 struct ath_txq *txq; in ath9k_init_btcoex() local
424 txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath9k_init_btcoex()
425 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); in ath9k_init_btcoex()
mac.c
203 qi = &ah->txq[q]; in ath9k_hw_set_txq_props()
267 qi = &ah->txq[q]; in ath9k_hw_get_txq_props()
323 qi = &ah->txq[q]; in ath9k_hw_setuptxqueue()
351 qi = &ah->txq[q]; in ath9k_hw_releasetxqueue()
373 qi = &ah->txq[q]; in ath9k_hw_resettxqueue()
beacon.c
38 struct ath_txq *txq; in ath9k_beaconq_config() local
50 txq = sc->tx.txq_map[IEEE80211_AC_BE]; in ath9k_beaconq_config()
51 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); in ath9k_beaconq_config()
tx99.c
128 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath9k_tx99_init()
init.c
682 ath_tx_cleanupq(sc, &sc->tx.txq[i]); in ath9k_init_softc()
1004 ath_tx_cleanupq(sc, &sc->tx.txq[i]); in ath9k_deinit_softc()
channel.c
983 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath_scan_send_probe()
1104 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; in ath_chanctx_send_vif_ps_frame()
hw.h
816 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; member
/linux-4.1.27/drivers/net/ethernet/marvell/
mv643xx_eth.c
192 #define IS_TSO_HEADER(txq, addr) \ argument
193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
423 struct tx_queue txq[8]; member
461 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
463 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
482 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
484 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
487 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
488 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
[all …]
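
The IS_TSO_HEADER() macro above is a plain address-range test: all TSO headers for a queue are carved from one DMA-coherent block, so the unmap path can recognize a header address without per-descriptor flags. A standalone sketch under that assumption (names are illustrative; the 128-byte TSO_HEADER_SIZE matches net/tso.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define TSO_HEADER_SIZE 128  /* as in net/tso.h */

    struct example_txq {
            uint64_t tso_hdrs_dma;      /* DMA base of the header block */
            unsigned int tx_ring_size;  /* one header slot per descriptor */
    };

    static bool is_tso_header(const struct example_txq *txq, uint64_t addr)
    {
            return addr >= txq->tso_hdrs_dma &&
                   addr < txq->tso_hdrs_dma +
                          (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
    }
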
mvneta.c
274 #define IS_TSO_HEADER(txq, addr) \ argument
275 ((addr >= txq->tso_hdrs_phys) && \
276 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
496 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
498 txq->txq_get_index++; in mvneta_txq_inc_get()
499 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
500 txq->txq_get_index = 0; in mvneta_txq_inc_get()
504 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
506 txq->txq_put_index++; in mvneta_txq_inc_put()
507 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
[all …]
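
mvneta_txq_inc_get()/mvneta_txq_inc_put() above advance the ring's get/put cursors with an explicit wrap rather than a modulo. The same idiom as one helper (a sketch, not the driver's code):

    /* Advance a ring cursor, wrapping back to 0 at the ring size. */
    static inline void ring_index_inc(unsigned int *index, unsigned int size)
    {
            if (++(*index) == size)
                    *index = 0;
    }
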
mvpp2.c
131 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) argument
981 static inline int mvpp2_txq_phys(int port, int txq) in mvpp2_txq_phys() argument
983 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; in mvpp2_txq_phys()
4016 struct mvpp2_tx_queue *txq = port->txqs[queue]; in mvpp2_egress_enable() local
4018 if (txq->descs != NULL) in mvpp2_egress_enable()
4133 struct mvpp2_tx_queue *txq) in mvpp2_txq_pend_desc_num_get() argument
4137 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); in mvpp2_txq_pend_desc_num_get()
4145 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) in mvpp2_txq_next_desc_get() argument
4147 int tx_desc = txq->next_desc_to_proc; in mvpp2_txq_next_desc_get()
4149 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); in mvpp2_txq_next_desc_get()
[all …]
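
mvpp2_txq_phys() above flattens (port, txq) into one global hardware queue number, and macros such as MVPP2_TXQ_SENT_REG() index per-queue registers at a fixed stride. A sketch of both (the two EX_MAX_ constants are assumptions for illustration, not taken from the driver):

    #define EX_MAX_TCONT 16  /* assumed */
    #define EX_MAX_TXQ   8   /* assumed */

    /* Global queue id: TCONT queues first, then each port's queues. */
    static inline int txq_phys(int port, int txq)
    {
            return (EX_MAX_TCONT + port) * EX_MAX_TXQ + txq;
    }

    /* Per-queue register at a 4-byte stride, as in MVPP2_TXQ_SENT_REG(). */
    static inline unsigned int txq_sent_reg(int txq)
    {
            return 0x3c00 + 4 * txq;
    }
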
/linux-4.1.27/drivers/net/ethernet/freescale/
fec_main.c
213 #define IS_TSO_HEADER(txq, addr) \ argument
214 ((addr >= txq->tso_hdrs_dma) && \
215 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
226 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_nextdesc() local
232 if (bdp >= txq->tx_bd_base) { in fec_enet_get_nextdesc()
233 base = txq->tx_bd_base; in fec_enet_get_nextdesc()
234 ring_size = txq->tx_ring_size; in fec_enet_get_nextdesc()
235 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_nextdesc()
257 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_prevdesc() local
263 if (bdp >= txq->tx_bd_base) { in fec_enet_get_prevdesc()
[all …]
gianfar.c
1958 struct netdev_queue *txq; in free_skb_resources() local
1961 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
1964 netdev_tx_reset_queue(txq); in free_skb_resources()
2249 struct netdev_queue *txq; in gfar_start_xmit() local
2262 txq = netdev_get_tx_queue(dev, rq); in gfar_start_xmit()
2307 netif_tx_stop_queue(txq); in gfar_start_xmit()
2435 netdev_tx_sent_queue(txq, bytes_sent); in gfar_start_xmit()
2473 netif_tx_stop_queue(txq); in gfar_start_xmit()
2604 struct netdev_queue *txq; in gfar_clean_tx_ring() local
2620 txq = netdev_get_tx_queue(dev, tqi); in gfar_clean_tx_ring()
[all …]
/linux-4.1.27/drivers/net/ethernet/atheros/alx/
main.c
57 struct alx_buffer *txb = &alx->txq.bufs[entry]; in alx_free_txbuf()
132 struct alx_tx_queue *txq = &alx->txq; in alx_tpd_avail() local
134 if (txq->write_idx >= txq->read_idx) in alx_tpd_avail()
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
136 return txq->read_idx - txq->write_idx - 1; in alx_tpd_avail()
141 struct alx_tx_queue *txq = &alx->txq; in alx_clean_tx_irq() local
146 sw_read_idx = txq->read_idx; in alx_clean_tx_irq()
153 skb = txq->bufs[sw_read_idx].skb; in alx_clean_tx_irq()
165 txq->read_idx = sw_read_idx; in alx_clean_tx_irq()
376 alx->txq.read_idx = 0; in alx_init_ring_ptrs()
[all …]
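
alx_tpd_avail() above computes free descriptors in a circular ring that deliberately leaves one slot unused: read_idx == write_idx can then only mean "empty", never "full", which is why both branches subtract 1. As a standalone helper (illustrative names, indices assumed in range):

    /* Free entries in a ring where one slot is always kept empty. */
    static int ring_space(unsigned int read_idx, unsigned int write_idx,
                          unsigned int ring_size)
    {
            if (write_idx >= read_idx)
                    return ring_size + read_idx - write_idx - 1;
            return read_idx - write_idx - 1;
    }
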
alx.h
100 struct alx_tx_queue txq; member
hw.c
378 u32 rxq, txq, val; in alx_stop_mac() local
383 txq = alx_read_mem32(hw, ALX_TXQ0); in alx_stop_mac()
384 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); in alx_stop_mac()
609 u32 mac, txq, rxq; in alx_start_mac() local
613 txq = alx_read_mem32(hw, ALX_TXQ0); in alx_start_mac()
614 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); in alx_start_mac()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1135 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1137 netif_tx_stop_queue(txq->txq); in txq_stop()
1138 txq->q.stops++; in txq_stop()
1166 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1193 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1199 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1208 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1217 txq_stop(txq); in t4vf_eth_xmit()
1231 txq->mapping_err++; in t4vf_eth_xmit()
1246 txq_stop(txq); in t4vf_eth_xmit()
[all …]
cxgb4vf_main.c
477 struct sge_eth_txq *txq; in fwevtq_handler() local
499 txq = container_of(tq, struct sge_eth_txq, q); in fwevtq_handler()
511 txq->q.restarts++; in fwevtq_handler()
512 netif_tx_wake_queue(txq->txq); in fwevtq_handler()
574 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues() local
577 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
584 err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, in setup_sge_queues()
605 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues() local
608 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
610 EQ_MAP(s, txq->q.abs_id) = &txq->q; in setup_sge_queues()
[all …]
adapter.h
259 struct netdev_queue *txq; /* associated netdev TX queue */ member
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
base.c
733 struct ath5k_txq *txq, int padsize, in ath5k_txbuf_setup() argument
830 spin_lock_bh(&txq->lock); in ath5k_txbuf_setup()
831 list_add_tail(&bf->list, &txq->q); in ath5k_txbuf_setup()
832 txq->txq_len++; in ath5k_txbuf_setup()
833 if (txq->link == NULL) /* is this first packet? */ in ath5k_txbuf_setup()
834 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); in ath5k_txbuf_setup()
836 *txq->link = bf->daddr; in ath5k_txbuf_setup()
838 txq->link = &ds->ds_link; in ath5k_txbuf_setup()
839 ath5k_hw_start_tx_dma(ah, txq->qnum); in ath5k_txbuf_setup()
841 spin_unlock_bh(&txq->lock); in ath5k_txbuf_setup()
[all …]
debug.c
836 struct ath5k_txq *txq; in read_file_queue() local
844 txq = &ah->txqs[i]; in read_file_queue()
847 "%02d: %ssetup\n", i, txq->setup ? "" : "not "); in read_file_queue()
849 if (!txq->setup) in read_file_queue()
853 spin_lock_bh(&txq->lock); in read_file_queue()
854 list_for_each_entry_safe(bf, bf0, &txq->q, list) in read_file_queue()
856 spin_unlock_bh(&txq->lock); in read_file_queue()
859 " len: %d bufs: %d\n", txq->txq_len, n); in read_file_queue()
861 " stuck: %d\n", txq->txq_stuck); in read_file_queue()
base.h
108 struct ath5k_txq *txq, struct ieee80211_tx_control *control);
/linux-4.1.27/net/sched/
sch_generic.c
60 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
63 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
92 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
93 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
102 !netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
105 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
150 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
163 HARD_TX_LOCK(dev, txq, smp_processor_id()); in sch_direct_xmit()
164 if (!netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
[all …]
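
try_bulk_dequeue_skb() above budgets bulk dequeue in bytes: it starts from the BQL headroom reported by qdisc_avail_bulklimit() minus the first packet's length, and stops pulling packets once the budget is spent. A simplified sketch of that loop with stand-in types (not the qdisc API):

    struct example_pkt {
            int len;
            struct example_pkt *next;
    };

    /* How many packets fit the byte budget; the first is always taken. */
    static int bulk_dequeue_count(const struct example_pkt *head, int avail_bytes)
    {
            int budget = avail_bytes - head->len;
            int count = 1;
            const struct example_pkt *p;

            for (p = head->next; p && budget > 0; p = p->next) {
                    budget -= p->len;
                    count++;
            }
            return count;
    }
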
sch_teql.c
150 struct netdev_queue *txq; in teql_destroy() local
153 txq = netdev_get_tx_queue(master->dev, 0); in teql_destroy()
156 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); in teql_destroy()
158 qdisc_reset(rtnl_dereference(txq->qdisc)); in teql_destroy()
220 struct net_device *dev, struct netdev_queue *txq, in __teql_resolve() argument
260 struct netdev_queue *txq) in teql_resolve() argument
265 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) in teql_resolve()
272 res = __teql_resolve(skb, skb_res, dev, txq, dst); in teql_resolve()
/linux-4.1.27/drivers/bluetooth/
hci_bcm.c
36 struct sk_buff_head txq; member
49 skb_queue_head_init(&bcm->txq); in bcm_open()
61 skb_queue_purge(&bcm->txq); in bcm_close()
75 skb_queue_purge(&bcm->txq); in bcm_flush()
121 skb_queue_tail(&bcm->txq, skb); in bcm_enqueue()
130 return skb_dequeue(&bcm->txq); in bcm_dequeue()
hci_h4.c
52 struct sk_buff_head txq; member
66 skb_queue_head_init(&h4->txq); in h4_open()
79 skb_queue_purge(&h4->txq); in h4_flush()
93 skb_queue_purge(&h4->txq); in h4_close()
112 skb_queue_tail(&h4->txq, skb); in h4_enqueue()
145 return skb_dequeue(&h4->txq); in h4_dequeue()
hci_ath.c
49 struct sk_buff_head txq; member
108 skb_queue_head_init(&ath->txq); in ath_open()
124 skb_queue_purge(&ath->txq); in ath_close()
142 skb_queue_purge(&ath->txq); in ath_flush()
227 skb_queue_tail(&ath->txq, skb); in ath_enqueue()
239 return skb_dequeue(&ath->txq); in ath_dequeue()
hci_ll.c
83 struct sk_buff_head txq; member
115 skb_queue_tail(&ll->txq, skb); in send_hcill_cmd()
131 skb_queue_head_init(&ll->txq); in ll_open()
150 skb_queue_purge(&ll->txq); in ll_flush()
163 skb_queue_purge(&ll->txq); in ll_close()
186 skb_queue_tail(&ll->txq, skb); in __ll_do_awake()
319 skb_queue_tail(&ll->txq, skb); in ll_enqueue()
505 return skb_dequeue(&ll->txq); in ll_dequeue()
btsdio.c
63 struct sk_buff_head txq; member
115 while ((skb = skb_dequeue(&data->txq))) { in btsdio_work()
119 skb_queue_head(&data->txq, skb); in btsdio_work()
253 skb_queue_purge(&data->txq); in btsdio_flush()
284 skb_queue_tail(&data->txq, skb); in btsdio_send_frame()
314 skb_queue_head_init(&data->txq); in btsdio_probe()
dtl1_cs.c
75 struct sk_buff_head txq; member
156 skb = skb_dequeue(&(info->txq)); in dtl1_write_wakeup()
168 skb_queue_head(&(info->txq), skb); in dtl1_write_wakeup()
371 skb_queue_purge(&(info->txq)); in dtl1_hci_flush()
425 skb_queue_tail(&(info->txq), s); in dtl1_hci_send_frame()
447 skb_queue_head_init(&(info->txq)); in dtl1_open()
btuart_cs.c
72 struct sk_buff_head txq; member
152 skb = skb_dequeue(&(info->txq)); in btuart_write_wakeup()
164 skb_queue_head(&(info->txq), skb); in btuart_write_wakeup()
404 skb_queue_purge(&(info->txq)); in btuart_hci_flush()
447 skb_queue_tail(&(info->txq), skb); in btuart_hci_send_frame()
467 skb_queue_head_init(&(info->txq)); in btuart_open()
bluecard_cs.c
72 struct sk_buff_head txq; member
260 skb = skb_dequeue(&(info->txq)); in bluecard_write_wakeup()
329 skb_queue_head(&(info->txq), skb); in bluecard_write_wakeup()
601 skb_queue_tail(&(info->txq), skb); in bluecard_hci_set_baud_rate()
618 skb_queue_purge(&(info->txq)); in bluecard_hci_flush()
683 skb_queue_tail(&(info->txq), skb); in bluecard_hci_send_frame()
707 skb_queue_head_init(&(info->txq)); in bluecard_open()
790 skb_queue_purge(&(info->txq)); in bluecard_open()
bt3c_cs.c
77 struct sk_buff_head txq; member
196 skb = skb_dequeue(&(info->txq)); in bt3c_write_wakeup()
391 skb_queue_purge(&(info->txq)); in bt3c_hci_flush()
435 skb_queue_tail(&(info->txq), skb); in bt3c_hci_send_frame()
545 skb_queue_head_init(&(info->txq)); in bt3c_open()
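
All of the Bluetooth drivers above manage their txq with the same sk_buff_head life cycle: init on open, tail-enqueue from the HCI core, head-dequeue in the transmit path, purge on flush/close. Condensed into one sketch (illustrative wrappers around the real skb queue API):

    #include <linux/skbuff.h>

    static struct sk_buff_head example_txq;

    static void example_open(void)
    {
            skb_queue_head_init(&example_txq);  /* also initializes the lock */
    }

    static void example_enqueue(struct sk_buff *skb)
    {
            skb_queue_tail(&example_txq, skb);  /* producer: HCI layer */
    }

    static struct sk_buff *example_dequeue(void)
    {
            return skb_dequeue(&example_txq);   /* consumer: TX wakeup path */
    }

    static void example_flush(void)
    {
            skb_queue_purge(&example_txq);      /* frees everything queued */
    }
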
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
sge.c
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
[all …]
adapter.h
205 struct sge_txq txq[SGE_TXQ_PER_SET]; member
cxgb3_main.c
604 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); in ring_dbs()
/linux-4.1.27/net/core/
netpoll.c
73 struct netdev_queue *txq) in netpoll_start_xmit() argument
92 status = netdev_start_xmit(skb, dev, txq, false); in netpoll_start_xmit()
105 while ((skb = skb_dequeue(&npinfo->txq))) { in queue_process()
107 struct netdev_queue *txq; in queue_process() local
114 txq = skb_get_tx_queue(dev, skb); in queue_process()
117 HARD_TX_LOCK(dev, txq, smp_processor_id()); in queue_process()
118 if (netif_xmit_frozen_or_stopped(txq) || in queue_process()
119 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { in queue_process()
120 skb_queue_head(&npinfo->txq, skb); in queue_process()
121 HARD_TX_UNLOCK(dev, txq); in queue_process()
[all …]
dev.c
1874 static void netif_setup_tc(struct net_device *dev, unsigned int txq) in netif_setup_tc() argument
1880 if (tc->offset + tc->count > txq) { in netif_setup_tc()
1891 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2128 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) in netif_set_real_num_tx_queues() argument
2132 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
2140 txq); in netif_set_real_num_tx_queues()
2145 netif_setup_tc(dev, txq); in netif_set_real_num_tx_queues()
2147 if (txq < dev->real_num_tx_queues) { in netif_set_real_num_tx_queues()
2148 qdisc_reset_all_tx_gt(dev, txq); in netif_set_real_num_tx_queues()
2150 netif_reset_xps_queues_gt(dev, txq); in netif_set_real_num_tx_queues()
[all …]
pktgen.c
3319 struct netdev_queue *txq; in pktgen_xmit() local
3357 txq = skb_get_tx_queue(odev, pkt_dev->skb); in pktgen_xmit()
3361 HARD_TX_LOCK(odev, txq, smp_processor_id()); in pktgen_xmit()
3363 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { in pktgen_xmit()
3371 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); in pktgen_xmit()
3379 if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) in pktgen_xmit()
3402 HARD_TX_UNLOCK(odev, txq); in pktgen_xmit()
net-sysfs.c
1321 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; in register_queue_kobjects() local
1340 txq = real_tx; in register_queue_kobjects()
1345 netdev_queue_update_kobjects(dev, txq, 0); in register_queue_kobjects()
/linux-4.1.27/drivers/atm/
ambassador.c
628 amb_txq * txq = &dev->txq; in tx_give() local
636 spin_lock_irqsave (&txq->lock, flags); in tx_give()
638 if (txq->pending < txq->maximum) { in tx_give()
639 PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr); in tx_give()
641 *txq->in.ptr = *tx; in tx_give()
642 txq->pending++; in tx_give()
643 txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit); in tx_give()
645 wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr)); in tx_give()
648 if (txq->pending > txq->high) in tx_give()
649 txq->high = txq->pending; in tx_give()
[all …]
fore200e.c
856 struct host_txq* txq = &fore200e->host_txq; in fore200e_tx_irq() local
866 entry = &txq->host_entry[ txq->tail ]; in fore200e_tx_irq()
873 entry, txq->tail, entry->vc_map, entry->skb); in fore200e_tx_irq()
945 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); in fore200e_tx_irq()
1561 struct host_txq* txq = &fore200e->host_txq; in fore200e_send() local
1642 entry = &txq->host_entry[ txq->head ]; in fore200e_send()
1644 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { in fore200e_send()
1687 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); in fore200e_send()
1688 txq->txing++; in fore200e_send()
2268 struct host_txq* txq = &fore200e->host_txq; in fore200e_init_tx_queue() local
[all …]
firestream.c
1401 static int init_q(struct fs_dev *dev, struct queue *txq, int queue, in init_q() argument
1428 txq->sa = p; in init_q()
1429 txq->ea = p; in init_q()
1430 txq->offset = queue; in init_q()
1531 static void free_queue(struct fs_dev *dev, struct queue *txq) in free_queue() argument
1535 write_fs (dev, Q_SA(txq->offset), 0); in free_queue()
1536 write_fs (dev, Q_EA(txq->offset), 0); in free_queue()
1537 write_fs (dev, Q_RP(txq->offset), 0); in free_queue()
1538 write_fs (dev, Q_WP(txq->offset), 0); in free_queue()
1541 fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa); in free_queue()
[all …]
ambassador.h
634 amb_txq txq; member
/linux-4.1.27/drivers/net/wireless/iwlegacy/
common.c
382 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; in il_send_cmd_sync()
2724 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) in il_txq_update_write_ptr() argument
2727 int txq_id = txq->q.id; in il_txq_update_write_ptr()
2729 if (txq->need_update == 0) in il_txq_update_write_ptr()
2747 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2755 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2756 txq->need_update = 0; in il_txq_update_write_ptr()
2766 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_unmap() local
2767 struct il_queue *q = &txq->q; in il_tx_queue_unmap()
2773 il->ops->txq_free_tfd(il, txq); in il_tx_queue_unmap()
[all …]
3945.c
289 struct il_tx_queue *txq = &il->txq[txq_id]; in il3945_tx_queue_reclaim() local
290 struct il_queue *q = &txq->q; in il3945_tx_queue_reclaim()
298 skb = txq->skbs[txq->q.read_ptr]; in il3945_tx_queue_reclaim()
300 txq->skbs[txq->q.read_ptr] = NULL; in il3945_tx_queue_reclaim()
301 il->ops->txq_free_tfd(il, txq); in il3945_tx_queue_reclaim()
306 il_wake_queue(il, txq); in il3945_tx_queue_reclaim()
319 struct il_tx_queue *txq = &il->txq[txq_id]; in il3945_hdl_tx() local
326 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { in il3945_hdl_tx()
329 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); in il3945_hdl_tx()
346 txq->time_stamp = jiffies; in il3945_hdl_tx()
[all …]
4965-mac.c
232 if (!il->txq) { in il4965_hw_nic_init()
1665 struct il_tx_queue *txq; in il4965_tx_skb() local
1766 txq = &il->txq[txq_id]; in il4965_tx_skb()
1767 q = &txq->q; in il4965_tx_skb()
1782 txq->skbs[q->write_ptr] = skb; in il4965_tx_skb()
1785 out_cmd = txq->cmd[q->write_ptr]; in il4965_tx_skb()
1786 out_meta = &txq->meta[q->write_ptr]; in il4965_tx_skb()
1853 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); in il4965_tx_skb()
1857 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, in il4965_tx_skb()
1861 txq->need_update = 1; in il4965_tx_skb()
[all …]
4965.h
75 void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
76 int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
107 void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
3945.h
227 int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
229 void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
231 int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
3945-mac.c
470 struct il_tx_queue *txq = NULL; in il3945_tx_skb() local
534 txq = &il->txq[txq_id]; in il3945_tx_skb()
535 q = &txq->q; in il3945_tx_skb()
544 txq->skbs[q->write_ptr] = skb; in il3945_tx_skb()
547 out_cmd = txq->cmd[idx]; in il3945_tx_skb()
548 out_meta = &txq->meta[idx]; in il3945_tx_skb()
616 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); in il3945_tx_skb()
620 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, in il3945_tx_skb()
624 txq->need_update = 1; in il3945_tx_skb()
627 txq->need_update = 0; in il3945_tx_skb()
[all …]
common.h
1271 struct il_tx_queue *txq; member
1548 struct il_tx_queue *txq,
1551 struct il_tx_queue *txq, dma_addr_t addr,
1553 void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
1554 int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
1765 void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
2269 il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) in il_set_swq_id() argument
2274 txq->swq_id = (hwq << 2) | ac; in il_set_swq_id()
2291 il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) in il_wake_queue() argument
2293 u8 queue = txq->swq_id; in il_wake_queue()
[all …]
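
il_set_swq_id() above, together with the debug.c entry that follows, shows the swq_id encoding: the AC sits in the low two bits and the hardware queue in the bits above. A round-trip sketch:

    #include <stdint.h>

    static uint8_t pack_swq_id(uint8_t hwq, uint8_t ac)
    {
            return (uint8_t)((hwq << 2) | ac);  /* ac in bits 1:0 */
    }

    static void unpack_swq_id(uint8_t swq_id, uint8_t *hwq, uint8_t *ac)
    {
            *ac = swq_id & 3;
            *hwq = (swq_id >> 2) & 0x1f;  /* masks as in il_dbgfs_tx_queue_read() */
    }
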
debug.c
852 struct il_tx_queue *txq; in il_dbgfs_tx_queue_read() local
861 if (!il->txq) { in il_dbgfs_tx_queue_read()
870 txq = &il->txq[cnt]; in il_dbgfs_tx_queue_read()
871 q = &txq->q; in il_dbgfs_tx_queue_read()
878 txq->swq_id, txq->swq_id & 3, in il_dbgfs_tx_queue_read()
879 (txq->swq_id >> 2) & 0x1f); in il_dbgfs_tx_queue_read()
4965.c
1549 il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq, in il4965_txq_update_byte_cnt_tbl() argument
1553 int txq_id = txq->q.id; in il4965_txq_update_byte_cnt_tbl()
1554 int write_ptr = txq->q.write_ptr; in il4965_txq_update_byte_cnt_tbl()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
sge.c
1080 netif_tx_stop_queue(q->txq); in eth_txq_stop()
2377 struct sge_ofld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb() local
2380 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
2389 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && in sge_tx_timer_cb()
2390 __netif_tx_trylock(q->txq)) { in sge_tx_timer_cb()
2401 __netif_tx_unlock(q->txq); in sge_tx_timer_cb()
2570 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
2580 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
2582 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2584 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
[all …]
cxgb4_main.c
301 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable() local
307 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
314 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
329 txq->dcb_prio = value; in dcb_tx_queue_prio_enable()
612 struct sge_txq *txq; in fwevtq_handler() local
614 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
615 txq->restarts++; in fwevtq_handler()
616 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { in fwevtq_handler()
619 eq = container_of(txq, struct sge_eth_txq, q); in fwevtq_handler()
620 netif_tx_wake_queue(eq->txq); in fwevtq_handler()
[all …]
cxgb4.h
575 struct netdev_queue *txq; /* associated netdev TX queue */ member
1059 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1062 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
1065 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
1270 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
t4_hw.c
4460 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, in t4_cfg_pfvf() argument
4476 FW_PFVF_CMD_NEQ_V(txq)); in t4_cfg_pfvf()
/linux-4.1.27/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
3146 struct bna_txq *txq; in bna_tx_sm_started_entry() local
3151 txq = (struct bna_txq *)qe; in bna_tx_sm_started_entry()
3152 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
3154 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry()
3374 struct bna_txq *txq = NULL; in bna_bfi_tx_enet_start() local
3387 txq = (struct bna_txq *)qe; in bna_bfi_tx_enet_start()
3389 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start()
3390 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start()
3393 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start()
3395 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start()
[all …]
bna_types.h
444 struct bna_txq *txq; member
536 struct bna_txq *txq; /* BFI_MAX_TXQ entries */ member
bnad_ethtool.c
835 bnad->tx_info[i].tcb[j]->txq) { in bnad_per_q_stats_fill()
837 buf[bi++] = tcb->txq->tx_packets; in bnad_per_q_stats_fill()
838 buf[bi++] = tcb->txq->tx_bytes; in bnad_per_q_stats_fill()
bnad.c
222 tcb->txq->tx_packets += sent_packets; in bnad_txcmpl_process()
223 tcb->txq->tx_bytes += sent_bytes; in bnad_txcmpl_process()
1010 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_setup()
1020 (struct bnad_tx_info *)tcb->txq->tx->priv; in bnad_cb_tcb_destroy()
2429 bnad->tx_info[i].tcb[j]->txq->tx_packets; in bnad_netdev_qstats_fill()
2431 bnad->tx_info[i].tcb[j]->txq->tx_bytes; in bnad_netdev_qstats_fill()
/linux-4.1.27/include/linux/
netdevice.h
2493 void netif_schedule_queue(struct netdev_queue *txq);
2524 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues() local
2525 netif_tx_start_queue(txq); in netif_tx_start_all_queues()
2548 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues() local
2549 netif_tx_wake_queue(txq); in netif_tx_wake_all_queues()
2579 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_stop_all_queues() local
2580 netif_tx_stop_queue(txq); in netif_tx_stop_all_queues()
2790 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue() local
2792 netif_tx_start_queue(txq); in netif_start_subqueue()
2804 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue() local
[all …]
netpoll.h
41 struct sk_buff_head txq; member
ti_wilink_st.h
156 struct sk_buff_head txq, tx_waitq; member
/linux-4.1.27/drivers/net/
ifb.c
67 struct netdev_queue *txq; in ri_tasklet() local
70 txq = netdev_get_tx_queue(_dev, 0); in ri_tasklet()
72 if (__netif_tx_trylock(txq)) { in ri_tasklet()
74 __netif_tx_unlock(txq); in ri_tasklet()
114 if (__netif_tx_trylock(txq)) { in ri_tasklet()
120 __netif_tx_unlock(txq); in ri_tasklet()
123 __netif_tx_unlock(txq); in ri_tasklet()
tun.c
391 u32 txq = 0; in tun_select_queue() local
397 txq = skb_get_hash(skb); in tun_select_queue()
398 if (txq) { in tun_select_queue()
399 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); in tun_select_queue()
401 tun_flow_save_rps_rxhash(e, txq); in tun_select_queue()
402 txq = e->queue_index; in tun_select_queue()
405 txq = ((u64)txq * numqueues) >> 32; in tun_select_queue()
407 txq = skb_get_rx_queue(skb); in tun_select_queue()
408 while (unlikely(txq >= numqueues)) in tun_select_queue()
409 txq -= numqueues; in tun_select_queue()
[all …]
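
tun_select_queue() above maps a 32-bit flow hash onto the queue count with ((u64)txq * numqueues) >> 32: a multiply-and-shift that distributes hashes uniformly over [0, numqueues) without a divide or modulo. As a standalone function:

    #include <stdint.h>

    /* Scale a 32-bit hash uniformly onto [0, numqueues). */
    static uint32_t hash_to_queue(uint32_t hash, uint32_t numqueues)
    {
            return (uint32_t)(((uint64_t)hash * numqueues) >> 32);
    }
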
virtio_net.c
163 static int txq2vq(int txq) in txq2vq() argument
165 return txq * 2 + 1; in txq2vq()
915 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); in start_xmit() local
964 if (kick || netif_xmit_stopped(txq)) in start_xmit()
macvlan.c
777 struct netdev_queue *txq, in macvlan_set_lockdep_class_one() argument
780 lockdep_set_class(&txq->_xmit_lock, in macvlan_set_lockdep_class_one()
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
htc_pipe.c
106 if (list_empty(&ep->txq)) in get_htc_packet_credit_based()
110 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet_credit_based()
114 __func__, packet, get_queue_depth(&ep->txq)); in get_htc_packet_credit_based()
159 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet_credit_based()
183 if (list_empty(&ep->txq)) in get_htc_packet()
186 packet = list_first_entry(&ep->txq, struct htc_packet, list); in get_htc_packet()
191 __func__, packet, get_queue_depth(&ep->txq)); in get_htc_packet()
303 struct list_head *txq) in htc_try_send() argument
313 __func__, txq, in htc_try_send()
314 (txq == NULL) ? 0 : get_queue_depth(txq)); in htc_try_send()
[all …]
htc_mbox.c
426 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); in htc_tx_comp_update()
439 struct list_head *txq) in htc_tx_complete() argument
441 if (list_empty(txq)) in htc_tx_complete()
446 endpoint->eid, get_queue_depth(txq)); in htc_tx_complete()
448 ath6kl_tx_complete(endpoint->target, txq); in htc_tx_complete()
611 if (list_empty(&endpoint->txq)) in ath6kl_htc_tx_pkts_get()
613 packet = list_first_entry(&endpoint->txq, struct htc_packet, in ath6kl_htc_tx_pkts_get()
618 packet, get_queue_depth(&endpoint->txq)); in ath6kl_htc_tx_pkts_get()
628 packet = list_first_entry(&endpoint->txq, struct htc_packet, in ath6kl_htc_tx_pkts_get()
861 struct list_head txq; in ath6kl_htc_tx_from_queue() local
[all …]
htc.h
511 struct list_head txq; member
debug.c
245 get_queue_depth(&ep_dist->htc_ep->txq)); in dump_cred_dist()
735 get_queue_depth(&ep_list->htc_ep->txq)); in read_file_credit_dist_stats()
/linux-4.1.27/include/net/
sch_generic.h
128 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq) in qdisc_avail_bulklimit() argument
132 return dql_avail(&txq->dql); in qdisc_avail_bulklimit()
435 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_all_tx_empty() local
436 const struct Qdisc *q = rcu_dereference(txq->qdisc); in qdisc_all_tx_empty()
453 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_tx_changing() local
454 if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) in qdisc_tx_changing()
466 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in qdisc_tx_is_noop() local
467 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) in qdisc_tx_is_noop()
pkt_sched.h
102 struct net_device *dev, struct netdev_queue *txq,
mac80211.h
1355 struct ieee80211_txq *txq; member
1634 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; member
3390 struct ieee80211_txq *txq);
5394 struct ieee80211_txq *txq);
/linux-4.1.27/drivers/net/wireless/
mwl8k.c
248 struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES]; member
1443 struct mwl8k_tx_queue *txq = priv->txq + index; in mwl8k_txq_init() local
1447 txq->len = 0; in mwl8k_txq_init()
1448 txq->head = 0; in mwl8k_txq_init()
1449 txq->tail = 0; in mwl8k_txq_init()
1453 txq->txd = pci_zalloc_consistent(priv->pdev, size, &txq->txd_dma); in mwl8k_txq_init()
1454 if (txq->txd == NULL) { in mwl8k_txq_init()
1459 txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); in mwl8k_txq_init()
1460 if (txq->skb == NULL) { in mwl8k_txq_init()
1461 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); in mwl8k_txq_init()
[all …]
airo.c
1226 struct sk_buff_head txq;// tx queue used by mpi350 code member
1935 npacks = skb_queue_len (&ai->txq); in mpi_start_xmit()
1943 skb_queue_tail (&ai->txq, skb); in mpi_start_xmit()
1948 skb_queue_tail (&ai->txq, skb); in mpi_start_xmit()
1978 if ((skb = skb_dequeue(&ai->txq)) == NULL) { in mpi_send_packet()
2405 if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) { in stop_airo_card()
2407 for (;(skb = skb_dequeue(&ai->txq));) in stop_airo_card()
2816 skb_queue_head_init (&ai->txq); in _init_airo_card()
3435 if (!skb_queue_empty(&ai->txq)) { in airo_handle_tx()
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
i40evf_virtchnl.c
235 vqpi->txq.vsi_id = vqci->vsi_id; in i40evf_configure_queues()
236 vqpi->txq.queue_id = i; in i40evf_configure_queues()
237 vqpi->txq.ring_len = adapter->tx_rings[i]->count; in i40evf_configure_queues()
238 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; in i40evf_configure_queues()
239 vqpi->txq.headwb_enabled = 1; in i40evf_configure_queues()
240 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + in i40evf_configure_queues()
241 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); in i40evf_configure_queues()
i40e_virtchnl.h
207 struct i40e_virtchnl_txq_info txq; member
/linux-4.1.27/drivers/net/usb/
usbnet.c
761 temp = unlink_urbs(dev, &dev->txq) + in usbnet_terminate_urbs()
766 && !skb_queue_empty(&dev->txq) in usbnet_terminate_urbs()
1084 unlink_urbs (dev, &dev->txq); in usbnet_deferred_kevent()
1230 (void) defer_bh(dev, skb, &dev->txq, tx_done); in tx_complete()
1239 unlink_urbs (dev, &dev->txq); in usbnet_tx_timeout()
1364 spin_lock_irqsave(&dev->txq.lock, flags); in usbnet_start_xmit()
1367 spin_unlock_irqrestore(&dev->txq.lock, flags); in usbnet_start_xmit()
1379 spin_unlock_irqrestore(&dev->txq.lock, flags); in usbnet_start_xmit()
1398 __usbnet_queue_skb(&dev->txq, skb, tx_start); in usbnet_start_xmit()
1399 if (dev->txq.qlen >= TX_QLEN (dev)) in usbnet_start_xmit()
[all …]
/linux-4.1.27/drivers/net/ethernet/samsung/sxgbe/
sxgbe_main.c
337 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); in sxgbe_clear_descriptors()
573 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
582 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
606 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
642 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in dma_free_tx_skbufs()
658 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in free_dma_desc_resources()
672 priv->txq[queue_num] = devm_kmalloc(priv->device, in txring_mem_alloc()
674 if (!priv->txq[queue_num]) in txring_mem_alloc()
808 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in sxgbe_tx_all_clean()
827 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; in sxgbe_restart_tx_queue()
[all …]
sxgbe_platform.c
130 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); in sxgbe_platform_probe()
131 if (priv->txq[i]->irq_no <= 0) { in sxgbe_platform_probe()
163 irq_dispose_mapping(priv->txq[i]->irq_no); in sxgbe_platform_probe()
sxgbe_common.h
458 struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES]; member
/linux-4.1.27/net/mac80211/
sta_info.c
110 if (sta->sta.txq[0]) { in __cleanup_single_sta()
111 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in __cleanup_single_sta()
112 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); in __cleanup_single_sta()
116 atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]); in __cleanup_single_sta()
248 if (sta->sta.txq[0]) in sta_info_free()
249 kfree(to_txq_info(sta->sta.txq[0])); in sta_info_free()
342 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); in sta_info_alloc()
346 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in sta_info_alloc()
347 struct txq_info *txq = txq_data + i * size; in sta_info_alloc() local
349 ieee80211_init_tx_queue(sdata, sta, txq, i); in sta_info_alloc()
[all …]
agg-tx.c
194 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_stop_txq() local
197 if (!txq) in ieee80211_agg_stop_txq()
200 txqi = to_txq_info(txq); in ieee80211_agg_stop_txq()
211 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_start_txq() local
214 if (!txq) in ieee80211_agg_start_txq()
217 txqi = to_txq_info(txq); in ieee80211_agg_start_txq()
tx.c
835 if (!tx->sta->sta.txq[0]) in ieee80211_tx_h_sequence()
1077 } else if (!tx->sta->sta.txq[tid]) { in ieee80211_tx_prep_agg()
1229 struct ieee80211_txq *txq = NULL; in ieee80211_drv_tx() local
1242 txq = pubsta->txq[tid]; in ieee80211_drv_tx()
1244 txq = vif->txq; in ieee80211_drv_tx()
1247 if (!txq) in ieee80211_drv_tx()
1250 ac = txq->ac; in ieee80211_drv_tx()
1251 txqi = to_txq_info(txq); in ieee80211_drv_tx()
1266 struct ieee80211_txq *txq) in ieee80211_tx_dequeue() argument
1269 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif); in ieee80211_tx_dequeue()
[all …]
driver-ops.h
1371 struct txq_info *txq) in drv_wake_tx_queue() argument
1373 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); in drv_wake_tx_queue()
1378 trace_drv_wake_tx_queue(local, sdata, txq); in drv_wake_tx_queue()
1379 local->ops->wake_tx_queue(&local->hw, &txq->txq); in drv_wake_tx_queue()
tdls.c
263 struct ieee80211_tx_queue_params *txq; in ieee80211_tdls_add_wmm_param_ie() local
285 txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)]; in ieee80211_tdls_add_wmm_param_ie()
286 wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, in ieee80211_tdls_add_wmm_param_ie()
287 txq->acm, i); in ieee80211_tdls_add_wmm_param_ie()
288 wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); in ieee80211_tdls_add_wmm_param_ie()
289 wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); in ieee80211_tdls_add_wmm_param_ie()
trace.h
2318 struct txq_info *txq),
2320 TP_ARGS(local, sdata, txq),
2331 struct ieee80211_sta *sta = txq->txq.sta;
2336 __entry->ac = txq->txq.ac;
2337 __entry->tid = txq->txq.tid;
ieee80211_i.h
821 struct ieee80211_txq txq; member
1464 static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) in to_txq_info() argument
1466 return container_of(txq, struct txq_info, txq); in to_txq_info()
1926 struct txq_info *txq, int tid);
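
to_txq_info() above is the standard container_of() idiom: mac80211 embeds the public ieee80211_txq inside its private txq_info and recovers the outer struct by subtracting the member's offset. The equivalent in plain C (struct names here are stand-ins):

    #include <stddef.h>

    struct pub_txq { int ac; };

    struct priv_txq_info {
            long private_state;
            struct pub_txq txq;  /* embedded public part */
    };

    static struct priv_txq_info *to_priv(struct pub_txq *p)
    {
            return (struct priv_txq_info *)
                    ((char *)p - offsetof(struct priv_txq_info, txq));
    }
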
util.c
3336 txqi->txq.vif = &sdata->vif; in ieee80211_init_tx_queue()
3339 txqi->txq.sta = &sta->sta; in ieee80211_init_tx_queue()
3340 sta->sta.txq[tid] = &txqi->txq; in ieee80211_init_tx_queue()
3341 txqi->txq.ac = ieee802_1d_to_ac[tid & 7]; in ieee80211_init_tx_queue()
3343 sdata->vif.txq = &txqi->txq; in ieee80211_init_tx_queue()
3344 txqi->txq.ac = IEEE80211_AC_BE; in ieee80211_init_tx_queue()
iface.c
980 if (sdata->vif.txq) { in ieee80211_do_stop()
981 struct txq_info *txqi = to_txq_info(sdata->vif.txq); in ieee80211_do_stop()
984 atomic_set(&sdata->txqs_len[txqi->txq.ac], 0); in ieee80211_do_stop()
rx.c
1203 if (!sta->sta.txq[0]) in sta_ps_start()
1206 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { in sta_ps_start()
1207 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); in sta_ps_start()
/linux-4.1.27/drivers/net/wireless/mwifiex/
txrx.c
280 struct netdev_queue *txq; in mwifiex_write_data_complete() local
319 txq = netdev_get_tx_queue(priv->netdev, index); in mwifiex_write_data_complete()
320 if (netif_tx_queue_stopped(txq)) { in mwifiex_write_data_complete()
321 netif_tx_wake_queue(txq); in mwifiex_write_data_complete()
init.c
332 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); in mwifiex_wake_up_net_dev_queue() local
334 if (netif_tx_queue_stopped(txq)) in mwifiex_wake_up_net_dev_queue()
335 netif_tx_wake_queue(txq); in mwifiex_wake_up_net_dev_queue()
353 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); in mwifiex_stop_net_dev_queue() local
355 if (!netif_tx_queue_stopped(txq)) in mwifiex_stop_net_dev_queue()
356 netif_tx_stop_queue(txq); in mwifiex_stop_net_dev_queue()
main.c
646 struct netdev_queue *txq; in mwifiex_queue_tx_pkt() local
650 txq = netdev_get_tx_queue(priv->netdev, index); in mwifiex_queue_tx_pkt()
651 if (!netif_tx_queue_stopped(txq)) { in mwifiex_queue_tx_pkt()
652 netif_tx_stop_queue(txq); in mwifiex_queue_tx_pkt()
866 struct netdev_queue *txq; in mwifiex_dump_drv_info() local
932 txq = netdev_get_tx_queue(priv->netdev, idx); in mwifiex_dump_drv_info()
934 netif_tx_queue_stopped(txq) ? in mwifiex_dump_drv_info()
debugfs.c
81 struct netdev_queue *txq; in mwifiex_info_read() local
140 txq = netdev_get_tx_queue(netdev, i); in mwifiex_info_read()
141 p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ? in mwifiex_info_read()
/linux-4.1.27/net/irda/
irlap_event.c
192 if (skb_queue_empty(&self->txq) || self->remote_busy) { in irlap_start_poll_timer()
262 skb_queue_len(&self->txq)); in irlap_do_event()
264 if (!skb_queue_empty(&self->txq)) { in irlap_do_event()
284 while ((skb = skb_dequeue(&self->txq)) != NULL) { in irlap_do_event()
1005 skb_next = skb_peek(&self->txq); in irlap_state_xmit_p()
1031 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1055 nextfit = !skb_queue_empty(&self->txq); in irlap_state_xmit_p()
1082 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1768 skb_next = skb_peek(&self->txq); in irlap_state_xmit_s()
1782 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_s()
[all …]
irlap.c
135 skb_queue_head_init(&self->txq); in irlap_open()
350 skb_queue_tail(&self->txq, skb); in irlap_data_request()
359 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy)) in irlap_data_request()
424 if (!skb_queue_empty(&self->txq)) { in irlap_disconnect_request()
825 while ((skb = skb_dequeue(&self->txq)) != NULL) in irlap_flush_all_queues()
1134 skb_queue_len(&self->txq)); in irlap_seq_show()
irlap_frame.c
1011 while (!skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
1015 skb = skb_dequeue( &self->txq); in irlap_resend_rejected_frames()
1023 !skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
/linux-4.1.27/drivers/net/wireless/ipw2x00/
ipw2100.c
2820 struct ipw2100_bd_queue *txq = &priv->tx_queue; in __ipw2100_tx_process() local
2834 tbd = &txq->drv[packet->index]; in __ipw2100_tx_process()
2841 e = txq->oldest; in __ipw2100_tx_process()
2848 e = txq->oldest + frag_num; in __ipw2100_tx_process()
2849 e %= txq->entries; in __ipw2100_tx_process()
2866 if (w != txq->next) in __ipw2100_tx_process()
2902 i = txq->oldest; in __ipw2100_tx_process()
2904 &txq->drv[i], in __ipw2100_tx_process()
2905 (u32) (txq->nic + i * sizeof(struct ipw2100_bd)), in __ipw2100_tx_process()
2906 txq->drv[i].host_addr, txq->drv[i].buf_length); in __ipw2100_tx_process()
[all …]
ipw2200.c
213 struct clx2_tx_queue *txq, int qindex);
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); in ipw_irq_tasklet()
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); in ipw_irq_tasklet()
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); in ipw_irq_tasklet()
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); in ipw_irq_tasklet()
3824 struct clx2_tx_queue *txq) in ipw_queue_tx_free_tfd() argument
3826 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; in ipw_queue_tx_free_tfd()
3848 if (txq->txb[txq->q.last_used]) { in ipw_queue_tx_free_tfd()
3849 libipw_txb_free(txq->txb[txq->q.last_used]); in ipw_queue_tx_free_tfd()
3850 txq->txb[txq->q.last_used] = NULL; in ipw_queue_tx_free_tfd()
[all …]
ipw2200.h
1207 struct clx2_tx_queue txq[4]; member
/linux-4.1.27/drivers/net/ethernet/sun/
sunvnet.c
639 struct netdev_queue *txq; in vnet_ack() local
672 txq = netdev_get_tx_queue(dev, port->q_index); in vnet_ack()
673 if (unlikely(netif_tx_queue_stopped(txq) && in vnet_ack()
707 struct netdev_queue *txq; in maybe_tx_wakeup() local
709 txq = netdev_get_tx_queue(port->vp->dev, port->q_index); in maybe_tx_wakeup()
710 __netif_tx_lock(txq, smp_processor_id()); in maybe_tx_wakeup()
711 if (likely(netif_tx_queue_stopped(txq))) { in maybe_tx_wakeup()
715 netif_tx_wake_queue(txq); in maybe_tx_wakeup()
717 __netif_tx_unlock(txq); in maybe_tx_wakeup()
1182 struct netdev_queue *txq; in vnet_handle_offloads() local
[all …]
sungem.c
704 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); in gem_tx() local
706 __netif_tx_lock(txq, smp_processor_id()); in gem_tx()
710 __netif_tx_unlock(txq); in gem_tx()
892 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); in gem_poll() local
900 __netif_tx_lock(txq, smp_processor_id()); in gem_poll()
902 __netif_tx_unlock(txq); in gem_poll()
1341 struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); in gem_set_link_modes() local
1371 __netif_tx_lock(txq, smp_processor_id()); in gem_set_link_modes()
1433 __netif_tx_unlock(txq); in gem_set_link_modes()
niu.c
3604 struct netdev_queue *txq; in niu_tx_work() local
3610 txq = netdev_get_tx_queue(np->dev, index); in niu_tx_work()
3634 if (unlikely(netif_tx_queue_stopped(txq) && in niu_tx_work()
3636 __netif_tx_lock(txq, smp_processor_id()); in niu_tx_work()
3637 if (netif_tx_queue_stopped(txq) && in niu_tx_work()
3639 netif_tx_wake_queue(txq); in niu_tx_work()
3640 __netif_tx_unlock(txq); in niu_tx_work()
6634 struct netdev_queue *txq; in niu_start_xmit() local
6644 txq = netdev_get_tx_queue(dev, i); in niu_start_xmit()
6647 netif_tx_stop_queue(txq); in niu_start_xmit()
[all …]
/linux-4.1.27/drivers/misc/ti-st/
st_core.c
219 skb_queue_tail(&st_gdata->txq, waiting_skb); in st_wakeup_ack()
409 return skb_dequeue(&st_gdata->txq); in st_int_dequeue()
431 skb_queue_tail(&st_gdata->txq, skb); in st_int_enqueue()
777 skb_queue_purge(&st_gdata->txq); in st_tty_close()
869 skb_queue_head_init(&st_gdata->txq); in st_core_init()
898 skb_queue_purge(&st_gdata->txq); in st_core_exit()
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
be_main.c
848 struct be_queue_info *txq = &txo->q; in be_tx_setup_wrb_hdr() local
849 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); in be_tx_setup_wrb_hdr()
857 atomic_add(num_frags, &txq->used); in be_tx_setup_wrb_hdr()
867 struct be_queue_info *txq = &txo->q; in be_tx_setup_wrb_frag() local
869 wrb = queue_head_node(txq); in be_tx_setup_wrb_frag()
871 queue_head_inc(txq); in be_tx_setup_wrb_frag()
884 struct be_queue_info *txq = &txo->q; in be_xmit_restore() local
887 txq->head = head; in be_xmit_restore()
890 queue_head_inc(txq); in be_xmit_restore()
892 wrb = queue_head_node(txq); in be_xmit_restore()
[all …]
be_cmds.c
1266 struct be_queue_info *txq = &txo->q; in be_cmd_txq_create() local
1268 struct be_dma_mem *q_mem = &txq->dma_mem; in be_cmd_txq_create()
1290 req->queue_size = be_encoded_q_len(txq->len); in be_cmd_txq_create()
1298 txq->id = le16_to_cpu(resp->cid); in be_cmd_txq_create()
1303 txq->created = true; in be_cmd_txq_create()
/linux-4.1.27/include/net/irda/
irlap.h
148 struct sk_buff_head txq; /* Frames to be transmitted */ member
266 #define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
irlan_common.h
136 struct sk_buff_head txq; /* Transmit control queue */ member
/linux-4.1.27/drivers/net/ethernet/
lantiq_etop.c
186 struct netdev_queue *txq = in ltq_etop_poll_tx() local
202 if (netif_tx_queue_stopped(txq)) in ltq_etop_poll_tx()
203 netif_tx_start_queue(txq); in ltq_etop_poll_tx()
521 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); in ltq_etop_tx() local
534 netif_tx_stop_queue(txq); in ltq_etop_tx()
555 netif_tx_stop_queue(txq); in ltq_etop_tx()
/linux-4.1.27/drivers/net/ethernet/micrel/
ks8851.c
131 struct sk_buff_head txq; member
769 bool last = skb_queue_empty(&ks->txq); in ks8851_tx_work()
774 txb = skb_dequeue(&ks->txq); in ks8851_tx_work()
775 last = skb_queue_empty(&ks->txq); in ks8851_tx_work()
905 while (!skb_queue_empty(&ks->txq)) { in ks8851_net_stop()
906 struct sk_buff *txb = skb_dequeue(&ks->txq); in ks8851_net_stop()
947 skb_queue_tail(&ks->txq, skb); in ks8851_start_xmit()
1512 skb_queue_head_init(&ks->txq); in ks8851_probe()
/linux-4.1.27/drivers/net/ethernet/broadcom/
bcmsysport.c
671 struct netdev_queue *txq; in __bcm_sysport_tx_reclaim() local
674 txq = netdev_get_tx_queue(ndev, ring->index); in __bcm_sysport_tx_reclaim()
706 if (netif_tx_queue_stopped(txq) && pkts_compl) in __bcm_sysport_tx_reclaim()
707 netif_tx_wake_queue(txq); in __bcm_sysport_tx_reclaim()
936 struct netdev_queue *txq; in bcm_sysport_xmit() local
946 txq = netdev_get_tx_queue(dev, queue); in bcm_sysport_xmit()
952 netif_tx_stop_queue(txq); in bcm_sysport_xmit()
1026 netif_tx_stop_queue(txq); in bcm_sysport_xmit()
1665 u32 txq, rxq; in bcm_sysport_probe() local
1672 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) in bcm_sysport_probe()
[all …]
bnx2.c
2834 struct netdev_queue *txq; in bnx2_tx_int() local
2837 txq = netdev_get_tx_queue(bp->dev, index); in bnx2_tx_int()
2899 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); in bnx2_tx_int()
2910 if (unlikely(netif_tx_queue_stopped(txq)) && in bnx2_tx_int()
2912 __netif_tx_lock(txq, smp_processor_id()); in bnx2_tx_int()
2913 if ((netif_tx_queue_stopped(txq)) && in bnx2_tx_int()
2915 netif_tx_wake_queue(txq); in bnx2_tx_int()
2916 __netif_tx_unlock(txq); in bnx2_tx_int()
6574 struct netdev_queue *txq; in bnx2_start_xmit() local
6580 txq = netdev_get_tx_queue(dev, i); in bnx2_start_xmit()
[all …]
tg3.c
6523 struct netdev_queue *txq; in tg3_tx() local
6530 txq = netdev_get_tx_queue(tp->dev, index); in tg3_tx()
6597 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); in tg3_tx()
6608 if (unlikely(netif_tx_queue_stopped(txq) && in tg3_tx()
6610 __netif_tx_lock(txq, smp_processor_id()); in tg3_tx()
6611 if (netif_tx_queue_stopped(txq) && in tg3_tx()
6613 netif_tx_wake_queue(txq); in tg3_tx()
6614 __netif_tx_unlock(txq); in tg3_tx()
7850 struct netdev_queue *txq, struct sk_buff *skb) in tg3_tso_bug() argument
7857 netif_tx_stop_queue(txq); in tg3_tso_bug()
[all …]
/linux-4.1.27/net/ieee802154/6lowpan/
core.c
87 struct netdev_queue *txq, in lowpan_set_lockdep_class_one() argument
90 lockdep_set_class(&txq->_xmit_lock, in lowpan_set_lockdep_class_one()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_vfpf.c
638 req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; in bnx2x_vfpf_setup_q()
639 req->txq.vf_sb = fp_idx; in bnx2x_vfpf_setup_q()
640 req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; in bnx2x_vfpf_setup_q()
641 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; in bnx2x_vfpf_setup_q()
642 req->txq.flags = flags; in bnx2x_vfpf_setup_q()
643 req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; in bnx2x_vfpf_setup_q()
1474 q->sb_idx = setup_q->txq.vf_sb; in bnx2x_vf_mbx_setup_q()
1477 init_p->tx.hc_rate = setup_q->txq.hc_rate; in bnx2x_vf_mbx_setup_q()
1478 init_p->tx.sb_cq_index = setup_q->txq.sb_index; in bnx2x_vf_mbx_setup_q()
1480 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, in bnx2x_vf_mbx_setup_q()
[all …]
bnx2x_cmn.c
276 struct netdev_queue *txq; in bnx2x_tx_int() local
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
304 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); in bnx2x_tx_int()
320 if (unlikely(netif_tx_queue_stopped(txq))) { in bnx2x_tx_int()
331 __netif_tx_lock(txq, smp_processor_id()); in bnx2x_tx_int()
333 if ((netif_tx_queue_stopped(txq)) && in bnx2x_tx_int()
336 netif_tx_wake_queue(txq); in bnx2x_tx_int()
338 __netif_tx_unlock(txq); in bnx2x_tx_int()
3721 struct netdev_queue *txq; in bnx2x_start_xmit() local
3746 txq = netdev_get_tx_queue(dev, txq_index); in bnx2x_start_xmit()
[all …]
Dbnx2x_vfpf.h277 } txq; member
Dbnx2x_ethtool.c2406 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, in bnx2x_run_loopback() local
2473 netdev_tx_sent_queue(txq, skb->len); in bnx2x_run_loopback()
/linux-4.1.27/net/caif/
Dcaif_dev.c170 struct netdev_queue *txq; in transmit() local
188 txq = netdev_get_tx_queue(skb->dev, 0); in transmit()
189 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); in transmit()
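
caif_dev's transmit() peeks at the backlog of the root qdisc on queue 0 to
decide whether to assert flow control toward the CAIF stack. The qdisc
pointer is RCU-managed, hence the rcu_dereference_bh() in the hit. A
self-contained sketch of the same peek, assuming BH-side RCU protection is
not already guaranteed by the caller:

    #include <linux/netdevice.h>
    #include <net/sch_generic.h>

    static int my_root_qdisc_len(struct net_device *dev)
    {
        struct netdev_queue *txq;
        int qlen;

        rcu_read_lock_bh();
        txq = netdev_get_tx_queue(dev, 0);
        qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
        rcu_read_unlock_bh();

        return qlen;
    }
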
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
Dxgbe-drv.c990 struct netdev_queue *txq; in xgbe_stop() local
1015 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_stop()
1016 netdev_tx_reset_queue(txq); in xgbe_stop()
1468 struct netdev_queue *txq; in xgbe_xmit() local
1474 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_xmit()
1511 netdev_tx_sent_queue(txq, packet->tx_bytes); in xgbe_xmit()
1856 struct netdev_queue *txq; in xgbe_tx_poll() local
1866 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_tx_poll()
1900 netdev_tx_completed_queue(txq, tx_packets, tx_bytes); in xgbe_tx_poll()
1905 netif_tx_wake_queue(txq); in xgbe_tx_poll()
/linux-4.1.27/Documentation/devicetree/bindings/net/
Dbrcm,systemport.txt18 - systemport,num-txq: number of HW transmit queues, an integer
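
This binding is what the bcm_sysport_probe() hits near the top of this page
consume: of_property_read_u32() returns 0 only when the property exists and
parses, so the driver can fall back to a built-in default otherwise. A
sketch of that probe-time read; the fallback value here is a placeholder,
not necessarily what the driver uses:

    #include <linux/of.h>

    u32 txq;

    if (of_property_read_u32(dn, "systemport,num-txq", &txq))
        txq = 1;    /* property absent: fall back to a single tx queue */
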
/linux-4.1.27/net/irda/irlan/
Dirlan_common.c235 skb_queue_head_init(&self->client.txq); in irlan_open()
276 skb_queue_purge(&self->client.txq); in __irlan_close()
577 skb = skb_dequeue(&self->client.txq); in irlan_run_ctrl_tx_queue()
605 skb_queue_tail(&self->client.txq, skb); in irlan_ctrl_data_request()
Dirlan_eth.c148 skb_queue_purge(&self->client.txq); in irlan_eth_close()
Dirlan_client.c229 while ((skb = skb_dequeue(&self->client.txq)) != NULL) { in irlan_client_ctrl_disconnect_indication()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
Di40e_virtchnl.h207 struct i40e_virtchnl_txq_info txq; member
Di40e_virtchnl_pf.c1279 vsi_queue_id = qpi->txq.queue_id; in i40e_vc_config_queues_msg()
1280 if ((qpi->txq.vsi_id != vsi_id) || in i40e_vc_config_queues_msg()
1291 &qpi->txq)) { in i40e_vc_config_queues_msg()
/linux-4.1.27/drivers/net/hamradio/
Dbpqether.c129 struct netdev_queue *txq, in bpq_set_lockdep_class_one() argument
132 lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); in bpq_set_lockdep_class_one()
/linux-4.1.27/drivers/net/wan/
Ddlci.c196 struct netdev_queue *txq = skb_get_tx_queue(dev, skb); in dlci_transmit() local
197 netdev_start_xmit(skb, dlp->slave, txq, false); in dlci_transmit()
Dfarsync.c443 struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */ member
1366 skb = port->txq[port->txqs]; in do_bottom_half_tx()
2336 port->txq[port->txqe] = skb; in fst_start_xmit()
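
farsync takes the other classic shape for a software tx queue: not an
sk_buff_head but a fixed array of skb pointers (txq[FST_TXQ_DEPTH]) with
producer/consumer indices txqe and txqs. A generic sketch of such a ring,
with an assumed depth and no locking shown (the driver serializes these
with its own card lock):

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    #define MY_TXQ_DEPTH 16         /* assumed depth, not FST_TXQ_DEPTH */

    struct my_port {
        struct sk_buff *txq[MY_TXQ_DEPTH];
        int txqs;   /* consumer: next slot the bottom half sends */
        int txqe;   /* producer: next slot ndo_start_xmit fills */
    };

    static int my_txq_put(struct my_port *port, struct sk_buff *skb)
    {
        int next = (port->txqe + 1) % MY_TXQ_DEPTH;

        if (next == port->txqs)
            return -ENOSPC;     /* full: caller stops the netdev queue */
        port->txq[port->txqe] = skb;
        port->txqe = next;
        return 0;
    }

    static struct sk_buff *my_txq_get(struct my_port *port)
    {
        struct sk_buff *skb;

        if (port->txqs == port->txqe)
            return NULL;        /* empty */
        skb = port->txq[port->txqs];
        port->txqs = (port->txqs + 1) % MY_TXQ_DEPTH;
        return skb;
    }
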
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
Dsdio.c442 struct pktq txq; /* Queue length used for flow-control */ member
2376 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)); in brcmf_sdio_sendfromq()
2380 pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, in brcmf_sdio_sendfromq()
2412 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { in brcmf_sdio_sendfromq()
2531 brcmu_pktq_flush(&bus->txq, true, NULL, NULL); in brcmf_sdio_bus_stop()
2729 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && in brcmf_sdio_dpc()
2752 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && in brcmf_sdio_dpc()
2764 return &bus->txq; in brcmf_sdio_bus_gettxq()
2827 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); in brcmf_sdio_bus_txdata()
2834 if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) { in brcmf_sdio_bus_txdata()
[all …]
Dfwsignal.c824 struct pktq *txq; in brcmf_fws_bus_txq_cleanup() local
829 txq = brcmf_bus_gettxq(fws->drvr->bus_if); in brcmf_fws_bus_txq_cleanup()
830 if (IS_ERR(txq)) { in brcmf_fws_bus_txq_cleanup()
835 for (prec = 0; prec < txq->num_prec; prec++) { in brcmf_fws_bus_txq_cleanup()
836 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx); in brcmf_fws_bus_txq_cleanup()
843 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx); in brcmf_fws_bus_txq_cleanup()
/linux-4.1.27/drivers/net/ethernet/sfc/
Dptp.c273 struct sk_buff_head txq; member
1130 skb_queue_purge(&efx->ptp_data->txq); in efx_ptp_stop()
1183 while ((skb = skb_dequeue(&ptp_data->txq))) in efx_ptp_worker()
1227 skb_queue_head_init(&ptp->txq); in efx_ptp_probe()
1316 skb_queue_purge(&efx->ptp_data->txq); in efx_ptp_remove()
1453 skb_queue_tail(&ptp->txq, skb); in efx_ptp_tx()
/linux-4.1.27/include/linux/usb/
Dusbnet.h58 struct sk_buff_head txq; member
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/
Dqlcnic_hw.c403 __netif_tx_lock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
408 netif_tx_stop_queue(tx_ring->txq); in qlcnic_send_cmd_descs()
412 netif_tx_wake_queue(tx_ring->txq); in qlcnic_send_cmd_descs()
415 __netif_tx_unlock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
439 __netif_tx_unlock_bh(tx_ring->txq); in qlcnic_send_cmd_descs()
Dqlcnic_io.c692 netif_tx_stop_queue(tx_ring->txq); in qlcnic_xmit_frame()
694 netif_tx_start_queue(tx_ring->txq); in qlcnic_xmit_frame()
927 if (netif_tx_queue_stopped(tx_ring->txq) && in qlcnic_process_cmd_ring()
930 netif_tx_wake_queue(tx_ring->txq); in qlcnic_process_cmd_ring()
Dqlcnic.h638 struct netdev_queue *txq; member
Dqlcnic_main.c2400 tx_ring->txq = netdev_get_tx_queue(netdev, ring); in qlcnic_alloc_tx_rings()
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/
Dvxge-main.c109 if (__netif_tx_trylock(fifo->txq)) { in VXGE_COMPLETE_VPATH_TX()
112 __netif_tx_unlock(fifo->txq); in VXGE_COMPLETE_VPATH_TX()
621 if (netif_tx_queue_stopped(fifo->txq)) in vxge_xmit_compl()
622 netif_tx_wake_queue(fifo->txq); in vxge_xmit_compl()
863 if (netif_tx_queue_stopped(fifo->txq)) in vxge_xmit()
878 netif_tx_stop_queue(fifo->txq); in vxge_xmit()
991 netif_tx_stop_queue(fifo->txq); in vxge_xmit()
1573 if (netif_tx_queue_stopped(vpath->fifo.txq)) in vxge_reset_vpath()
1574 netif_tx_wake_queue(vpath->fifo.txq); in vxge_reset_vpath()
2082 vpath->fifo.txq = in vxge_open_vpaths()
[all …]
Dvxge-main.h240 struct netdev_queue *txq; member
/linux-4.1.27/drivers/scsi/lpfc/
Dlpfc_sli.h206 struct list_head txq; member
Dlpfc_sli.c1030 if (!list_empty(&pring->txq)) in __lpfc_sli_release_iocbq_s4()
1347 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); in lpfc_sli_ringtx_get()
1618 (!list_empty(&pring->txq)) && in lpfc_sli_resume_iocb()
3543 list_splice_init(&pring->txq, &completions); in lpfc_sli_abort_iocb_ring()
3554 list_splice_init(&pring->txq, &completions); in lpfc_sli_abort_iocb_ring()
3611 LIST_HEAD(txq); in lpfc_sli_flush_fcp_rings()
3629 list_splice_init(&pring->txq, &txq); in lpfc_sli_flush_fcp_rings()
3637 lpfc_sli_cancel_iocbs(phba, &txq, in lpfc_sli_flush_fcp_rings()
3650 list_splice_init(&pring->txq, &txq); in lpfc_sli_flush_fcp_rings()
3658 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, in lpfc_sli_flush_fcp_rings()
[all …]
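
In lpfc, txq is not a netdev queue at all: it is a list_head of pending
iocbs hanging off an SLI ring (the lpfc_sli.h hit above). The flush paths
splice the whole pending list onto a local list in O(1) under the ring
lock, then cancel the entries after dropping it, so lock hold time stays
independent of queue depth. A sketch of that splice-and-cancel shape; the
lock and completion codes follow lpfc convention but are assumptions here:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    LIST_HEAD(completions);

    /* detach every queued iocb in one step */
    spin_lock_irq(&phba->hbalock);
    list_splice_init(&pring->txq, &completions);
    spin_unlock_irq(&phba->hbalock);

    /* fail them back to their submitters outside the lock */
    lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                          IOERR_SLI_DOWN);
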
Dlpfc_hbadisc.c730 (!list_empty(&pring->txq))) in lpfc_work_done()
4453 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, in lpfc_no_rpi()
5179 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { in lpfc_free_tx()
Dlpfc_bsg.c5347 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, in lpfc_bsg_timeout()
5384 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, in lpfc_bsg_timeout()
Dlpfc_nportdisc.c252 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { in lpfc_els_abort()
Dlpfc_els.c6412 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { in lpfc_els_flush_cmd()
8220 if (!(list_empty(&pring->txq))) in lpfc_sli4_els_xri_aborted()
Dlpfc_scsi.c601 if (!list_empty(&pring->txq)) in lpfc_sli4_fcp_xri_aborted()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
Dehea_main.c822 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes() local
873 if (unlikely(netif_tx_queue_stopped(txq) && in ehea_proc_cqes()
875 __netif_tx_lock(txq, smp_processor_id()); in ehea_proc_cqes()
876 if (netif_tx_queue_stopped(txq) && in ehea_proc_cqes()
878 netif_tx_wake_queue(txq); in ehea_proc_cqes()
879 __netif_tx_unlock(txq); in ehea_proc_cqes()
2058 struct netdev_queue *txq; in ehea_start_xmit() local
2061 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); in ehea_start_xmit()
2111 netif_tx_stop_queue(txq); in ehea_start_xmit()
2119 netif_tx_stop_queue(txq); in ehea_start_xmit()
/linux-4.1.27/drivers/net/ethernet/broadcom/genet/
Dbcmgenet.c1023 struct netdev_queue *txq; in __bcmgenet_tx_reclaim() local
1075 txq = netdev_get_tx_queue(dev, ring->queue); in __bcmgenet_tx_reclaim()
1076 if (netif_tx_queue_stopped(txq)) in __bcmgenet_tx_reclaim()
1077 netif_tx_wake_queue(txq); in __bcmgenet_tx_reclaim()
1281 struct netdev_queue *txq; in bcmgenet_xmit() local
1303 txq = netdev_get_tx_queue(dev, ring->queue); in bcmgenet_xmit()
1307 netif_tx_stop_queue(txq); in bcmgenet_xmit()
1359 netif_tx_stop_queue(txq); in bcmgenet_xmit()
1361 if (!skb->xmit_more || netif_xmit_stopped(txq)) in bcmgenet_xmit()
/linux-4.1.27/drivers/net/team/
Dteam.c1543 struct netdev_queue *txq, in team_set_lockdep_class_one() argument
1546 lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key); in team_set_lockdep_class_one()
1676 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; in team_select_queue() local
1683 if (unlikely(txq >= dev->real_num_tx_queues)) { in team_select_queue()
1685 txq -= dev->real_num_tx_queues; in team_select_queue()
1686 } while (txq >= dev->real_num_tx_queues); in team_select_queue()
1688 return txq; in team_select_queue()
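
team_select_queue() and bond_select_queue() (further down this page) share
one trick: reuse the recorded rx queue id as the tx queue id so a flow
stays on the same queue pair, then fold out-of-range values back in by
repeated subtraction, which is cheaper than a division when overshoot is
rare and small. Reconstructed from the hits, with the ndo hook's extra
accel_priv/fallback parameters omitted for brevity:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
    {
        /* follow the rx queue the flow arrived on, else queue 0 */
        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

        if (unlikely(txq >= dev->real_num_tx_queues)) {
            do {
                txq -= dev->real_num_tx_queues;
            } while (txq >= dev->real_num_tx_queues);
        }
        return txq;
    }

ixgbe_select_queue() applies the same fold, but against a feature ring
group's f->indices and rebased with f->offset.
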
/linux-4.1.27/net/batman-adv/
Dsoft-interface.c670 struct netdev_queue *txq, in batadv_set_lockdep_class_one() argument
673 lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); in batadv_set_lockdep_class_one()
/linux-4.1.27/drivers/net/ipvlan/
Dipvlan_main.c95 struct netdev_queue *txq, in ipvlan_set_lockdep_class_one() argument
98 lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key); in ipvlan_set_lockdep_class_one()
/linux-4.1.27/net/8021q/
Dvlan_dev.c481 struct netdev_queue *txq, in vlan_dev_set_lockdep_one() argument
484 lockdep_set_class_and_subclass(&txq->_xmit_lock, in vlan_dev_set_lockdep_one()
/linux-4.1.27/drivers/net/ethernet/qlogic/netxen/
Dnetxen_nic_hw.c593 __netif_tx_lock_bh(tx_ring->txq); in netxen_send_cmd_descs()
599 netif_tx_stop_queue(tx_ring->txq); in netxen_send_cmd_descs()
603 netif_tx_wake_queue(tx_ring->txq); in netxen_send_cmd_descs()
605 __netif_tx_unlock_bh(tx_ring->txq); in netxen_send_cmd_descs()
629 __netif_tx_unlock_bh(tx_ring->txq); in netxen_send_cmd_descs()
Dnetxen_nic.h664 struct netdev_queue *txq; member
Dnetxen_nic_init.c213 tx_ring->txq = netdev_get_tx_queue(netdev, 0); in netxen_alloc_sw_resources()
/linux-4.1.27/drivers/net/ethernet/nxp/
Dlpc_eth.c1018 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0); in lpc_eth_poll() local
1020 __netif_tx_lock(txq, smp_processor_id()); in lpc_eth_poll()
1022 __netif_tx_unlock(txq); in lpc_eth_poll()
/linux-4.1.27/drivers/net/ethernet/cisco/enic/
Denic_main.c566 struct netdev_queue *txq; in enic_hard_start_xmit() local
575 txq = netdev_get_tx_queue(netdev, txq_map); in enic_hard_start_xmit()
593 netif_tx_stop_queue(txq); in enic_hard_start_xmit()
603 netif_tx_stop_queue(txq); in enic_hard_start_xmit()
604 if (!skb->xmit_more || netif_xmit_stopped(txq)) in enic_hard_start_xmit()
/linux-4.1.27/drivers/net/bonding/
Dbond_main.c3949 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; in bond_select_queue() local
3954 if (unlikely(txq >= dev->real_num_tx_queues)) { in bond_select_queue()
3956 txq -= dev->real_num_tx_queues; in bond_select_queue()
3957 } while (txq >= dev->real_num_tx_queues); in bond_select_queue()
3959 return txq; in bond_select_queue()
4532 struct netdev_queue *txq, in bond_set_lockdep_class_one() argument
4535 lockdep_set_class(&txq->_xmit_lock, in bond_set_lockdep_class_one()
/linux-4.1.27/net/bluetooth/
D6lowpan.c652 struct netdev_queue *txq, in bt_set_lockdep_class_one() argument
655 lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key); in bt_set_lockdep_class_one()
/linux-4.1.27/net/netrom/
Daf_netrom.c79 struct netdev_queue *txq, in nr_set_lockdep_one() argument
82 lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); in nr_set_lockdep_one()
/linux-4.1.27/net/packet/
Daf_packet.c250 struct netdev_queue *txq; in packet_direct_xmit() local
262 txq = skb_get_tx_queue(dev, skb); in packet_direct_xmit()
266 HARD_TX_LOCK(dev, txq, smp_processor_id()); in packet_direct_xmit()
267 if (!netif_xmit_frozen_or_drv_stopped(txq)) in packet_direct_xmit()
268 ret = netdev_start_xmit(skb, dev, txq, false); in packet_direct_xmit()
269 HARD_TX_UNLOCK(dev, txq); in packet_direct_xmit()
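
packet_direct_xmit() is AF_PACKET's QDISC_BYPASS path: it transmits at the
driver directly, skipping qdiscs, by taking the per-queue HARD_TX_LOCK,
checking that the queue is neither frozen nor driver-stopped, and calling
netdev_start_xmit(). A sketch of the core of that path, with the skb
validation and drop accounting elided:

    #include <linux/netdevice.h>

    static netdev_tx_t my_direct_xmit(struct sk_buff *skb)
    {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
        netdev_tx_t ret = NETDEV_TX_BUSY;

        local_bh_disable();
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
            ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);
        local_bh_enable();

        return ret;
    }
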
/linux-4.1.27/drivers/net/ethernet/atheros/atl1c/
Datl1c_main.c1198 u32 mac, txq, rxq; in atl1c_start_mac() local
1204 AT_READ_REG(hw, REG_TXQ_CTRL, &txq); in atl1c_start_mac()
1208 txq |= TXQ_CTRL_EN; in atl1c_start_mac()
1222 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); in atl1c_start_mac()
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
Ddebugfs.c1167 struct netdev_queue *txq = netdev_get_tx_queue(ndev, i); in wil_info_debugfs_show() local
1168 unsigned long state = txq->state; in wil_info_debugfs_show()
/linux-4.1.27/net/rose/
Daf_rose.c80 struct netdev_queue *txq, in rose_set_lockdep_one() argument
83 lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); in rose_set_lockdep_one()
/linux-4.1.27/drivers/tty/
Dn_gsm.c2035 struct gsm_msg *txq, *ntxq; in gsm_cleanup_mux() local
2074 list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) in gsm_cleanup_mux()
2075 kfree(txq); in gsm_cleanup_mux()
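
n_gsm's mux teardown frees its pending tx messages with the _safe list
iterator, which caches the next pointer before the loop body runs so each
entry can be freed in place. A minimal sketch of that teardown, with a
hypothetical message type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_msg {
        struct list_head list;
        /* ... payload ... */
    };

    static void my_flush_tx_list(struct list_head *tx_list)
    {
        struct my_msg *txq, *ntxq;

        /* _safe: 'ntxq' holds the next node while 'txq' is freed */
        list_for_each_entry_safe(txq, ntxq, tx_list, list)
            kfree(txq);
        INIT_LIST_HEAD(tx_list);    /* leave the list reusable */
    }
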
/linux-4.1.27/drivers/net/wireless/hostap/
Dhostap_hw.c3070 struct netdev_queue *txq, in prism2_set_lockdep_class_one() argument
3073 lockdep_set_class(&txq->_xmit_lock, in prism2_set_lockdep_class_one()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
Dixgbe_main.c7244 int txq; in ixgbe_select_queue() local
7269 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : in ixgbe_select_queue()
7272 while (txq >= f->indices) in ixgbe_select_queue()
7273 txq -= f->indices; in ixgbe_select_queue()
7275 return txq + f->offset; in ixgbe_select_queue()
/linux-4.1.27/drivers/net/vmxnet3/
Dvmxnet3_drv.c1679 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx() local
1680 vmxnet3_tq_tx_complete(txq, adapter); in vmxnet3_msix_tx()
/linux-4.1.27/Documentation/scsi/
DChangeLog.lpfc251 lpfc_els_abort to reset txq and txcmplq iterator after a