Lines matching references to txq (identifier search results; the functions listed below are in net/sched/sch_generic.c)
60 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
63 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
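
The two matches above are the txq parameter of try_bulk_dequeue_skb() and the byte budget it feeds. Below is a condensed sketch of the surrounding helper, reconstructed from the matched lines (exact body varies by kernel version): the budget returned by qdisc_avail_bulklimit(txq) bounds how many additional skbs get chained behind the first one.

    static void try_bulk_dequeue_skb(struct Qdisc *q, struct sk_buff *skb,
                                     const struct netdev_queue *txq,
                                     int *packets)
    {
            /* BQL-derived budget for this tx queue, minus the skb we already hold */
            int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

            while (bytelimit > 0) {
                    struct sk_buff *nskb = q->dequeue(q);

                    if (!nskb)
                            break;

                    bytelimit -= nskb->len;  /* includes GSO length */
                    skb->next = nskb;        /* chain behind the previous skb */
                    skb = nskb;
                    (*packets)++;            /* a GSO skb counts as one packet */
            }
            skb->next = NULL;
    }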
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
92 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
93 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
102 !netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
105 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
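
The dequeue_skb() matches show txq being initialized from q->dev_queue, retargeted with skb_get_tx_queue() for a requeued GSO skb, and checked with netif_xmit_frozen_or_stopped() before a fresh dequeue and optional bulking. A condensed sketch of that flow, reconstructed from the matched lines (surrounding details paraphrased and version-dependent):

    static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                       int *packets)
    {
            struct sk_buff *skb = q->gso_skb;
            const struct netdev_queue *txq = q->dev_queue;

            *packets = 1;
            *validate = true;
            if (unlikely(skb)) {
                    /* requeued skb: recheck its real tx queue without the tx lock */
                    txq = skb_get_tx_queue(txq->dev, skb);
                    if (!netif_xmit_frozen_or_stopped(txq)) {
                            q->gso_skb = NULL;
                            q->q.qlen--;
                    } else {
                            skb = NULL;
                    }
                    *validate = false;       /* already validated before requeue */
            } else {
                    if (!(q->flags & TCQ_F_ONETXQUEUE) ||
                        !netif_xmit_frozen_or_stopped(txq)) {
                            skb = q->dequeue(q);
                            if (skb && qdisc_may_bulk(q))
                                    try_bulk_dequeue_skb(q, skb, txq, packets);
                    }
            }
            return skb;
    }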
150 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
163 HARD_TX_LOCK(dev, txq, smp_processor_id()); in sch_direct_xmit()
164 if (!netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
165 skb = dev_hard_start_xmit(skb, dev, txq, &ret); in sch_direct_xmit()
167 HARD_TX_UNLOCK(dev, txq); in sch_direct_xmit()
179 ret = handle_dev_cpu_collision(skb, txq, q); in sch_direct_xmit()
189 if (ret && netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
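
In sch_direct_xmit() the matches cover the txq argument, the HARD_TX_LOCK/HARD_TX_UNLOCK pair around dev_hard_start_xmit(), the NETDEV_TX_LOCKED collision path, and the final frozen-or-stopped check. A trimmed sketch of that sequence, reconstructed from the matched lines (error paths abbreviated, details version-dependent):

    int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                        struct net_device *dev, struct netdev_queue *txq,
                        spinlock_t *root_lock, bool validate)
    {
            int ret = NETDEV_TX_BUSY;

            spin_unlock(root_lock);          /* drop the qdisc root lock */

            if (validate)                    /* GSO/checksum work done outside locks */
                    skb = validate_xmit_skb_list(skb, dev);

            if (skb) {
                    HARD_TX_LOCK(dev, txq, smp_processor_id());
                    if (!netif_xmit_frozen_or_stopped(txq))
                            skb = dev_hard_start_xmit(skb, dev, txq, &ret);
                    HARD_TX_UNLOCK(dev, txq);
            }
            spin_lock(root_lock);

            if (dev_xmit_complete(ret))
                    ret = qdisc_qlen(q);                      /* driver took the skb */
            else if (ret == NETDEV_TX_LOCKED)
                    ret = handle_dev_cpu_collision(skb, txq, q);
            else
                    ret = dev_requeue_skb(skb, q);            /* NETDEV_TX_BUSY */

            if (ret && netif_xmit_frozen_or_stopped(txq))
                    ret = 0;

            return ret;
    }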
216 struct netdev_queue *txq; in qdisc_restart() local
229 txq = skb_get_tx_queue(dev, skb); in qdisc_restart()
231 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); in qdisc_restart()
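
qdisc_restart() ties the previous helpers together: it dequeues, resolves the skb's tx queue with skb_get_tx_queue(), and hands everything to sch_direct_xmit(). A sketch reconstructed from the matched lines:

    static inline int qdisc_restart(struct Qdisc *q, int *packets)
    {
            struct netdev_queue *txq;
            struct net_device *dev;
            spinlock_t *root_lock;
            struct sk_buff *skb;
            bool validate;

            /* Dequeue packet (and possibly a bulk of followers) */
            skb = dequeue_skb(q, &validate, packets);
            if (unlikely(!skb))
                    return 0;

            root_lock = qdisc_lock(q);
            dev = qdisc_dev(q);
            txq = skb_get_tx_queue(dev, skb);

            return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
    }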
288 struct netdev_queue *txq; in dev_watchdog() local
290 txq = netdev_get_tx_queue(dev, i); in dev_watchdog()
294 trans_start = txq->trans_start ? : dev->trans_start; in dev_watchdog()
295 if (netif_xmit_stopped(txq) && in dev_watchdog()
299 txq->trans_timeout++; in dev_watchdog()
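
The dev_watchdog() matches are the per-queue loop that decides whether a tx queue has timed out: each txq is fetched with netdev_get_tx_queue(), its trans_start (falling back to the device-wide dev->trans_start for old drivers) is compared against the watchdog timeout, and txq->trans_timeout is bumped on a hit. A sketch of that loop, reconstructed from the matched lines (the surrounding lock handling and the ndo_tx_timeout() call are omitted):

    unsigned int i;
    int some_queue_timedout = 0;

    for (i = 0; i < dev->num_tx_queues; i++) {
            struct netdev_queue *txq;
            unsigned long trans_start;

            txq = netdev_get_tx_queue(dev, i);
            /* old drivers only update the device-wide dev->trans_start */
            trans_start = txq->trans_start ? : dev->trans_start;
            if (netif_xmit_stopped(txq) &&
                time_after(jiffies, trans_start + dev->watchdog_timeo)) {
                    some_queue_timedout = 1;
                    txq->trans_timeout++;
                    break;
            }
    }
    /* if some_queue_timedout is set, the watchdog then invokes ndo_tx_timeout() */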
751 struct netdev_queue *txq; in attach_default_qdiscs() local
754 txq = netdev_get_tx_queue(dev, 0); in attach_default_qdiscs()
759 dev->qdisc = txq->qdisc_sleeping; in attach_default_qdiscs()
762 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT); in attach_default_qdiscs()
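
In attach_default_qdiscs() the matches show tx queue 0 used as the anchor: for a single-queue device the root qdisc is simply txq->qdisc_sleeping, while a multiqueue device gets an mq root created against that txq with qdisc_create_dflt(). A sketch reconstructed from the matched lines (error handling and refcounting abbreviated, version-dependent):

    static void attach_default_qdiscs(struct net_device *dev)
    {
            struct netdev_queue *txq;
            struct Qdisc *qdisc;

            txq = netdev_get_tx_queue(dev, 0);

            if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
                    /* single queue: attach a default qdisc per queue and
                     * expose queue 0's sleeping qdisc as the device root */
                    netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                    dev->qdisc = txq->qdisc_sleeping;
                    atomic_inc(&dev->qdisc->refcnt);
            } else {
                    /* multiqueue: create an mq root qdisc anchored at queue 0 */
                    qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                    if (qdisc) {
                            dev->qdisc = qdisc;
                            qdisc->ops->attach(qdisc);
                    }
            }
    }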