Lines matching references to "q" in net/sched/sch_generic.c. Each entry gives the source line number, the matching code line, the enclosing function, and the kind of reference (argument, local, member).
48 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) in dev_requeue_skb() argument
50 q->gso_skb = skb; in dev_requeue_skb()
51 q->qstats.requeues++; in dev_requeue_skb()
52 q->q.qlen++; /* it's still part of the queue */ in dev_requeue_skb()
53 __netif_schedule(q); in dev_requeue_skb()
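The four lines above are the entire body of dev_requeue_skb(): a packet that could not be sent is parked in q->gso_skb, counted back into qlen so the queue length stays truthful, and the qdisc is rescheduled. A sketch of the full function as it reads in kernel trees of this era (~3.18-4.x); only the comment on the qlen line appears verbatim in the listing, the rest is reconstructed and may differ by version:

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        q->gso_skb = skb;       /* park the skb; the next dequeue sees it first */
        q->qstats.requeues++;   /* account the requeue in qdisc stats */
        q->q.qlen++;            /* it's still part of the queue */
        __netif_schedule(q);    /* re-arm the TX softirq for this qdisc */

        return 0;
}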
58 static void try_bulk_dequeue_skb(struct Qdisc *q, in try_bulk_dequeue_skb() argument
66 struct sk_buff *nskb = q->dequeue(q); in try_bulk_dequeue_skb()
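The single listed line is the heart of the bulk-dequeue loop: it keeps pulling packets with q->dequeue() and chains them via skb->next until a byte budget derived from BQL is spent. A hedged reconstruction of the surrounding loop; names outside the listing (qdisc_avail_bulklimit() and the budget logic) are from same-era kernel sources and may differ by version:

static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);   /* listed line 66 */

                if (!nskb)
                        break;

                bytelimit -= nskb->len; /* covers GSO len */
                skb->next = nskb;       /* chain for the driver's xmit_more path */
                skb = nskb;
                (*packets)++;           /* GSO counts as one pkt */
        }
        skb->next = NULL;               /* terminate the chain */
}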
82 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, in dequeue_skb() argument
85 struct sk_buff *skb = q->gso_skb; in dequeue_skb()
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb()
94 q->gso_skb = NULL; in dequeue_skb()
95 q->q.qlen--; in dequeue_skb()
101 if (!(q->flags & TCQ_F_ONETXQUEUE) || in dequeue_skb()
103 skb = q->dequeue(q); in dequeue_skb()
104 if (skb && qdisc_may_bulk(q)) in dequeue_skb()
105 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
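dequeue_skb() prefers a previously requeued packet (q->gso_skb) over a fresh q->dequeue(), and only attempts bulking on the fresh path; a requeued skb was validated before it was parked, hence *validate = false there. A simplified sketch with the gaps between the listed lines filled in from same-era sources (the per-skb txq lookup on the requeue path is omitted):

static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        struct sk_buff *skb = q->gso_skb;
        const struct netdev_queue *txq = q->dev_queue;

        *packets = 1;
        *validate = true;
        if (unlikely(skb)) {
                /* requeued skb: take it back only if the queue woke up */
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else {
                        skb = NULL;
                }
                *validate = false;      /* already validated before requeue */
        } else {
                if (!(q->flags & TCQ_F_ONETXQUEUE) ||
                    !netif_xmit_frozen_or_stopped(txq)) {
                        skb = q->dequeue(q);
                        if (skb && qdisc_may_bulk(q))
                                try_bulk_dequeue_skb(q, skb, txq, packets);
                }
        }
        return skb;
}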
113 struct Qdisc *q) in handle_dev_cpu_collision() argument
127 ret = qdisc_qlen(q); in handle_dev_cpu_collision()
134 ret = dev_requeue_skb(skb, q); in handle_dev_cpu_collision()
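handle_dev_cpu_collision() distinguishes a dead loop (the same CPU already holds the device xmit lock, so the packet is dropped) from genuine contention (another CPU holds it, so the packet is requeued). Sketch with the unlisted branches reconstructed from same-era sources:

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /* Same CPU holding the lock: a dead loop, drop the packet */
                kfree_skb(skb);
                net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
                                     dev_queue->dev->name);
                ret = qdisc_qlen(q);            /* listed line 127 */
        } else {
                /* Another CPU holds the lock: requeue and retry later */
                __this_cpu_inc(softnet_data.cpu_collision);
                ret = dev_requeue_skb(skb, q);  /* listed line 134 */
        }

        return ret;
}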
149 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, in sch_direct_xmit() argument
173 ret = qdisc_qlen(q); in sch_direct_xmit()
176 ret = handle_dev_cpu_collision(skb, txq, q); in sch_direct_xmit()
181 dev->name, ret, q->q.qlen); in sch_direct_xmit()
183 ret = dev_requeue_skb(skb, q); in sch_direct_xmit()
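The listed lines from sch_direct_xmit() are its three outcomes: success (return the remaining qlen), a driver trylock failure (treated as a CPU collision), and NETDEV_TX_BUSY (warn on an unexpected code, then requeue). A sketch of that dispatch from same-era sources; the HARD_TX lock handling and the dev_hard_start_xmit() call around it are omitted, and NETDEV_TX_LOCKED only exists in kernels of this era:

        /* after dropping the qdisc lock and transmitting under the TX lock */
        if (dev_xmit_complete(ret)) {
                /* driver sent out or consumed the skb */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* driver try-lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* driver returned NETDEV_TX_BUSY: requeue the skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);
                ret = dev_requeue_skb(skb, q);
        }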
211 static inline int qdisc_restart(struct Qdisc *q, int *packets) in qdisc_restart() argument
220 skb = dequeue_skb(q, &validate, packets); in qdisc_restart()
224 root_lock = qdisc_lock(q); in qdisc_restart()
225 dev = qdisc_dev(q); in qdisc_restart()
228 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); in qdisc_restart()
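qdisc_restart() is one iteration of the transmit loop: dequeue a packet (possibly a bulked chain), then hand it to sch_direct_xmit(). Its nonzero return keeps the caller's loop going. Reconstruction of the full body from same-era sources:

static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;
        bool validate;

        /* Dequeue packet (or a bulked chain of packets) */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return 0;       /* nothing to send: stop the run loop */

        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}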
231 void __qdisc_run(struct Qdisc *q) in __qdisc_run() argument
236 while (qdisc_restart(q, &packets)) { in __qdisc_run()
244 __netif_schedule(q); in __qdisc_run()
249 qdisc_run_end(q); in __qdisc_run()
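__qdisc_run() repeats qdisc_restart() under a packet quota (weight_p) so one busy qdisc cannot monopolize the CPU; when the quota is spent or a reschedule is pending, the remainder is deferred to a later softirq via __netif_schedule(). Same-era sketch:

void __qdisc_run(struct Qdisc *q)
{
        int quota = weight_p;   /* per-run packet budget */
        int packets;

        while (qdisc_restart(q, &packets)) {
                quota -= packets;       /* bulked chains count per packet */
                if (quota <= 0 || need_resched()) {
                        __netif_schedule(q);    /* finish in a later softirq */
                        break;
                }
        }

        qdisc_run_end(q);       /* clear the RUNNING state */
}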
413 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
440 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
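These two lines are the static spinlock initializers for the built-in noop and noqueue qdiscs, which exist as compile-time objects rather than being allocated by qdisc_alloc(). For context, the noop initializer in same-era sources reads roughly as follows (fields other than .q.lock are reconstructed and vary by version):

struct Qdisc noop_qdisc = {
        .enqueue        = noop_enqueue,         /* drops every packet */
        .dequeue        = noop_dequeue,         /* always returns NULL */
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noop_qdisc_ops,
        .q.lock         = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      = &noop_netdev_queue,
        .busylock       = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};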
463 struct sk_buff_head q[PFIFO_FAST_BANDS]; member
477 return priv->q + band; in band2list()
482 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { in pfifo_fast_enqueue()
488 qdisc->q.qlen++; in pfifo_fast_enqueue()
504 qdisc->q.qlen--; in pfifo_fast_dequeue()
538 qdisc->q.qlen = 0; in pfifo_fast_reset()
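pfifo_fast keeps three per-priority band FIFOs in priv->q[] plus a bitmap of non-empty bands; qdisc->q is not used as a list here, but its qlen field still carries the total packet count across all bands (and skb_queue_len(&qdisc->q) reads that same field for the tx_queue_len check). Sketch of the enqueue/dequeue pair around the listed lines, reconstructed from same-era sources (prio2band/bitmap2band are the kernel's static lookup tables):

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);    /* mark band non-empty */
                qdisc->q.qlen++;                /* total across all bands */
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];   /* lowest non-empty band */

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}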
607 skb_queue_head_init(&sch->q); in qdisc_alloc()
660 qdisc->q.qlen = 0; in qdisc_reset()
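These two lines show the lifecycle bookkeeping: qdisc_alloc() initializes sch->q as an empty skb list, and qdisc_reset() zeroes qlen after the per-qdisc ops->reset() hook has flushed whatever was queued. A hedged sketch of the reset path; the gso_skb handling is reconstructed and differs slightly between versions:

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);      /* flush per-qdisc queues */

        if (qdisc->gso_skb) {
                kfree_skb(qdisc->gso_skb);      /* drop a parked requeue */
                qdisc->gso_skb = NULL;
        }
        qdisc->q.qlen = 0;              /* listed line 660 */
}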
846 struct Qdisc *q; in some_qdisc_is_busy() local
850 q = dev_queue->qdisc_sleeping; in some_qdisc_is_busy()
851 root_lock = qdisc_lock(q); in some_qdisc_is_busy()
855 val = (qdisc_is_running(q) || in some_qdisc_is_busy()
856 test_bit(__QDISC_STATE_SCHED, &q->state)); in some_qdisc_is_busy()
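some_qdisc_is_busy() walks every TX queue of the device and, under each root lock, tests whether the sleeping qdisc is still running or scheduled; dev_deactivate() polls it until every qdisc has quiesced. Reconstruction of the full function from same-era sources:

static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}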