
Searched refs:qdisc (Results 1 – 37 of 37) sorted by relevance

/linux-4.4.14/net/sched/
sch_generic.c
385 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) in noop_enqueue() argument
391 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc) in noop_dequeue() argument
406 .qdisc = &noop_qdisc,
422 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt) in noqueue_init() argument
427 qdisc->enqueue = NULL; in noqueue_init()
475 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) in pfifo_fast_enqueue() argument
477 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { in pfifo_fast_enqueue()
479 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); in pfifo_fast_enqueue()
483 qdisc->q.qlen++; in pfifo_fast_enqueue()
484 return __qdisc_enqueue_tail(skb, qdisc, list); in pfifo_fast_enqueue()
[all …]
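
The pfifo_fast_enqueue() hit above shows the admission rule used by the default qdisc: packets are tail-inserted only while the total queue length stays below the device's tx_queue_len, otherwise they are dropped. Below is a minimal userspace sketch of that pattern, collapsing the three priority bands into a single list; every name in it (struct pkt, struct band_fifo, fifo_enqueue) is invented for illustration and is not kernel API.

/*
 * Illustrative sketch only: mirrors the "enqueue at tail while below
 * tx_queue_len, otherwise drop" logic visible in pfifo_fast_enqueue().
 */
#include <stdbool.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

struct band_fifo {
    struct pkt *head, *tail;
    size_t qlen;          /* packets currently queued (qdisc->q.qlen)    */
    size_t tx_queue_len;  /* stand-in for qdisc_dev(qdisc)->tx_queue_len */
};

/* Returns true when the packet was queued, false when the caller must drop it. */
bool fifo_enqueue(struct band_fifo *q, struct pkt *p)
{
    if (q->qlen >= q->tx_queue_len)
        return false;                 /* queue full */
    p->next = NULL;
    if (q->tail)
        q->tail->next = p;            /* __qdisc_enqueue_tail() equivalent */
    else
        q->head = p;
    q->tail = p;
    q->qlen++;                        /* mirrors qdisc->q.qlen++ */
    return true;
}
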
sch_mqprio.c
97 struct Qdisc *qdisc; in mqprio_init() local
127 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, in mqprio_init()
130 if (qdisc == NULL) { in mqprio_init()
134 priv->qdiscs[i] = qdisc; in mqprio_init()
135 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in mqprio_init()
170 struct Qdisc *qdisc, *old; in mqprio_attach() local
175 qdisc = priv->qdiscs[ntx]; in mqprio_attach()
176 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); in mqprio_attach()
180 qdisc_list_add(qdisc); in mqprio_attach()
226 struct Qdisc *qdisc; in mqprio_dump() local
[all …]
sch_mq.c
43 struct Qdisc *qdisc; in mq_init() local
60 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, in mq_init()
63 if (qdisc == NULL) in mq_init()
65 priv->qdiscs[ntx] = qdisc; in mq_init()
66 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in mq_init()
81 struct Qdisc *qdisc, *old; in mq_attach() local
85 qdisc = priv->qdiscs[ntx]; in mq_attach()
86 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); in mq_attach()
91 qdisc_list_add(qdisc); in mq_attach()
102 struct Qdisc *qdisc; in mq_dump() local
[all …]
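
sch_mq.c and sch_mqprio.c above follow the same two-phase pattern: at init time one default child qdisc is created per hardware TX queue and stored in a private array; at attach time each child is grafted onto its queue and whatever qdisc was previously attached is destroyed. The following is a rough userspace mock of that shape, under the assumption that grafting amounts to swapping a pointer and returning the old child; all types and helpers (mock_qdisc, mock_txq, create_children, attach_children) are made up for the sketch.

/*
 * Illustrative sketch only: one child per TX queue, created then grafted,
 * loosely mirroring mq_init()/mq_attach() above. Not kernel API.
 */
#include <stdlib.h>

struct mock_qdisc { unsigned int flags; };
struct mock_txq   { struct mock_qdisc *qdisc; };

#define MOCK_F_ONETXQUEUE 0x1   /* stands in for TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT */

/* mq_init() analogue: build one default child per TX queue. */
struct mock_qdisc **create_children(unsigned int num_tx_queues)
{
    struct mock_qdisc **children = calloc(num_tx_queues, sizeof(*children));

    if (!children)
        return NULL;
    for (unsigned int ntx = 0; ntx < num_tx_queues; ntx++) {
        children[ntx] = calloc(1, sizeof(**children)); /* qdisc_create_dflt() stand-in */
        if (!children[ntx])
            return NULL;                               /* the real code unwinds here */
        children[ntx]->flags |= MOCK_F_ONETXQUEUE;
    }
    return children;
}

/* mq_attach() analogue: graft each child onto its queue, discarding the old one. */
void attach_children(struct mock_txq *txqs, struct mock_qdisc **children,
                     unsigned int num_tx_queues)
{
    for (unsigned int ntx = 0; ntx < num_tx_queues; ntx++) {
        struct mock_qdisc *old = txqs[ntx].qdisc;      /* dev_graft_qdisc() returns the old child */

        txqs[ntx].qdisc = children[ntx];
        free(old);                                     /* the kernel uses qdisc_destroy() */
    }
}
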
sch_drr.c
30 struct Qdisc *qdisc; member
55 unsigned int len = cl->qdisc->q.qlen; in drr_purge_queue()
56 unsigned int backlog = cl->qdisc->qstats.backlog; in drr_purge_queue()
58 qdisc_reset(cl->qdisc); in drr_purge_queue()
59 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in drr_purge_queue()
115 cl->qdisc = qdisc_create_dflt(sch->dev_queue, in drr_change_class()
117 if (cl->qdisc == NULL) in drr_change_class()
118 cl->qdisc = &noop_qdisc; in drr_change_class()
125 qdisc_destroy(cl->qdisc); in drr_change_class()
144 qdisc_destroy(cl->qdisc); in drr_destroy_class()
[all …]
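
drr_purge_queue() above (and the identical code in sch_qfq.c and sch_hfsc.c further down) shows a recurring maintenance pattern: snapshot the child's packet and byte counts before resetting it, then pass exactly those amounts to qdisc_tree_reduce_backlog() so the counters of ancestor qdiscs stay consistent. A simplified standalone mock of the idea follows; the types and helpers (mock_child, mock_reset, mock_tree_reduce) are invented stand-ins for the kernel structures.

/*
 * Illustrative sketch only: snapshot, reset, then propagate the reduction
 * toward the root, as in drr_purge_queue()/qfq_purge_queue() above.
 */
struct mock_child {
    unsigned int qlen;            /* packets queued (qdisc->q.qlen)       */
    unsigned int backlog;         /* bytes queued (qdisc->qstats.backlog) */
    struct mock_child *parent;
};

void mock_reset(struct mock_child *q)
{
    q->qlen = 0;                  /* qdisc_reset() drops everything it holds */
    q->backlog = 0;
}

void mock_tree_reduce(struct mock_child *q, unsigned int pkts, unsigned int bytes)
{
    /* qdisc_tree_reduce_backlog() walks toward the root adjusting counters. */
    for (struct mock_child *p = q->parent; p; p = p->parent) {
        p->qlen -= pkts;
        p->backlog -= bytes;
    }
}

void purge_queue(struct mock_child *child)
{
    unsigned int len = child->qlen;           /* snapshot first ...             */
    unsigned int backlog = child->backlog;

    mock_reset(child);                        /* ... then reset the child ...   */
    mock_tree_reduce(child, len, backlog);    /* ... then report the reduction. */
}
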
sch_tbf.c
114 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */ member
177 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
203 ret = qdisc_enqueue(skb, q->qdisc); in tbf_enqueue()
219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { in tbf_drop()
236 skb = q->qdisc->ops->peek(q->qdisc); in tbf_dequeue()
259 skb = qdisc_dequeue_peeked(q->qdisc); in tbf_dequeue()
296 qdisc_reset(q->qdisc); in tbf_reset()
390 if (q->qdisc != &noop_qdisc) { in tbf_change()
391 err = fifo_set_limit(q->qdisc, qopt->limit); in tbf_change()
404 qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, in tbf_change()
[all …]
sch_red.c
46 struct Qdisc *qdisc; member
62 struct Qdisc *child = q->qdisc; in red_enqueue()
116 struct Qdisc *child = q->qdisc; in red_dequeue()
132 struct Qdisc *child = q->qdisc; in red_peek()
140 struct Qdisc *child = q->qdisc; in red_drop()
160 qdisc_reset(q->qdisc); in red_reset()
170 qdisc_destroy(q->qdisc); in red_destroy()
213 qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, in red_change()
214 q->qdisc->qstats.backlog); in red_change()
215 qdisc_destroy(q->qdisc); in red_change()
[all …]
sch_multiq.c
70 struct Qdisc *qdisc; in multiq_enqueue() local
73 qdisc = multiq_classify(skb, sch, &ret); in multiq_enqueue()
75 if (qdisc == NULL) { in multiq_enqueue()
84 ret = qdisc_enqueue(skb, qdisc); in multiq_enqueue()
97 struct Qdisc *qdisc; in multiq_dequeue() local
112 qdisc = q->queues[q->curband]; in multiq_dequeue()
113 skb = qdisc->dequeue(qdisc); in multiq_dequeue()
129 struct Qdisc *qdisc; in multiq_peek() local
144 qdisc = q->queues[curband]; in multiq_peek()
145 skb = qdisc->ops->peek(qdisc); in multiq_peek()
[all …]
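
The multiq_dequeue() and multiq_peek() hits above revolve around a current-band index (q->curband) that rotates over the per-band child qdiscs so each band gets a turn. Below is a self-contained sketch of that round-robin dequeue, simplified from the excerpt (the real function also skips bands whose hardware subqueue is stopped, which is omitted here); the mock_* names are illustrative only.

/*
 * Illustrative sketch only: rotate through the bands, return the first
 * packet found, and remember where to resume on the next call.
 */
#include <stddef.h>

struct mock_band { void *(*dequeue)(struct mock_band *band); };

struct mock_multiq {
    struct mock_band **queues;   /* one child per band            */
    unsigned int bands;          /* number of bands               */
    unsigned int curband;        /* band to serve on the next try */
};

void *multiq_dequeue_sketch(struct mock_multiq *q)
{
    for (unsigned int i = 0; i < q->bands; i++) {
        struct mock_band *band = q->queues[q->curband];
        void *pkt;

        q->curband = (q->curband + 1) % q->bands;   /* rotate for fairness */
        pkt = band->dequeue(band);
        if (pkt)
            return pkt;          /* first non-empty band wins this round */
    }
    return NULL;                 /* all bands empty */
}
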
sch_prio.c
72 struct Qdisc *qdisc; in prio_enqueue() local
75 qdisc = prio_classify(skb, sch, &ret); in prio_enqueue()
77 if (qdisc == NULL) { in prio_enqueue()
86 ret = qdisc_enqueue(skb, qdisc); in prio_enqueue()
102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek() local
103 struct sk_buff *skb = qdisc->ops->peek(qdisc); in prio_peek()
116 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue() local
117 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); in prio_dequeue()
133 struct Qdisc *qdisc; in prio_drop() local
136 qdisc = q->queues[prio]; in prio_drop()
[all …]
sch_qfq.c
141 struct Qdisc *qdisc; member
222 unsigned int len = cl->qdisc->q.qlen; in qfq_purge_queue()
223 unsigned int backlog = cl->qdisc->qstats.backlog; in qfq_purge_queue()
225 qdisc_reset(cl->qdisc); in qfq_purge_queue()
226 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in qfq_purge_queue()
330 if (cl->qdisc->q.qlen > 0) { /* adding an active class */ in qfq_add_to_agg()
379 if (cl->qdisc->q.qlen > 0) /* class is active */ in qfq_deact_rm_from_agg()
481 cl->qdisc = qdisc_create_dflt(sch->dev_queue, in qfq_change_class()
483 if (cl->qdisc == NULL) in qfq_change_class()
484 cl->qdisc = &noop_qdisc; in qfq_change_class()
[all …]
sch_sfb.c
56 struct Qdisc *qdisc; member
282 struct Qdisc *child = q->qdisc; in sfb_enqueue()
423 struct Qdisc *child = q->qdisc; in sfb_dequeue()
426 skb = child->dequeue(q->qdisc); in sfb_dequeue()
440 struct Qdisc *child = q->qdisc; in sfb_peek()
451 qdisc_reset(q->qdisc); in sfb_reset()
464 qdisc_destroy(q->qdisc); in sfb_destroy()
513 qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, in sfb_change()
514 q->qdisc->qstats.backlog); in sfb_change()
515 qdisc_destroy(q->qdisc); in sfb_change()
[all …]
sch_hfsc.c
126 struct Qdisc *qdisc; /* leaf qdisc */ member
770 if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC) in update_vf()
897 unsigned int len = cl->qdisc->q.qlen; in hfsc_purge_queue()
898 unsigned int backlog = cl->qdisc->qstats.backlog; in hfsc_purge_queue()
900 qdisc_reset(cl->qdisc); in hfsc_purge_queue()
901 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in hfsc_purge_queue()
1036 if (cl->qdisc->q.qlen != 0) { in hfsc_change_class()
1038 update_ed(cl, qdisc_peek_len(cl->qdisc)); in hfsc_change_class()
1090 cl->qdisc = qdisc_create_dflt(sch->dev_queue, in hfsc_change_class()
1092 if (cl->qdisc == NULL) in hfsc_change_class()
[all …]
sch_netem.c
76 struct Qdisc *qdisc; member
600 if (!len && q->qdisc && q->qdisc->ops->drop) in netem_drop()
601 len = q->qdisc->ops->drop(q->qdisc); in netem_drop()
652 if (q->qdisc) { in netem_dequeue()
653 int err = qdisc_enqueue(skb, q->qdisc); in netem_dequeue()
667 if (q->qdisc) { in netem_dequeue()
668 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
675 if (q->qdisc) { in netem_dequeue()
676 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
689 if (q->qdisc) in netem_reset()
[all …]
sch_api.c
278 struct Qdisc *root = qdisc_dev(q)->qdisc; in qdisc_list_add()
300 q = qdisc_match_from_root(dev->qdisc, handle); in qdisc_lookup()
570 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) in qdisc_warn_nonwc() argument
572 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { in qdisc_warn_nonwc()
574 txt, qdisc->ops->id, qdisc->handle >> 16); in qdisc_warn_nonwc()
575 qdisc->flags |= TCQ_F_WARN_NONWC; in qdisc_warn_nonwc()
586 qdisc_unthrottled(wd->qdisc); in qdisc_watchdog()
587 __netif_schedule(qdisc_root(wd->qdisc)); in qdisc_watchdog()
593 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) in qdisc_watchdog_init() argument
597 wd->qdisc = qdisc; in qdisc_watchdog_init()
[all …]
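
qdisc_warn_nonwc() above is a one-shot warning: the TCQ_F_WARN_NONWC flag is tested before printing and set afterwards, so the "non-work-conserving" message appears at most once per qdisc instance. A small standalone sketch of the same idea follows, with a mock flag and struct in place of the kernel ones; the exact message text is illustrative.

/*
 * Illustrative sketch only: warn once per object by latching a flag bit,
 * as qdisc_warn_nonwc() does above.
 */
#include <stdio.h>

#define MOCK_F_WARN_NONWC 0x1

struct mock_qd {
    unsigned int flags;
    const char  *id;        /* stands in for qdisc->ops->id                 */
    unsigned int handle;    /* major:minor handle, major in the top 16 bits */
};

void warn_nonwc_once(const char *txt, struct mock_qd *qd)
{
    if (!(qd->flags & MOCK_F_WARN_NONWC)) {
        fprintf(stderr, "%s: qdisc %s %X: is non-work-conserving?\n",
                txt, qd->id, qd->handle >> 16);
        qd->flags |= MOCK_F_WARN_NONWC;   /* never warn again for this qdisc */
    }
}
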
sch_cbq.c
106 struct Qdisc *qdisc; /* Ptr to CBQ discipline */ member
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class()
327 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_deactivate_class()
411 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_classic()
465 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_rclassic()
484 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_delay()
488 &qdisc_root_sleeping(cl->qdisc)->state)) in cbq_ovl_delay()
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_lowprio()
547 cl->qdisc->q.qlen--; in cbq_ovl_drop()
591 struct Qdisc *sch = q->watchdog.qdisc; in cbq_undelay()
[all …]
sch_teql.c
103 q = rcu_dereference_bh(dat_queue->qdisc); in teql_dequeue()
156 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); in teql_destroy()
158 qdisc_reset(rtnl_dereference(txq->qdisc)); in teql_destroy()
265 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) in teql_resolve()
cls_api.c
175 q = dev->qdisc; in tc_ctl_tfilter()
445 q = dev->qdisc; in tc_dump_tfilter()
sch_htb.c
933 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { in htb_dequeue()
935 qdisc_throttled(q->watchdog.qdisc); in htb_dequeue()
1015 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
/linux-4.4.14/include/net/
sch_generic.h
105 static inline bool qdisc_is_running(const struct Qdisc *qdisc) in qdisc_is_running() argument
107 return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false; in qdisc_is_running()
110 static inline bool qdisc_run_begin(struct Qdisc *qdisc) in qdisc_run_begin() argument
112 if (qdisc_is_running(qdisc)) in qdisc_run_begin()
114 qdisc->__state |= __QDISC___STATE_RUNNING; in qdisc_run_begin()
118 static inline void qdisc_run_end(struct Qdisc *qdisc) in qdisc_run_end() argument
120 qdisc->__state &= ~__QDISC___STATE_RUNNING; in qdisc_run_end()
123 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) in qdisc_may_bulk() argument
125 return qdisc->flags & TCQ_F_ONETXQUEUE; in qdisc_may_bulk()
138 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) in qdisc_is_throttled() argument
[all …]
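
The sch_generic.h excerpt above defines the __QDISC___STATE_RUNNING protocol: qdisc_run_begin() claims the right to run the dequeue loop (returning false when another context already holds it) and qdisc_run_end() releases it. Below is an illustrative userspace sketch of that caller pattern; the mock_* names are invented, and the real kernel relies on the qdisc root lock for serialization rather than the bare bit operations shown here.

/*
 * Illustrative sketch only: claim/release a "running" bit around the
 * dequeue work, mirroring qdisc_run_begin()/qdisc_run_end() above.
 */
#include <stdbool.h>

#define MOCK_STATE_RUNNING 0x1

struct mock_qd { unsigned int state; };

bool run_begin(struct mock_qd *qd)
{
    if (qd->state & MOCK_STATE_RUNNING)   /* someone else is already dequeuing */
        return false;
    qd->state |= MOCK_STATE_RUNNING;
    return true;
}

void run_end(struct mock_qd *qd)
{
    qd->state &= ~MOCK_STATE_RUNNING;
}

/* Typical caller: only one context runs the dequeue loop at a time. */
void try_transmit(struct mock_qd *qd)
{
    if (run_begin(qd)) {
        /* ... dequeue packets and hand them to the driver here ... */
        run_end(qd);
    }
}
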
pkt_sched.h
65 struct Qdisc *qdisc; member
68 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
100 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
/linux-4.4.14/Documentation/networking/
multiqueue.txt
33 default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue.
34 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
35 qdisc is responsible for classifying the skb's and then directing the skb's to
43 On qdisc load, the number of bands is based on the number of queues on the
52 qdiscs. To add the MULTIQ qdisc to your network device, assuming the device
55 # tc qdisc add dev eth0 root handle 1: multiq
57 The qdisc will allocate the number of bands to equal the number of queues that
58 the device reports, and bring the qdisc online. Assuming eth0 has 4 Tx
vrf.txt
132 …11: vrf-mgmt: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group def…
135 …12: vrf-red: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group defa…
138 …13: vrf-blue: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group def…
141 …14: vrf-green: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group de…
176 …3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode …
178 …4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode …
180 …7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN mode DEFAULT group de…
214 …3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group…
222 …4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group…
230 …7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN group default qlen 10…
ieee802154.txt
61 submits skb to qdisc), so if you need something from that cb later, you should
phonet.txt
69 only the (default) Linux FIFO qdisc should be used with them.
can.txt
1053 2: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP qlen 10
1223 5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
packet_mmap.txt
998 kernel's qdisc layer and are forcedly pushed to the driver directly. Meaning,
ip-sysctl.txt
707 result in a large amount of packets queued in qdisc/device
710 tcp_limit_output_bytes limits the number of bytes on qdisc
filter.txt
45 qdisc layer, SECCOMP-BPF (SECure COMPuting [1]), and lots of other places
bonding.txt
1635 a multiqueue qdisc and filters to bias certain traffic to transmit on certain
1640 # tc qdisc add dev bond0 handle 1 root multiq
/linux-4.4.14/Documentation/cgroups/
net_prio.txt
47 queueing discipline (qdisc) so priorities will be assigned prior to the hardware
50 One usage for the net_prio cgroup is with mqprio qdisc allowing application
net_cls.txt
31 tc qdisc add dev eth0 root handle 10: htb
/linux-4.4.14/net/caif/
caif_dev.c
189 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); in transmit()
/linux-4.4.14/Documentation/sysctl/
net.txt
63 interfaces still use mq as root qdisc, which in turn uses this default for its
/linux-4.4.14/net/core/
dev.c
2271 struct Qdisc *q = rcu_dereference(txq->qdisc); in netif_schedule_queue()
2294 q = rcu_dereference(txq->qdisc); in netif_wake_subqueue()
2307 q = rcu_dereference(dev_queue->qdisc); in netif_tx_wake_queue()
3113 q = rcu_dereference_bh(txq->qdisc); in __dev_queue_xmit()
7024 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); in dev_ingress_queue_create()
rtnetlink.c
1234 (dev->qdisc && in rtnl_fill_ifinfo()
1235 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || in rtnl_fill_ifinfo()
/linux-4.4.14/include/linux/
netdevice.h
563 struct Qdisc __rcu *qdisc; member
1711 struct Qdisc *qdisc; member
/linux-4.4.14/net/netfilter/
Kconfig
259 4: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast qlen 1000
/linux-4.4.14/
CREDITS
870 D: HTB qdisc and random networking hacks