dev_queue         311 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct netdev_queue *dev_queue;
dev_queue         362 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
dev_queue         363 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	netdev_tx_sent_queue(dev_queue, skb->len);
dev_queue         953 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct netdev_queue *dev_queue;
dev_queue         982 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
dev_queue         983 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
dev_queue         994 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (netif_tx_queue_stopped(dev_queue) &&
dev_queue         996 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			netif_tx_wake_queue(dev_queue);
dev_queue        1037 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct netdev_queue *dev_queue;
dev_queue        1047 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
dev_queue        1048 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	netdev_tx_reset_queue(dev_queue);
dev_queue         377 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	struct netdev_queue *dev_queue;
dev_queue         408 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
dev_queue         409 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		netdev_tx_reset_queue(dev_queue);
dev_queue         483 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct netdev_queue *dev_queue;
dev_queue         487 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_queue = netdev_get_tx_queue(ndev,
dev_queue         489 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		netdev_tx_reset_queue(dev_queue);
dev_queue        1325 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct netdev_queue *dev_queue;
dev_queue        1384 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
dev_queue        1385 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	netdev_tx_sent_queue(dev_queue, skb->len);
dev_queue        2334 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct netdev_queue *dev_queue;
dev_queue        2366 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
dev_queue        2367 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
dev_queue        2375 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (netif_tx_queue_stopped(dev_queue) &&
dev_queue        2377 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			netif_tx_wake_queue(dev_queue);
dev_queue        4350 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct netdev_queue *dev_queue =
dev_queue        4354 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!netif_xmit_stopped(dev_queue))
dev_queue        1391 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	struct netdev_queue *dev_queue;
dev_queue        1426 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
dev_queue        1437 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	    __netif_tx_trylock(dev_queue)) {
dev_queue        1443 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		__netif_tx_unlock(dev_queue);
dev_queue        1447 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (netif_tx_queue_stopped(dev_queue) &&
dev_queue        1451 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		netif_tx_wake_queue(dev_queue);
dev_queue          31 drivers/net/xen-netback/xenbus.c 	struct netdev_queue *dev_queue;
dev_queue          92 drivers/net/xen-netback/xenbus.c 	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
dev_queue          97 drivers/net/xen-netback/xenbus.c 		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
dev_queue         248 drivers/net/xen-netfront.c 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
dev_queue         250 drivers/net/xen-netfront.c 	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
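The driver hits above (hns, hns3, myri10ge, xen) all follow the same byte-queue-limit (BQL) accounting pattern: charge bytes against the queue at transmit time, report completions from the TX clean path and wake the queue if the driver had stopped it, and reset the queue's BQL state when the ring is torn down. A minimal sketch of that pattern follows; struct my_ring, my_xmit(), my_tx_clean(), my_ring_reset() and MY_WAKE_THRESHOLD are hypothetical driver-side names, not taken from the files listed here.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-ring bookkeeping; real drivers have their own. */
struct my_ring {
	unsigned int queue_index;	/* index of the matching netdev_queue */
	unsigned int free_descs;	/* free descriptors left on the ring */
};

#define MY_WAKE_THRESHOLD 16		/* hypothetical wake threshold */

/* Transmit path: charge the skb against the queue's BQL budget. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdev_queue *dev_queue;

	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	/* ... post skb to the hardware ring here ... */
	netdev_tx_sent_queue(dev_queue, skb->len);
	return NETDEV_TX_OK;
}

/* TX completion path: report finished work, then wake the queue if the
 * driver had stopped it and the ring has room again. */
static void my_tx_clean(struct net_device *ndev, struct my_ring *ring,
			unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *dev_queue;

	dev_queue = netdev_get_tx_queue(ndev, ring->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (netif_tx_queue_stopped(dev_queue) &&
	    ring->free_descs > MY_WAKE_THRESHOLD)
		netif_tx_wake_queue(dev_queue);
}

/* Ring teardown/reset: BQL state must be reset together with the ring,
 * as the hns_enet.c, hns_ethtool.c and hns3_enet.c hits above do. */
static void my_ring_reset(struct net_device *ndev, struct my_ring *ring)
{
	struct netdev_queue *dev_queue;

	dev_queue = netdev_get_tx_queue(ndev, ring->queue_index);
	netdev_tx_reset_queue(dev_queue);
}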
dev_queue        3074 include/linux/netdevice.h static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
dev_queue        3076 include/linux/netdevice.h 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
dev_queue        3100 include/linux/netdevice.h void netif_tx_wake_queue(struct netdev_queue *dev_queue);
dev_queue        3124 include/linux/netdevice.h static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
dev_queue        3126 include/linux/netdevice.h 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
dev_queue        3144 include/linux/netdevice.h static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
dev_queue        3146 include/linux/netdevice.h 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
dev_queue        3160 include/linux/netdevice.h static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
dev_queue        3162 include/linux/netdevice.h 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
dev_queue        3166 include/linux/netdevice.h netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
dev_queue        3168 include/linux/netdevice.h 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
dev_queue        3172 include/linux/netdevice.h netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
dev_queue        3174 include/linux/netdevice.h 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
dev_queue        3184 include/linux/netdevice.h static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
dev_queue        3187 include/linux/netdevice.h 	prefetchw(&dev_queue->dql.num_queued);
dev_queue        3198 include/linux/netdevice.h static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
dev_queue        3201 include/linux/netdevice.h 	prefetchw(&dev_queue->dql.limit);
dev_queue        3205 include/linux/netdevice.h static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
dev_queue        3209 include/linux/netdevice.h 	dql_queued(&dev_queue->dql, bytes);
dev_queue        3211 include/linux/netdevice.h 	if (likely(dql_avail(&dev_queue->dql) >= 0))
dev_queue        3214 include/linux/netdevice.h 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
dev_queue        3224 include/linux/netdevice.h 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
dev_queue        3225 include/linux/netdevice.h 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
dev_queue        3235 include/linux/netdevice.h static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
dev_queue        3241 include/linux/netdevice.h 		dql_queued(&dev_queue->dql, bytes);
dev_queue        3243 include/linux/netdevice.h 		return netif_tx_queue_stopped(dev_queue);
dev_queue        3245 include/linux/netdevice.h 	netdev_tx_sent_queue(dev_queue, bytes);
dev_queue        3271 include/linux/netdevice.h static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
dev_queue        3278 include/linux/netdevice.h 	dql_completed(&dev_queue->dql, bytes);
dev_queue        3287 include/linux/netdevice.h 	if (unlikely(dql_avail(&dev_queue->dql) < 0))
dev_queue        3290 include/linux/netdevice.h 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
dev_queue        3291 include/linux/netdevice.h 		netif_schedule_queue(dev_queue);
dev_queue        3326 include/linux/netdevice.h static inline void netdev_reset_queue(struct net_device *dev_queue)
dev_queue        3328 include/linux/netdevice.h 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
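__netdev_tx_sent_queue() in the netdevice.h hits above is the xmit_more-aware variant of netdev_tx_sent_queue(): with the hint set it only updates the BQL counters, and it returns true when the transmit path should flush to the hardware now. A rough sketch of using that return value; my_account_and_kick() and my_ring_doorbell() are hypothetical, and the xmit_more flag is whatever the caller's kernel version provides (skb->xmit_more on older trees, netdev_xmit_more() on newer ones).

static void my_ring_doorbell(void)
{
	/* hypothetical: write the device's TX doorbell register here */
}

/* Sketch of a BQL update that honours the xmit_more batching hint. */
static void my_account_and_kick(struct netdev_queue *dev_queue,
				unsigned int bytes, bool xmit_more)
{
	bool kick;

	/* With xmit_more set, bytes are queued but the doorbell is
	 * deferred unless the stack has already stopped the queue. */
	kick = __netdev_tx_sent_queue(dev_queue, bytes, xmit_more);
	if (kick)
		my_ring_doorbell();
}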
dev_queue         152 include/net/pkt_sched.h 	return dev_net(q->dev_queue->dev);
dev_queue          89 include/net/sch_generic.h 	struct netdev_queue	*dev_queue;
dev_queue         503 include/net/sch_generic.h 	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
dev_queue         510 include/net/sch_generic.h 	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
dev_queue         515 include/net/sch_generic.h 	return qdisc->dev_queue->qdisc_sleeping;
dev_queue         555 include/net/sch_generic.h 	return qdisc->dev_queue->dev;
dev_queue         638 include/net/sch_generic.h struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
dev_queue         668 include/net/sch_generic.h struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
dev_queue         672 include/net/sch_generic.h struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
dev_queue        2701 net/core/dev.c void netif_tx_wake_queue(struct netdev_queue *dev_queue)
dev_queue        2703 net/core/dev.c 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
dev_queue        2707 net/core/dev.c 		q = rcu_dereference(dev_queue->qdisc);
dev_queue         733 net/sched/cls_api.c 	struct net_device *dev = q->dev_queue->dev;
dev_queue         775 net/sched/cls_api.c 	struct net_device *dev = q->dev_queue->dev;
dev_queue        1052 net/sched/sch_api.c 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
dev_queue        1055 net/sched/sch_api.c 				dev_queue = netdev_get_tx_queue(dev, i);
dev_queue        1057 net/sched/sch_api.c 			old = dev_graft_qdisc(dev_queue, new);
dev_queue        1149 net/sched/sch_api.c 				  struct netdev_queue *dev_queue,
dev_queue        1195 net/sched/sch_api.c 	sch = qdisc_alloc(dev_queue, ops, extack);
dev_queue        1643 net/sched/sch_api.c 		struct netdev_queue *dev_queue;
dev_queue        1646 net/sched/sch_api.c 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
dev_queue        1648 net/sched/sch_api.c 			dev_queue = p->dev_queue;
dev_queue        1650 net/sched/sch_api.c 			dev_queue = netdev_get_tx_queue(dev, 0);
dev_queue        1652 net/sched/sch_api.c 		q = qdisc_create(dev, dev_queue, p,
dev_queue        1749 net/sched/sch_api.c 		struct netdev_queue *dev_queue;
dev_queue        1761 net/sched/sch_api.c 		dev_queue = dev_ingress_queue(dev);
dev_queue        1762 net/sched/sch_api.c 		if (dev_queue &&
dev_queue        1763 net/sched/sch_api.c 		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
dev_queue        2192 net/sched/sch_api.c 	struct netdev_queue *dev_queue;
dev_queue        2208 net/sched/sch_api.c 	dev_queue = dev_ingress_queue(dev);
dev_queue        2209 net/sched/sch_api.c 	if (dev_queue &&
dev_queue        2210 net/sched/sch_api.c 	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
dev_queue         295 net/sched/sch_atm.c 	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
dev_queue         551 net/sched/sch_atm.c 	p->link.q = qdisc_create_dflt(sch->dev_queue,
dev_queue        1195 net/sched/sch_cbq.c 	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1402 net/sched/sch_cbq.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1635 net/sched/sch_cbq.c 	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
dev_queue         411 net/sched/sch_cbs.c 	q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         422 net/sched/sch_cbs.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
dev_queue         497 net/sched/sch_cbs.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         111 net/sched/sch_drr.c 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
dev_queue         211 net/sched/sch_drr.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue          74 net/sched/sch_dsmark.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         390 net/sched/sch_dsmark.c 	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
dev_queue         382 net/sched/sch_etf.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
dev_queue         171 net/sched/sch_fifo.c 	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
dev_queue         715 net/sched/sch_fq.c 			      netdev_queue_numa_node_read(sch->dev_queue));
dev_queue          53 net/sched/sch_generic.c 	const struct netdev_queue *txq = q->dev_queue;
dev_queue         205 net/sched/sch_generic.c 	const struct netdev_queue *txq = q->dev_queue;
dev_queue         559 net/sched/sch_generic.c 	.dev_queue	=	&noop_netdev_queue,
dev_queue         802 net/sched/sch_generic.c struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
dev_queue         812 net/sched/sch_generic.c 	if (!dev_queue) {
dev_queue         818 net/sched/sch_generic.c 	dev = dev_queue->dev;
dev_queue         820 net/sched/sch_generic.c 			 netdev_queue_numa_node_read(dev_queue));
dev_queue         829 net/sched/sch_generic.c 				 netdev_queue_numa_node_read(dev_queue));
dev_queue         862 net/sched/sch_generic.c 	sch->dev_queue = dev_queue;
dev_queue         880 net/sched/sch_generic.c struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
dev_queue         892 net/sched/sch_generic.c 	sch = qdisc_alloc(dev_queue, ops, extack);
dev_queue        1011 net/sched/sch_generic.c struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
dev_queue        1014 net/sched/sch_generic.c 	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
dev_queue        1023 net/sched/sch_generic.c 	dev_queue->qdisc_sleeping = qdisc;
dev_queue        1024 net/sched/sch_generic.c 	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
dev_queue        1033 net/sched/sch_generic.c 				     struct netdev_queue *dev_queue,
dev_queue        1044 net/sched/sch_generic.c 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
dev_queue        1051 net/sched/sch_generic.c 	dev_queue->qdisc_sleeping = qdisc;
dev_queue        1080 net/sched/sch_generic.c 				 struct netdev_queue *dev_queue,
dev_queue        1083 net/sched/sch_generic.c 	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
dev_queue        1089 net/sched/sch_generic.c 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
dev_queue        1091 net/sched/sch_generic.c 		dev_queue->trans_start = 0;
dev_queue        1125 net/sched/sch_generic.c 				 struct netdev_queue *dev_queue,
dev_queue        1131 net/sched/sch_generic.c 	qdisc = rtnl_dereference(dev_queue->qdisc);
dev_queue        1142 net/sched/sch_generic.c 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue        1156 net/sched/sch_generic.c 		struct netdev_queue *dev_queue;
dev_queue        1161 net/sched/sch_generic.c 		dev_queue = netdev_get_tx_queue(dev, i);
dev_queue        1162 net/sched/sch_generic.c 		q = dev_queue->qdisc_sleeping;
dev_queue        1179 net/sched/sch_generic.c 			    struct netdev_queue *dev_queue,
dev_queue        1182 net/sched/sch_generic.c 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
dev_queue        1239 net/sched/sch_generic.c 				     struct netdev_queue *dev_queue)
dev_queue        1241 net/sched/sch_generic.c 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
dev_queue        1272 net/sched/sch_generic.c 				     struct netdev_queue *dev_queue,
dev_queue        1277 net/sched/sch_generic.c 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue        1278 net/sched/sch_generic.c 	dev_queue->qdisc_sleeping = qdisc;
dev_queue        1292 net/sched/sch_generic.c 				     struct netdev_queue *dev_queue,
dev_queue        1295 net/sched/sch_generic.c 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
dev_queue        1299 net/sched/sch_generic.c 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue        1300 net/sched/sch_generic.c 		dev_queue->qdisc_sleeping = qdisc_default;
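dev_graft_qdisc() in the sch_generic.c hits above swaps a queue's qdisc_sleeping pointer and parks the active qdisc on noop_qdisc until the device is reactivated; the mq and mqprio hits further down use it to install one child qdisc per TX queue. A minimal sketch of that per-queue attach loop, assuming the children were already created on their own queues and that the tree is recent enough to have qdisc_put() (older kernels used qdisc_destroy()); my_attach_per_queue() is a hypothetical name.

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Graft one pre-built child qdisc onto every TX queue, mq-style. */
static void my_attach_per_queue(struct net_device *dev,
				struct Qdisc **children)
{
	struct netdev_queue *dev_queue;
	struct Qdisc *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		old = dev_graft_qdisc(dev_queue, children[ntx]);
		if (old)
			qdisc_put(old);		/* drop the previous child */
	}
}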
dev_queue        1056 net/sched/sch_hfsc.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1178 net/sched/sch_hfsc.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1410 net/sched/sch_hfsc.c 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1155 net/sched/sch_htb.c 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1264 net/sched/sch_htb.c 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1399 net/sched/sch_htb.c 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue          72 net/sched/sch_mq.c 	struct netdev_queue *dev_queue;
dev_queue          89 net/sched/sch_mq.c 		dev_queue = netdev_get_tx_queue(dev, ntx);
dev_queue          90 net/sched/sch_mq.c 		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
dev_queue         115 net/sched/sch_mq.c 		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
dev_queue         193 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
dev_queue         200 net/sched/sch_mq.c 	*old = dev_graft_qdisc(dev_queue, new);
dev_queue         218 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
dev_queue         220 net/sched/sch_mq.c 	return dev_queue->qdisc_sleeping;
dev_queue         235 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
dev_queue         239 net/sched/sch_mq.c 	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
dev_queue         246 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
dev_queue         248 net/sched/sch_mq.c 	sch = dev_queue->qdisc_sleeping;
dev_queue         138 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue;
dev_queue         227 net/sched/sch_mqprio.c 		dev_queue = netdev_get_tx_queue(dev, i);
dev_queue         228 net/sched/sch_mqprio.c 		qdisc = qdisc_create_dflt(dev_queue,
dev_queue         299 net/sched/sch_mqprio.c 		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
dev_queue         324 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
dev_queue         326 net/sched/sch_mqprio.c 	if (!dev_queue)
dev_queue         332 net/sched/sch_mqprio.c 	*old = dev_graft_qdisc(dev_queue, new);
dev_queue         461 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
dev_queue         463 net/sched/sch_mqprio.c 	if (!dev_queue)
dev_queue         466 net/sched/sch_mqprio.c 	return dev_queue->qdisc_sleeping;
dev_queue         492 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
dev_queue         499 net/sched/sch_mqprio.c 		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
dev_queue         558 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
dev_queue         560 net/sched/sch_mqprio.c 		sch = dev_queue->qdisc_sleeping;
dev_queue         215 net/sched/sch_multiq.c 			child = qdisc_create_dflt(sch->dev_queue,
dev_queue         187 net/sched/sch_plug.c 		netif_schedule_queue(sch->dev_queue);
dev_queue         195 net/sched/sch_plug.c 		netif_schedule_queue(sch->dev_queue);
dev_queue         200 net/sched/sch_prio.c 		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         296 net/sched/sch_prio.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         471 net/sched/sch_qfq.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue         592 net/sched/sch_qfq.c 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
dev_queue        1652 net/sched/sch_taprio.c 		struct netdev_queue *dev_queue;
dev_queue        1655 net/sched/sch_taprio.c 		dev_queue = netdev_get_tx_queue(dev, i);
dev_queue        1656 net/sched/sch_taprio.c 		qdisc = qdisc_create_dflt(dev_queue,
dev_queue        1691 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
dev_queue        1693 net/sched/sch_taprio.c 	if (!dev_queue)
dev_queue        1849 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
dev_queue        1851 net/sched/sch_taprio.c 	if (!dev_queue)
dev_queue        1854 net/sched/sch_taprio.c 	return dev_queue->qdisc_sleeping;
dev_queue        1869 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
dev_queue        1873 net/sched/sch_taprio.c 	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
dev_queue        1883 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
dev_queue        1885 net/sched/sch_taprio.c 	sch = dev_queue->qdisc_sleeping;
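Most of the scheduler hits above are one idiom: a classful qdisc creates a default pfifo child on its own sch->dev_queue via qdisc_create_dflt(), while the multi-queue schedulers (mq, mqprio, taprio) reach the per-queue child through dev_queue->qdisc_sleeping. A minimal sketch of the first idiom; struct my_class and my_class_attach_leaf() are hypothetical, and the noop_qdisc fallback mirrors what drr, qfq and hfsc do above.

#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct my_class {
	struct Qdisc *qdisc;		/* hypothetical per-class leaf */
};

/* Give a freshly created class a default pfifo leaf on the parent's queue. */
static void my_class_attach_leaf(struct Qdisc *sch, struct my_class *cl,
				 u32 classid, struct netlink_ext_ack *extack)
{
	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, extack);
	if (!cl->qdisc)
		cl->qdisc = &noop_qdisc;	/* fall back, as drr/qfq/hfsc do */
}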