Lines Matching refs:netdev_queue
558 struct netdev_queue { struct
595 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) in netdev_queue_numa_node_read() argument
604 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) in netdev_queue_numa_node_write()
1694 struct netdev_queue __rcu *ingress_queue;
1708 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1859 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, in netdev_get_tx_queue()
1865 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, in skb_get_tx_queue()
1873 struct netdev_queue *, in netdev_for_each_tx_queue() argument
1883 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2588 void netif_schedule_queue(struct netdev_queue *txq);
2598 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) in netif_tx_start_queue()
2619 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues()
2624 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2643 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues()
2648 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) in netif_tx_stop_queue()
2667 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) in netif_tx_queue_stopped()
2683 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) in netif_xmit_stopped()
2689 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) in netif_xmit_frozen_or_stopped()
2695 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) in netif_xmit_frozen_or_drv_stopped()
2707 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) in netdev_txq_bql_enqueue_prefetchw()
2721 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) in netdev_txq_bql_complete_prefetchw()
2728 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, in netdev_tx_sent_queue()
2766 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, in netdev_tx_completed_queue()
2806 static inline void netdev_tx_reset_queue(struct netdev_queue *q) in netdev_tx_reset_queue()
2873 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue()
2887 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue()
2901 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped()
3067 struct netdev_queue *txq, int *ret);
3250 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) in __netif_tx_lock()
3256 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) in __netif_tx_lock_bh()
3262 static inline bool __netif_tx_trylock(struct netdev_queue *txq) in __netif_tx_trylock()
3270 static inline void __netif_tx_unlock(struct netdev_queue *txq) in __netif_tx_unlock()
3276 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) in __netif_tx_unlock_bh()
3282 static inline void txq_trans_update(struct netdev_queue *txq) in txq_trans_update()
3302 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_lock()
3327 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_unlock()
3370 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_disable()
3697 struct netdev_queue *txq, bool more) in netdev_start_xmit()