This source file includes the following definitions:
- dev_xmit_complete
- napi_disable_pending
- napi_schedule
- napi_schedule_irqoff
- napi_reschedule
- napi_complete
- napi_enable
- napi_synchronize
- napi_if_scheduled_mark_missed
- net_has_fallback_tunnels
- netdev_queue_numa_node_read
- netdev_queue_numa_node_write
- rps_record_sock_flow
- netdev_phys_item_id_same
- netif_elide_gro
- netdev_get_prio_tc_map
- netdev_set_prio_tc_map
- netdev_get_num_tc
- netdev_get_sb_channel
- netdev_get_tx_queue
- skb_get_tx_queue
- netdev_for_each_tx_queue
- netdev_get_fwd_headroom
- netdev_set_rx_headroom
- netdev_reset_rx_headroom
- dev_net
- dev_net_set
- netdev_priv
- netif_tx_napi_add
- gro_recursion_inc_test
- call_gro_receive
- call_gro_receive_sk
- netdev_notifier_info_init
- netdev_notifier_info_to_dev
- netdev_notifier_info_to_extack
- next_net_device
- next_net_device_rcu
- first_net_device
- first_net_device_rcu
- unregister_netdevice
- skb_gro_offset
- skb_gro_len
- skb_gro_pull
- skb_gro_header_fast
- skb_gro_header_hard
- skb_gro_frag0_invalidate
- skb_gro_header_slow
- skb_gro_network_header
- skb_gro_postpull_rcsum
- skb_at_gro_remcsum_start
- __skb_gro_checksum_validate_needed
- __skb_gro_checksum_validate_complete
- skb_gro_incr_csum_unnecessary
- __skb_gro_checksum_convert_check
- __skb_gro_checksum_convert
- skb_gro_remcsum_init
- skb_gro_remcsum_process
- skb_gro_remcsum_cleanup
- skb_gro_flush_final
- skb_gro_flush_final_remcsum
- skb_gro_flush_final
- skb_gro_flush_final_remcsum
- dev_hard_header
- dev_parse_header
- dev_parse_header_protocol
- dev_validate_header
- unregister_gifconf
- input_queue_head_incr
- input_queue_tail_incr_save
- dev_recursion_level
- dev_xmit_recursion
- dev_xmit_recursion_inc
- dev_xmit_recursion_dec
- netif_tx_schedule_all
- netif_tx_start_queue
- netif_start_queue
- netif_tx_start_all_queues
- netif_wake_queue
- netif_tx_wake_all_queues
- netif_tx_stop_queue
- netif_stop_queue
- netif_tx_queue_stopped
- netif_queue_stopped
- netif_xmit_stopped
- netif_xmit_frozen_or_stopped
- netif_xmit_frozen_or_drv_stopped
- netdev_txq_bql_enqueue_prefetchw
- netdev_txq_bql_complete_prefetchw
- netdev_tx_sent_queue
- __netdev_tx_sent_queue
- netdev_sent_queue
- __netdev_sent_queue
- netdev_tx_completed_queue
- netdev_completed_queue
- netdev_tx_reset_queue
- netdev_reset_queue
- netdev_cap_txqueue
- netif_running
- netif_start_subqueue
- netif_stop_subqueue
- __netif_subqueue_stopped
- netif_subqueue_stopped
- netif_wake_subqueue
- netif_attr_test_mask
- netif_attr_test_online
- netif_attrmask_next
- netif_attrmask_next_and
- netif_set_xps_queue
- __netif_set_xps_queue
- netif_is_multiqueue
- netif_set_real_num_rx_queues
- __netif_get_rx_queue
- get_netdev_rx_queue_index
- dev_kfree_skb_irq
- dev_consume_skb_irq
- dev_kfree_skb_any
- dev_consume_skb_any
- napi_free_frags
- ____dev_forward_skb
- dev_put
- dev_hold
- netif_carrier_ok
- netif_dormant_on
- netif_dormant_off
- netif_dormant
- netif_oper_up
- netif_device_present
- netif_msg_init
- __netif_tx_lock
- __netif_tx_acquire
- __netif_tx_release
- __netif_tx_lock_bh
- __netif_tx_trylock
- __netif_tx_unlock
- __netif_tx_unlock_bh
- txq_trans_update
- netif_trans_update
- netif_tx_lock
- netif_tx_lock_bh
- netif_tx_unlock
- netif_tx_unlock_bh
- netif_tx_disable
- netif_addr_lock
- netif_addr_lock_bh
- netif_addr_unlock
- netif_addr_unlock_bh
- __dev_uc_sync
- __dev_uc_unsync
- __dev_mc_sync
- __dev_mc_unsync
- skb_gso_segment
- can_checksum_protocol
- netdev_rx_csum_fault
- __netdev_start_xmit
- netdev_xmit_more
- netdev_start_xmit
- netdev_class_create_file
- netdev_class_remove_file
- netdev_intersect_features
- netdev_get_wanted_features
- netdev_add_tso_features
- net_gso_ok
- skb_gso_ok
- netif_needs_gso
- netif_set_gso_max_size
- skb_gso_error_unwind
- netif_is_macsec
- netif_is_macvlan
- netif_is_macvlan_port
- netif_is_bond_master
- netif_is_bond_slave
- netif_supports_nofcs
- netif_has_l3_rx_handler
- netif_is_l3_master
- netif_is_l3_slave
- netif_is_bridge_master
- netif_is_bridge_port
- netif_is_ovs_master
- netif_is_ovs_port
- netif_is_team_master
- netif_is_team_port
- netif_is_lag_master
- netif_is_lag_port
- netif_is_rxfh_configured
- netif_is_failover
- netif_is_failover_slave
- netif_keep_dst
- netif_reduces_vlan_mtu
- netdev_name
- netdev_unregistering
- netdev_reg_state
21 #ifndef _LINUX_NETDEVICE_H
22 #define _LINUX_NETDEVICE_H
23
24 #include <linux/timer.h>
25 #include <linux/bug.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/prefetch.h>
29 #include <asm/cache.h>
30 #include <asm/byteorder.h>
31
32 #include <linux/percpu.h>
33 #include <linux/rculist.h>
34 #include <linux/workqueue.h>
35 #include <linux/dynamic_queue_limits.h>
36
37 #include <linux/ethtool.h>
38 #include <net/net_namespace.h>
39 #ifdef CONFIG_DCB
40 #include <net/dcbnl.h>
41 #endif
42 #include <net/netprio_cgroup.h>
43 #include <net/xdp.h>
44
45 #include <linux/netdev_features.h>
46 #include <linux/neighbour.h>
47 #include <uapi/linux/netdevice.h>
48 #include <uapi/linux/if_bonding.h>
49 #include <uapi/linux/pkt_cls.h>
50 #include <linux/hashtable.h>
51
52 struct netpoll_info;
53 struct device;
54 struct phy_device;
55 struct dsa_port;
56
57 struct sfp_bus;
58
59 struct wireless_dev;
60
61 struct wpan_dev;
62 struct mpls_dev;
63
64 struct udp_tunnel_info;
65 struct bpf_prog;
66 struct xdp_buff;
67
68 void netdev_set_default_ethtool_ops(struct net_device *dev,
69 const struct ethtool_ops *ops);
70
71
72 #define NET_RX_SUCCESS 0
73 #define NET_RX_DROP 1
74
75 #define MAX_NEST_DEV 8
76
95 #define NET_XMIT_SUCCESS 0x00
96 #define NET_XMIT_DROP 0x01
97 #define NET_XMIT_CN 0x02
98 #define NET_XMIT_MASK 0x0f
99
100
101
102
103 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
104 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
105
106
107 #define NETDEV_TX_MASK 0xf0
108
109 enum netdev_tx {
110 __NETDEV_TX_MIN = INT_MIN,
111 NETDEV_TX_OK = 0x00,
112 NETDEV_TX_BUSY = 0x10,
113 };
114 typedef enum netdev_tx netdev_tx_t;
115
116
117
118
119
120 static inline bool dev_xmit_complete(int rc)
121 {
122
123
124
125
126
127
128 if (likely(rc < NET_XMIT_MASK))
129 return true;
130
131 return false;
132 }
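As a hedged illustration (not taken from this file), a caller-side sketch of how dev_xmit_complete() separates "skb consumed" from "driver busy" after invoking a driver's ->ndo_start_xmit(); the example_* name is hypothetical and the usual TX-queue locking is omitted for brevity.

static bool example_try_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return true;	/* skb consumed: transmitted or dropped by the driver */

	/* NETDEV_TX_BUSY: the caller still owns the skb and may requeue it */
	return false;
}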
133
134
135
136
137
138
139 #if defined(CONFIG_HYPERV_NET)
140 # define LL_MAX_HEADER 128
141 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
142 # if defined(CONFIG_MAC80211_MESH)
143 # define LL_MAX_HEADER 128
144 # else
145 # define LL_MAX_HEADER 96
146 # endif
147 #else
148 # define LL_MAX_HEADER 32
149 #endif
150
151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
153 #define MAX_HEADER LL_MAX_HEADER
154 #else
155 #define MAX_HEADER (LL_MAX_HEADER + 48)
156 #endif
157
158
159
160
161
162
163 struct net_device_stats {
164 unsigned long rx_packets;
165 unsigned long tx_packets;
166 unsigned long rx_bytes;
167 unsigned long tx_bytes;
168 unsigned long rx_errors;
169 unsigned long tx_errors;
170 unsigned long rx_dropped;
171 unsigned long tx_dropped;
172 unsigned long multicast;
173 unsigned long collisions;
174 unsigned long rx_length_errors;
175 unsigned long rx_over_errors;
176 unsigned long rx_crc_errors;
177 unsigned long rx_frame_errors;
178 unsigned long rx_fifo_errors;
179 unsigned long rx_missed_errors;
180 unsigned long tx_aborted_errors;
181 unsigned long tx_carrier_errors;
182 unsigned long tx_fifo_errors;
183 unsigned long tx_heartbeat_errors;
184 unsigned long tx_window_errors;
185 unsigned long rx_compressed;
186 unsigned long tx_compressed;
187 };
188
189
190 #include <linux/cache.h>
191 #include <linux/skbuff.h>
192
193 #ifdef CONFIG_RPS
194 #include <linux/static_key.h>
195 extern struct static_key_false rps_needed;
196 extern struct static_key_false rfs_needed;
197 #endif
198
199 struct neighbour;
200 struct neigh_parms;
201 struct sk_buff;
202
203 struct netdev_hw_addr {
204 struct list_head list;
205 unsigned char addr[MAX_ADDR_LEN];
206 unsigned char type;
207 #define NETDEV_HW_ADDR_T_LAN 1
208 #define NETDEV_HW_ADDR_T_SAN 2
209 #define NETDEV_HW_ADDR_T_SLAVE 3
210 #define NETDEV_HW_ADDR_T_UNICAST 4
211 #define NETDEV_HW_ADDR_T_MULTICAST 5
212 bool global_use;
213 int sync_cnt;
214 int refcount;
215 int synced;
216 struct rcu_head rcu_head;
217 };
218
219 struct netdev_hw_addr_list {
220 struct list_head list;
221 int count;
222 };
223
224 #define netdev_hw_addr_list_count(l) ((l)->count)
225 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
226 #define netdev_hw_addr_list_for_each(ha, l) \
227 list_for_each_entry(ha, &(l)->list, list)
228
229 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
230 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
231 #define netdev_for_each_uc_addr(ha, dev) \
232 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
233
234 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
235 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
236 #define netdev_for_each_mc_addr(ha, dev) \
237 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
238
239 struct hh_cache {
240 unsigned int hh_len;
241 seqlock_t hh_lock;
242
243
244 #define HH_DATA_MOD 16
245 #define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247 #define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250 };
251
252
253
254
255
256
257
258
259
260 #define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
264
265 struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
271 void (*cache_update)(struct hh_cache *hh,
272 const struct net_device *dev,
273 const unsigned char *haddr);
274 bool (*validate)(const char *ll_header, unsigned int len);
275 __be16 (*parse_protocol)(const struct sk_buff *skb);
276 };
277
278
279
280
281
282
283 enum netdev_state_t {
284 __LINK_STATE_START,
285 __LINK_STATE_PRESENT,
286 __LINK_STATE_NOCARRIER,
287 __LINK_STATE_LINKWATCH_PENDING,
288 __LINK_STATE_DORMANT,
289 };
290
291
292
293
294
295
296 struct netdev_boot_setup {
297 char name[IFNAMSIZ];
298 struct ifmap map;
299 };
300 #define NETDEV_BOOT_SETUP_MAX 8
301
302 int __init netdev_boot_setup(char *str);
303
304 struct gro_list {
305 struct list_head list;
306 int count;
307 };
308
309
310
311
312
313 #define GRO_HASH_BUCKETS 8
314
315
316
317
318 struct napi_struct {
319
320
321
322
323
324
325 struct list_head poll_list;
326
327 unsigned long state;
328 int weight;
329 unsigned long gro_bitmask;
330 int (*poll)(struct napi_struct *, int);
331 #ifdef CONFIG_NETPOLL
332 int poll_owner;
333 #endif
334 struct net_device *dev;
335 struct gro_list gro_hash[GRO_HASH_BUCKETS];
336 struct sk_buff *skb;
337 struct list_head rx_list;
338 int rx_count;
339 struct hrtimer timer;
340 struct list_head dev_list;
341 struct hlist_node napi_hash_node;
342 unsigned int napi_id;
343 };
344
345 enum {
346 NAPI_STATE_SCHED,
347 NAPI_STATE_MISSED,
348 NAPI_STATE_DISABLE,
349 NAPI_STATE_NPSVC,
350 NAPI_STATE_HASHED,
351 NAPI_STATE_NO_BUSY_POLL,
352 NAPI_STATE_IN_BUSY_POLL,
353 };
354
355 enum {
356 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
357 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
358 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
359 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
360 NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
361 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
362 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
363 };
364
365 enum gro_result {
366 GRO_MERGED,
367 GRO_MERGED_FREE,
368 GRO_HELD,
369 GRO_NORMAL,
370 GRO_DROP,
371 GRO_CONSUMED,
372 };
373 typedef enum gro_result gro_result_t;
374
416 enum rx_handler_result {
417 RX_HANDLER_CONSUMED,
418 RX_HANDLER_ANOTHER,
419 RX_HANDLER_EXACT,
420 RX_HANDLER_PASS,
421 };
422 typedef enum rx_handler_result rx_handler_result_t;
423 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
424
425 void __napi_schedule(struct napi_struct *n);
426 void __napi_schedule_irqoff(struct napi_struct *n);
427
428 static inline bool napi_disable_pending(struct napi_struct *n)
429 {
430 return test_bit(NAPI_STATE_DISABLE, &n->state);
431 }
432
433 bool napi_schedule_prep(struct napi_struct *n);
434
435
436
437
438
439
440
441
442 static inline void napi_schedule(struct napi_struct *n)
443 {
444 if (napi_schedule_prep(n))
445 __napi_schedule(n);
446 }
447
448
449
450
451
452
453
454 static inline void napi_schedule_irqoff(struct napi_struct *n)
455 {
456 if (napi_schedule_prep(n))
457 __napi_schedule_irqoff(n);
458 }
459
460
461 static inline bool napi_reschedule(struct napi_struct *napi)
462 {
463 if (napi_schedule_prep(napi)) {
464 __napi_schedule(napi);
465 return true;
466 }
467 return false;
468 }
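As a hedged example of the scheduling side, a hard-IRQ handler usually masks device interrupts and arms NAPI with the irqoff variant, since interrupts are already disabled in that context; struct example_priv and the example_* helpers below are hypothetical.

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;	/* hypothetical driver state */

	example_mask_device_irqs(priv);		/* hypothetical: quiesce the NIC */
	napi_schedule_irqoff(&priv->napi);	/* hard-IRQ context, so irqoff variant */

	return IRQ_HANDLED;
}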
469
470 bool napi_complete_done(struct napi_struct *n, int work_done);
471
472
473
474
475
476
477
478
479 static inline bool napi_complete(struct napi_struct *n)
480 {
481 return napi_complete_done(n, 0);
482 }
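A minimal sketch of the matching poll callback: process at most budget packets and only complete NAPI when the budget was not exhausted, re-arming device interrupts if napi_complete_done() says polling really stopped. example_clean_rx() and the IRQ helpers are hypothetical.

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_clean_rx(priv, budget);	/* hypothetical RX cleanup */

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_device_irqs(priv);	/* hypothetical: re-enable IRQs */

	return work_done;
}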
483
484
485
486
487
488
489
490
491
492
493
494
495
496 bool napi_hash_del(struct napi_struct *napi);
497
498
499
500
501
502
503
504
505 void napi_disable(struct napi_struct *n);
506
507
508
509
510
511
512
513
514 static inline void napi_enable(struct napi_struct *n)
515 {
516 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
517 smp_mb__before_atomic();
518 clear_bit(NAPI_STATE_SCHED, &n->state);
519 clear_bit(NAPI_STATE_NPSVC, &n->state);
520 }
521
522
523
524
525
526
527
528
529
530 static inline void napi_synchronize(const struct napi_struct *n)
531 {
532 if (IS_ENABLED(CONFIG_SMP))
533 while (test_bit(NAPI_STATE_SCHED, &n->state))
534 msleep(1);
535 else
536 barrier();
537 }
538
539
540
541
542
543
544
545
546
547 static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
548 {
549 unsigned long val, new;
550
551 do {
552 val = READ_ONCE(n->state);
553 if (val & NAPIF_STATE_DISABLE)
554 return true;
555
556 if (!(val & NAPIF_STATE_SCHED))
557 return false;
558
559 new = val | NAPIF_STATE_MISSED;
560 } while (cmpxchg(&n->state, val, new) != val);
561
562 return true;
563 }
564
565 enum netdev_queue_state_t {
566 __QUEUE_STATE_DRV_XOFF,
567 __QUEUE_STATE_STACK_XOFF,
568 __QUEUE_STATE_FROZEN,
569 };
570
571 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
572 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
573 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
574
575 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
576 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
577 QUEUE_STATE_FROZEN)
578 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
579 QUEUE_STATE_FROZEN)
580
581
582
583
584
585
586
587
588
589
590
591 struct netdev_queue {
592
593
594
595 struct net_device *dev;
596 struct Qdisc __rcu *qdisc;
597 struct Qdisc *qdisc_sleeping;
598 #ifdef CONFIG_SYSFS
599 struct kobject kobj;
600 #endif
601 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
602 int numa_node;
603 #endif
604 unsigned long tx_maxrate;
605
606
607
608
609 unsigned long trans_timeout;
610
611
612 struct net_device *sb_dev;
613 #ifdef CONFIG_XDP_SOCKETS
614 struct xdp_umem *umem;
615 #endif
616
617
618
619 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
620 int xmit_lock_owner;
621
622
623
624 unsigned long trans_start;
625
626 unsigned long state;
627
628 #ifdef CONFIG_BQL
629 struct dql dql;
630 #endif
631 } ____cacheline_aligned_in_smp;
632
633 extern int sysctl_fb_tunnels_only_for_init_net;
634 extern int sysctl_devconf_inherit_init_net;
635
636 static inline bool net_has_fallback_tunnels(const struct net *net)
637 {
638 return net == &init_net ||
639 !IS_ENABLED(CONFIG_SYSCTL) ||
640 !sysctl_fb_tunnels_only_for_init_net;
641 }
642
643 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
644 {
645 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
646 return q->numa_node;
647 #else
648 return NUMA_NO_NODE;
649 #endif
650 }
651
652 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
653 {
654 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
655 q->numa_node = node;
656 #endif
657 }
658
659 #ifdef CONFIG_RPS
660
661
662
663
664 struct rps_map {
665 unsigned int len;
666 struct rcu_head rcu;
667 u16 cpus[0];
668 };
669 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
670
671
672
673
674
675
676 struct rps_dev_flow {
677 u16 cpu;
678 u16 filter;
679 unsigned int last_qtail;
680 };
681 #define RPS_NO_FILTER 0xffff
682
683
684
685
686 struct rps_dev_flow_table {
687 unsigned int mask;
688 struct rcu_head rcu;
689 struct rps_dev_flow flows[0];
690 };
691 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
692 ((_num) * sizeof(struct rps_dev_flow)))
693
694
695
696
697
698
699
700
701
702
703
704 struct rps_sock_flow_table {
705 u32 mask;
706
707 u32 ents[0] ____cacheline_aligned_in_smp;
708 };
709 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
710
711 #define RPS_NO_CPU 0xffff
712
713 extern u32 rps_cpu_mask;
714 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
715
716 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
717 u32 hash)
718 {
719 if (table && hash) {
720 unsigned int index = hash & table->mask;
721 u32 val = hash & ~rps_cpu_mask;
722
723
724 val |= raw_smp_processor_id();
725
726 if (table->ents[index] != val)
727 table->ents[index] = val;
728 }
729 }
730
731 #ifdef CONFIG_RFS_ACCEL
732 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
733 u16 filter_id);
734 #endif
735 #endif
736
737
738 struct netdev_rx_queue {
739 #ifdef CONFIG_RPS
740 struct rps_map __rcu *rps_map;
741 struct rps_dev_flow_table __rcu *rps_flow_table;
742 #endif
743 struct kobject kobj;
744 struct net_device *dev;
745 struct xdp_rxq_info xdp_rxq;
746 #ifdef CONFIG_XDP_SOCKETS
747 struct xdp_umem *umem;
748 #endif
749 } ____cacheline_aligned_in_smp;
750
751
752
753
754 struct rx_queue_attribute {
755 struct attribute attr;
756 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
757 ssize_t (*store)(struct netdev_rx_queue *queue,
758 const char *buf, size_t len);
759 };
760
761 #ifdef CONFIG_XPS
762
763
764
765
766 struct xps_map {
767 unsigned int len;
768 unsigned int alloc_len;
769 struct rcu_head rcu;
770 u16 queues[0];
771 };
772 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
773 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
774 - sizeof(struct xps_map)) / sizeof(u16))
775
776
777
778
779 struct xps_dev_maps {
780 struct rcu_head rcu;
781 struct xps_map __rcu *attr_map[0];
782 };
783
784 #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
785 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
786
787 #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
788 (_rxqs * (_tcs) * sizeof(struct xps_map *)))
789
790 #endif
791
792 #define TC_MAX_QUEUE 16
793 #define TC_BITMASK 15
794
795 struct netdev_tc_txq {
796 u16 count;
797 u16 offset;
798 };
799
800 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
801
802
803
804
805 struct netdev_fcoe_hbainfo {
806 char manufacturer[64];
807 char serial_number[64];
808 char hardware_version[64];
809 char driver_version[64];
810 char optionrom_version[64];
811 char firmware_version[64];
812 char model[256];
813 char model_description[256];
814 };
815 #endif
816
817 #define MAX_PHYS_ITEM_ID_LEN 32
818
819
820
821
822 struct netdev_phys_item_id {
823 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
824 unsigned char id_len;
825 };
826
827 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
828 struct netdev_phys_item_id *b)
829 {
830 return a->id_len == b->id_len &&
831 memcmp(a->id, b->id, a->id_len) == 0;
832 }
833
834 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
835 struct sk_buff *skb,
836 struct net_device *sb_dev);
837
838 enum tc_setup_type {
839 TC_SETUP_QDISC_MQPRIO,
840 TC_SETUP_CLSU32,
841 TC_SETUP_CLSFLOWER,
842 TC_SETUP_CLSMATCHALL,
843 TC_SETUP_CLSBPF,
844 TC_SETUP_BLOCK,
845 TC_SETUP_QDISC_CBS,
846 TC_SETUP_QDISC_RED,
847 TC_SETUP_QDISC_PRIO,
848 TC_SETUP_QDISC_MQ,
849 TC_SETUP_QDISC_ETF,
850 TC_SETUP_ROOT_QDISC,
851 TC_SETUP_QDISC_GRED,
852 TC_SETUP_QDISC_TAPRIO,
853 };
854
855
856
857
858 enum bpf_netdev_command {
859
860
861
862
863
864
865
866 XDP_SETUP_PROG,
867 XDP_SETUP_PROG_HW,
868 XDP_QUERY_PROG,
869 XDP_QUERY_PROG_HW,
870
871 BPF_OFFLOAD_MAP_ALLOC,
872 BPF_OFFLOAD_MAP_FREE,
873 XDP_SETUP_XSK_UMEM,
874 };
875
876 struct bpf_prog_offload_ops;
877 struct netlink_ext_ack;
878 struct xdp_umem;
879
880 struct netdev_bpf {
881 enum bpf_netdev_command command;
882 union {
883
884 struct {
885 u32 flags;
886 struct bpf_prog *prog;
887 struct netlink_ext_ack *extack;
888 };
889
890 struct {
891 u32 prog_id;
892
893 u32 prog_flags;
894 };
895
896 struct {
897 struct bpf_offloaded_map *offmap;
898 };
899
900 struct {
901 struct xdp_umem *umem;
902 u16 queue_id;
903 } xsk;
904 };
905 };
906
907
908 #define XDP_WAKEUP_RX (1 << 0)
909 #define XDP_WAKEUP_TX (1 << 1)
910
911 #ifdef CONFIG_XFRM_OFFLOAD
912 struct xfrmdev_ops {
913 int (*xdo_dev_state_add) (struct xfrm_state *x);
914 void (*xdo_dev_state_delete) (struct xfrm_state *x);
915 void (*xdo_dev_state_free) (struct xfrm_state *x);
916 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
917 struct xfrm_state *x);
918 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
919 };
920 #endif
921
922 struct dev_ifalias {
923 struct rcu_head rcuhead;
924 char ifalias[];
925 };
926
927 struct devlink;
928 struct tlsdev_ops;
929
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249 struct net_device_ops {
1250 int (*ndo_init)(struct net_device *dev);
1251 void (*ndo_uninit)(struct net_device *dev);
1252 int (*ndo_open)(struct net_device *dev);
1253 int (*ndo_stop)(struct net_device *dev);
1254 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1255 struct net_device *dev);
1256 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1257 struct net_device *dev,
1258 netdev_features_t features);
1259 u16 (*ndo_select_queue)(struct net_device *dev,
1260 struct sk_buff *skb,
1261 struct net_device *sb_dev);
1262 void (*ndo_change_rx_flags)(struct net_device *dev,
1263 int flags);
1264 void (*ndo_set_rx_mode)(struct net_device *dev);
1265 int (*ndo_set_mac_address)(struct net_device *dev,
1266 void *addr);
1267 int (*ndo_validate_addr)(struct net_device *dev);
1268 int (*ndo_do_ioctl)(struct net_device *dev,
1269 struct ifreq *ifr, int cmd);
1270 int (*ndo_set_config)(struct net_device *dev,
1271 struct ifmap *map);
1272 int (*ndo_change_mtu)(struct net_device *dev,
1273 int new_mtu);
1274 int (*ndo_neigh_setup)(struct net_device *dev,
1275 struct neigh_parms *);
1276 void (*ndo_tx_timeout) (struct net_device *dev);
1277
1278 void (*ndo_get_stats64)(struct net_device *dev,
1279 struct rtnl_link_stats64 *storage);
1280 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1281 int (*ndo_get_offload_stats)(int attr_id,
1282 const struct net_device *dev,
1283 void *attr_data);
1284 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1285
1286 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1287 __be16 proto, u16 vid);
1288 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1289 __be16 proto, u16 vid);
1290 #ifdef CONFIG_NET_POLL_CONTROLLER
1291 void (*ndo_poll_controller)(struct net_device *dev);
1292 int (*ndo_netpoll_setup)(struct net_device *dev,
1293 struct netpoll_info *info);
1294 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1295 #endif
1296 int (*ndo_set_vf_mac)(struct net_device *dev,
1297 int queue, u8 *mac);
1298 int (*ndo_set_vf_vlan)(struct net_device *dev,
1299 int queue, u16 vlan,
1300 u8 qos, __be16 proto);
1301 int (*ndo_set_vf_rate)(struct net_device *dev,
1302 int vf, int min_tx_rate,
1303 int max_tx_rate);
1304 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1305 int vf, bool setting);
1306 int (*ndo_set_vf_trust)(struct net_device *dev,
1307 int vf, bool setting);
1308 int (*ndo_get_vf_config)(struct net_device *dev,
1309 int vf,
1310 struct ifla_vf_info *ivf);
1311 int (*ndo_set_vf_link_state)(struct net_device *dev,
1312 int vf, int link_state);
1313 int (*ndo_get_vf_stats)(struct net_device *dev,
1314 int vf,
1315 struct ifla_vf_stats
1316 *vf_stats);
1317 int (*ndo_set_vf_port)(struct net_device *dev,
1318 int vf,
1319 struct nlattr *port[]);
1320 int (*ndo_get_vf_port)(struct net_device *dev,
1321 int vf, struct sk_buff *skb);
1322 int (*ndo_set_vf_guid)(struct net_device *dev,
1323 int vf, u64 guid,
1324 int guid_type);
1325 int (*ndo_set_vf_rss_query_en)(
1326 struct net_device *dev,
1327 int vf, bool setting);
1328 int (*ndo_setup_tc)(struct net_device *dev,
1329 enum tc_setup_type type,
1330 void *type_data);
1331 #if IS_ENABLED(CONFIG_FCOE)
1332 int (*ndo_fcoe_enable)(struct net_device *dev);
1333 int (*ndo_fcoe_disable)(struct net_device *dev);
1334 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1335 u16 xid,
1336 struct scatterlist *sgl,
1337 unsigned int sgc);
1338 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1339 u16 xid);
1340 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1341 u16 xid,
1342 struct scatterlist *sgl,
1343 unsigned int sgc);
1344 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1345 struct netdev_fcoe_hbainfo *hbainfo);
1346 #endif
1347
1348 #if IS_ENABLED(CONFIG_LIBFCOE)
1349 #define NETDEV_FCOE_WWNN 0
1350 #define NETDEV_FCOE_WWPN 1
1351 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1352 u64 *wwn, int type);
1353 #endif
1354
1355 #ifdef CONFIG_RFS_ACCEL
1356 int (*ndo_rx_flow_steer)(struct net_device *dev,
1357 const struct sk_buff *skb,
1358 u16 rxq_index,
1359 u32 flow_id);
1360 #endif
1361 int (*ndo_add_slave)(struct net_device *dev,
1362 struct net_device *slave_dev,
1363 struct netlink_ext_ack *extack);
1364 int (*ndo_del_slave)(struct net_device *dev,
1365 struct net_device *slave_dev);
1366 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1367 netdev_features_t features);
1368 int (*ndo_set_features)(struct net_device *dev,
1369 netdev_features_t features);
1370 int (*ndo_neigh_construct)(struct net_device *dev,
1371 struct neighbour *n);
1372 void (*ndo_neigh_destroy)(struct net_device *dev,
1373 struct neighbour *n);
1374
1375 int (*ndo_fdb_add)(struct ndmsg *ndm,
1376 struct nlattr *tb[],
1377 struct net_device *dev,
1378 const unsigned char *addr,
1379 u16 vid,
1380 u16 flags,
1381 struct netlink_ext_ack *extack);
1382 int (*ndo_fdb_del)(struct ndmsg *ndm,
1383 struct nlattr *tb[],
1384 struct net_device *dev,
1385 const unsigned char *addr,
1386 u16 vid);
1387 int (*ndo_fdb_dump)(struct sk_buff *skb,
1388 struct netlink_callback *cb,
1389 struct net_device *dev,
1390 struct net_device *filter_dev,
1391 int *idx);
1392 int (*ndo_fdb_get)(struct sk_buff *skb,
1393 struct nlattr *tb[],
1394 struct net_device *dev,
1395 const unsigned char *addr,
1396 u16 vid, u32 portid, u32 seq,
1397 struct netlink_ext_ack *extack);
1398 int (*ndo_bridge_setlink)(struct net_device *dev,
1399 struct nlmsghdr *nlh,
1400 u16 flags,
1401 struct netlink_ext_ack *extack);
1402 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1403 u32 pid, u32 seq,
1404 struct net_device *dev,
1405 u32 filter_mask,
1406 int nlflags);
1407 int (*ndo_bridge_dellink)(struct net_device *dev,
1408 struct nlmsghdr *nlh,
1409 u16 flags);
1410 int (*ndo_change_carrier)(struct net_device *dev,
1411 bool new_carrier);
1412 int (*ndo_get_phys_port_id)(struct net_device *dev,
1413 struct netdev_phys_item_id *ppid);
1414 int (*ndo_get_port_parent_id)(struct net_device *dev,
1415 struct netdev_phys_item_id *ppid);
1416 int (*ndo_get_phys_port_name)(struct net_device *dev,
1417 char *name, size_t len);
1418 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1419 struct udp_tunnel_info *ti);
1420 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1421 struct udp_tunnel_info *ti);
1422 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1423 struct net_device *dev);
1424 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1425 void *priv);
1426
1427 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1428 int queue_index,
1429 u32 maxrate);
1430 int (*ndo_get_iflink)(const struct net_device *dev);
1431 int (*ndo_change_proto_down)(struct net_device *dev,
1432 bool proto_down);
1433 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1434 struct sk_buff *skb);
1435 void (*ndo_set_rx_headroom)(struct net_device *dev,
1436 int needed_headroom);
1437 int (*ndo_bpf)(struct net_device *dev,
1438 struct netdev_bpf *bpf);
1439 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1440 struct xdp_frame **xdp,
1441 u32 flags);
1442 int (*ndo_xsk_wakeup)(struct net_device *dev,
1443 u32 queue_id, u32 flags);
1444 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
1445 };
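For orientation, a hedged sketch of a minimal ops table such as a simple virtual driver might install; the example_* callbacks are hypothetical, while eth_validate_addr() and eth_mac_addr() are the stock Ethernet helpers from <linux/etherdevice.h>.

static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,		/* hypothetical */
	.ndo_stop		= example_stop,		/* hypothetical */
	.ndo_start_xmit		= example_start_xmit,	/* hypothetical */
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= example_get_stats64,	/* hypothetical */
};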
1446
1493 enum netdev_priv_flags {
1494 IFF_802_1Q_VLAN = 1<<0,
1495 IFF_EBRIDGE = 1<<1,
1496 IFF_BONDING = 1<<2,
1497 IFF_ISATAP = 1<<3,
1498 IFF_WAN_HDLC = 1<<4,
1499 IFF_XMIT_DST_RELEASE = 1<<5,
1500 IFF_DONT_BRIDGE = 1<<6,
1501 IFF_DISABLE_NETPOLL = 1<<7,
1502 IFF_MACVLAN_PORT = 1<<8,
1503 IFF_BRIDGE_PORT = 1<<9,
1504 IFF_OVS_DATAPATH = 1<<10,
1505 IFF_TX_SKB_SHARING = 1<<11,
1506 IFF_UNICAST_FLT = 1<<12,
1507 IFF_TEAM_PORT = 1<<13,
1508 IFF_SUPP_NOFCS = 1<<14,
1509 IFF_LIVE_ADDR_CHANGE = 1<<15,
1510 IFF_MACVLAN = 1<<16,
1511 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1512 IFF_L3MDEV_MASTER = 1<<18,
1513 IFF_NO_QUEUE = 1<<19,
1514 IFF_OPENVSWITCH = 1<<20,
1515 IFF_L3MDEV_SLAVE = 1<<21,
1516 IFF_TEAM = 1<<22,
1517 IFF_RXFH_CONFIGURED = 1<<23,
1518 IFF_PHONY_HEADROOM = 1<<24,
1519 IFF_MACSEC = 1<<25,
1520 IFF_NO_RX_HANDLER = 1<<26,
1521 IFF_FAILOVER = 1<<27,
1522 IFF_FAILOVER_SLAVE = 1<<28,
1523 IFF_L3MDEV_RX_HANDLER = 1<<29,
1524 IFF_LIVE_RENAME_OK = 1<<30,
1525 };
1526
1527 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1528 #define IFF_EBRIDGE IFF_EBRIDGE
1529 #define IFF_BONDING IFF_BONDING
1530 #define IFF_ISATAP IFF_ISATAP
1531 #define IFF_WAN_HDLC IFF_WAN_HDLC
1532 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1533 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1534 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1535 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1536 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1537 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1538 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1539 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1540 #define IFF_TEAM_PORT IFF_TEAM_PORT
1541 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1542 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1543 #define IFF_MACVLAN IFF_MACVLAN
1544 #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1545 #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1546 #define IFF_NO_QUEUE IFF_NO_QUEUE
1547 #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1548 #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1549 #define IFF_TEAM IFF_TEAM
1550 #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1551 #define IFF_MACSEC IFF_MACSEC
1552 #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1553 #define IFF_FAILOVER IFF_FAILOVER
1554 #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1555 #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1556 #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
1557
1783 struct net_device {
1784 char name[IFNAMSIZ];
1785 struct hlist_node name_hlist;
1786 struct dev_ifalias __rcu *ifalias;
1787
1788
1789
1790
1791 unsigned long mem_end;
1792 unsigned long mem_start;
1793 unsigned long base_addr;
1794 int irq;
1795
1796
1797
1798
1799
1800
1801
1802 unsigned long state;
1803
1804 struct list_head dev_list;
1805 struct list_head napi_list;
1806 struct list_head unreg_list;
1807 struct list_head close_list;
1808 struct list_head ptype_all;
1809 struct list_head ptype_specific;
1810
1811 struct {
1812 struct list_head upper;
1813 struct list_head lower;
1814 } adj_list;
1815
1816 netdev_features_t features;
1817 netdev_features_t hw_features;
1818 netdev_features_t wanted_features;
1819 netdev_features_t vlan_features;
1820 netdev_features_t hw_enc_features;
1821 netdev_features_t mpls_features;
1822 netdev_features_t gso_partial_features;
1823
1824 int ifindex;
1825 int group;
1826
1827 struct net_device_stats stats;
1828
1829 atomic_long_t rx_dropped;
1830 atomic_long_t tx_dropped;
1831 atomic_long_t rx_nohandler;
1832
1833
1834 atomic_t carrier_up_count;
1835 atomic_t carrier_down_count;
1836
1837 #ifdef CONFIG_WIRELESS_EXT
1838 const struct iw_handler_def *wireless_handlers;
1839 struct iw_public_data *wireless_data;
1840 #endif
1841 const struct net_device_ops *netdev_ops;
1842 const struct ethtool_ops *ethtool_ops;
1843 #ifdef CONFIG_NET_L3_MASTER_DEV
1844 const struct l3mdev_ops *l3mdev_ops;
1845 #endif
1846 #if IS_ENABLED(CONFIG_IPV6)
1847 const struct ndisc_ops *ndisc_ops;
1848 #endif
1849
1850 #ifdef CONFIG_XFRM_OFFLOAD
1851 const struct xfrmdev_ops *xfrmdev_ops;
1852 #endif
1853
1854 #if IS_ENABLED(CONFIG_TLS_DEVICE)
1855 const struct tlsdev_ops *tlsdev_ops;
1856 #endif
1857
1858 const struct header_ops *header_ops;
1859
1860 unsigned int flags;
1861 unsigned int priv_flags;
1862
1863 unsigned short gflags;
1864 unsigned short padded;
1865
1866 unsigned char operstate;
1867 unsigned char link_mode;
1868
1869 unsigned char if_port;
1870 unsigned char dma;
1871
1872
1873
1874
1875
1876
1877 unsigned int mtu;
1878 unsigned int min_mtu;
1879 unsigned int max_mtu;
1880 unsigned short type;
1881 unsigned short hard_header_len;
1882 unsigned char min_header_len;
1883
1884 unsigned short needed_headroom;
1885 unsigned short needed_tailroom;
1886
1887
1888 unsigned char perm_addr[MAX_ADDR_LEN];
1889 unsigned char addr_assign_type;
1890 unsigned char addr_len;
1891 unsigned char upper_level;
1892 unsigned char lower_level;
1893 unsigned short neigh_priv_len;
1894 unsigned short dev_id;
1895 unsigned short dev_port;
1896 spinlock_t addr_list_lock;
1897 unsigned char name_assign_type;
1898 bool uc_promisc;
1899 struct netdev_hw_addr_list uc;
1900 struct netdev_hw_addr_list mc;
1901 struct netdev_hw_addr_list dev_addrs;
1902
1903 #ifdef CONFIG_SYSFS
1904 struct kset *queues_kset;
1905 #endif
1906 unsigned int promiscuity;
1907 unsigned int allmulti;
1908
1909
1910
1911
1912 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1913 struct vlan_info __rcu *vlan_info;
1914 #endif
1915 #if IS_ENABLED(CONFIG_NET_DSA)
1916 struct dsa_port *dsa_ptr;
1917 #endif
1918 #if IS_ENABLED(CONFIG_TIPC)
1919 struct tipc_bearer __rcu *tipc_ptr;
1920 #endif
1921 #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
1922 void *atalk_ptr;
1923 #endif
1924 struct in_device __rcu *ip_ptr;
1925 #if IS_ENABLED(CONFIG_DECNET)
1926 struct dn_dev __rcu *dn_ptr;
1927 #endif
1928 struct inet6_dev __rcu *ip6_ptr;
1929 #if IS_ENABLED(CONFIG_AX25)
1930 void *ax25_ptr;
1931 #endif
1932 struct wireless_dev *ieee80211_ptr;
1933 struct wpan_dev *ieee802154_ptr;
1934 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
1935 struct mpls_dev __rcu *mpls_ptr;
1936 #endif
1937
1938
1939
1940
1941
1942 unsigned char *dev_addr;
1943
1944 struct netdev_rx_queue *_rx;
1945 unsigned int num_rx_queues;
1946 unsigned int real_num_rx_queues;
1947
1948 struct bpf_prog __rcu *xdp_prog;
1949 unsigned long gro_flush_timeout;
1950 rx_handler_func_t __rcu *rx_handler;
1951 void __rcu *rx_handler_data;
1952
1953 #ifdef CONFIG_NET_CLS_ACT
1954 struct mini_Qdisc __rcu *miniq_ingress;
1955 #endif
1956 struct netdev_queue __rcu *ingress_queue;
1957 #ifdef CONFIG_NETFILTER_INGRESS
1958 struct nf_hook_entries __rcu *nf_hooks_ingress;
1959 #endif
1960
1961 unsigned char broadcast[MAX_ADDR_LEN];
1962 #ifdef CONFIG_RFS_ACCEL
1963 struct cpu_rmap *rx_cpu_rmap;
1964 #endif
1965 struct hlist_node index_hlist;
1966
1967
1968
1969
1970 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1971 unsigned int num_tx_queues;
1972 unsigned int real_num_tx_queues;
1973 struct Qdisc *qdisc;
1974 #ifdef CONFIG_NET_SCHED
1975 DECLARE_HASHTABLE (qdisc_hash, 4);
1976 #endif
1977 unsigned int tx_queue_len;
1978 spinlock_t tx_global_lock;
1979 int watchdog_timeo;
1980
1981 #ifdef CONFIG_XPS
1982 struct xps_dev_maps __rcu *xps_cpus_map;
1983 struct xps_dev_maps __rcu *xps_rxqs_map;
1984 #endif
1985 #ifdef CONFIG_NET_CLS_ACT
1986 struct mini_Qdisc __rcu *miniq_egress;
1987 #endif
1988
1989
1990 struct timer_list watchdog_timer;
1991
1992 int __percpu *pcpu_refcnt;
1993 struct list_head todo_list;
1994
1995 struct list_head link_watch_list;
1996
1997 enum { NETREG_UNINITIALIZED=0,
1998 NETREG_REGISTERED,
1999 NETREG_UNREGISTERING,
2000 NETREG_UNREGISTERED,
2001 NETREG_RELEASED,
2002 NETREG_DUMMY,
2003 } reg_state:8;
2004
2005 bool dismantle;
2006
2007 enum {
2008 RTNL_LINK_INITIALIZED,
2009 RTNL_LINK_INITIALIZING,
2010 } rtnl_link_state:16;
2011
2012 bool needs_free_netdev;
2013 void (*priv_destructor)(struct net_device *dev);
2014
2015 #ifdef CONFIG_NETPOLL
2016 struct netpoll_info __rcu *npinfo;
2017 #endif
2018
2019 possible_net_t nd_net;
2020
2021
2022 union {
2023 void *ml_priv;
2024 struct pcpu_lstats __percpu *lstats;
2025 struct pcpu_sw_netstats __percpu *tstats;
2026 struct pcpu_dstats __percpu *dstats;
2027 };
2028
2029 #if IS_ENABLED(CONFIG_GARP)
2030 struct garp_port __rcu *garp_port;
2031 #endif
2032 #if IS_ENABLED(CONFIG_MRP)
2033 struct mrp_port __rcu *mrp_port;
2034 #endif
2035
2036 struct device dev;
2037 const struct attribute_group *sysfs_groups[4];
2038 const struct attribute_group *sysfs_rx_queue_group;
2039
2040 const struct rtnl_link_ops *rtnl_link_ops;
2041
2042
2043 #define GSO_MAX_SIZE 65536
2044 unsigned int gso_max_size;
2045 #define GSO_MAX_SEGS 65535
2046 u16 gso_max_segs;
2047
2048 #ifdef CONFIG_DCB
2049 const struct dcbnl_rtnl_ops *dcbnl_ops;
2050 #endif
2051 s16 num_tc;
2052 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2053 u8 prio_tc_map[TC_BITMASK + 1];
2054
2055 #if IS_ENABLED(CONFIG_FCOE)
2056 unsigned int fcoe_ddp_xid;
2057 #endif
2058 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2059 struct netprio_map __rcu *priomap;
2060 #endif
2061 struct phy_device *phydev;
2062 struct sfp_bus *sfp_bus;
2063 struct lock_class_key qdisc_tx_busylock_key;
2064 struct lock_class_key qdisc_running_key;
2065 struct lock_class_key qdisc_xmit_lock_key;
2066 struct lock_class_key addr_list_lock_key;
2067 bool proto_down;
2068 unsigned wol_enabled:1;
2069 };
2070 #define to_net_dev(d) container_of(d, struct net_device, dev)
2071
2072 static inline bool netif_elide_gro(const struct net_device *dev)
2073 {
2074 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2075 return true;
2076 return false;
2077 }
2078
2079 #define NETDEV_ALIGN 32
2080
2081 static inline
2082 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2083 {
2084 return dev->prio_tc_map[prio & TC_BITMASK];
2085 }
2086
2087 static inline
2088 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2089 {
2090 if (tc >= dev->num_tc)
2091 return -EINVAL;
2092
2093 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2094 return 0;
2095 }
2096
2097 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2098 void netdev_reset_tc(struct net_device *dev);
2099 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2100 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2101
2102 static inline
2103 int netdev_get_num_tc(struct net_device *dev)
2104 {
2105 return dev->num_tc;
2106 }
2107
2108 void netdev_unbind_sb_channel(struct net_device *dev,
2109 struct net_device *sb_dev);
2110 int netdev_bind_sb_channel_queue(struct net_device *dev,
2111 struct net_device *sb_dev,
2112 u8 tc, u16 count, u16 offset);
2113 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2114 static inline int netdev_get_sb_channel(struct net_device *dev)
2115 {
2116 return max_t(int, -dev->num_tc, 0);
2117 }
2118
2119 static inline
2120 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2121 unsigned int index)
2122 {
2123 return &dev->_tx[index];
2124 }
2125
2126 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2127 const struct sk_buff *skb)
2128 {
2129 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2130 }
2131
2132 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2133 void (*f)(struct net_device *,
2134 struct netdev_queue *,
2135 void *),
2136 void *arg)
2137 {
2138 unsigned int i;
2139
2140 for (i = 0; i < dev->num_tx_queues; i++)
2141 f(dev, &dev->_tx[i], arg);
2142 }
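A small sketch of driving a per-queue helper through netdev_for_each_tx_queue(); here, as an assumption, every TX queue's BQL accounting is cleared with netdev_tx_reset_queue(), and the example_* names are hypothetical.

static void example_reset_one_txq(struct net_device *dev,
				  struct netdev_queue *txq, void *unused)
{
	netdev_tx_reset_queue(txq);	/* drop any pending BQL accounting */
}

static void example_reset_all_txqs(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, example_reset_one_txq, NULL);
}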
2143
2144 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2145 struct net_device *sb_dev);
2146 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2147 struct sk_buff *skb,
2148 struct net_device *sb_dev);
2149
2150
2151
2152
2153 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2154 {
2155 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2156 }
2157
2158 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2159 {
2160 if (dev->netdev_ops->ndo_set_rx_headroom)
2161 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2162 }
2163
2164
2165 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2166 {
2167 netdev_set_rx_headroom(dev, -1);
2168 }
2169
2170
2171
2172
2173 static inline
2174 struct net *dev_net(const struct net_device *dev)
2175 {
2176 return read_pnet(&dev->nd_net);
2177 }
2178
2179 static inline
2180 void dev_net_set(struct net_device *dev, struct net *net)
2181 {
2182 write_pnet(&dev->nd_net, net);
2183 }
2184
2185
2186
2187
2188
2189
2190
2191 static inline void *netdev_priv(const struct net_device *dev)
2192 {
2193 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2194 }
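The usual allocation pattern, sketched with a hypothetical struct example_priv: the private area is carved out of the same allocation, aligned just past struct net_device, and netdev_priv() returns a pointer to it. alloc_etherdev() is the standard helper from <linux/etherdevice.h>; example_poll() is the hypothetical poll callback shown earlier.

struct example_priv {
	struct napi_struct napi;
	/* ... further driver state ... */
};

static struct net_device *example_create(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);	/* points into the same allocation as dev */
	netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);

	return dev;
}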
2195
2196
2197
2198
2199 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2200
2201
2202
2203
2204
2205 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2206
2207
2208
2209
2210 #define NAPI_POLL_WEIGHT 64
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2223 int (*poll)(struct napi_struct *, int), int weight);
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236 static inline void netif_tx_napi_add(struct net_device *dev,
2237 struct napi_struct *napi,
2238 int (*poll)(struct napi_struct *, int),
2239 int weight)
2240 {
2241 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2242 netif_napi_add(dev, napi, poll, weight);
2243 }
2244
2245
2246
2247
2248
2249
2250
2251 void netif_napi_del(struct napi_struct *napi);
2252
2253 struct napi_gro_cb {
2254
2255 void *frag0;
2256
2257
2258 unsigned int frag0_len;
2259
2260
2261 int data_offset;
2262
2263
2264 u16 flush;
2265
2266
2267 u16 flush_id;
2268
2269
2270 u16 count;
2271
2272
2273 u16 gro_remcsum_start;
2274
2275
2276 unsigned long age;
2277
2278
2279 u16 proto;
2280
2281
2282 u8 same_flow:1;
2283
2284
2285 u8 encap_mark:1;
2286
2287
2288 u8 csum_valid:1;
2289
2290
2291 u8 csum_cnt:3;
2292
2293
2294 u8 free:2;
2295 #define NAPI_GRO_FREE 1
2296 #define NAPI_GRO_FREE_STOLEN_HEAD 2
2297
2298
2299 u8 is_ipv6:1;
2300
2301
2302 u8 is_fou:1;
2303
2304
2305 u8 is_atomic:1;
2306
2307
2308 u8 recursion_counter:4;
2309
2310
2311
2312
2313 __wsum csum;
2314
2315
2316 struct sk_buff *last;
2317 };
2318
2319 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2320
2321 #define GRO_RECURSION_LIMIT 15
2322 static inline int gro_recursion_inc_test(struct sk_buff *skb)
2323 {
2324 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2325 }
2326
2327 typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2328 static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2329 struct list_head *head,
2330 struct sk_buff *skb)
2331 {
2332 if (unlikely(gro_recursion_inc_test(skb))) {
2333 NAPI_GRO_CB(skb)->flush |= 1;
2334 return NULL;
2335 }
2336
2337 return cb(head, skb);
2338 }
2339
2340 typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2341 struct sk_buff *);
2342 static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2343 struct sock *sk,
2344 struct list_head *head,
2345 struct sk_buff *skb)
2346 {
2347 if (unlikely(gro_recursion_inc_test(skb))) {
2348 NAPI_GRO_CB(skb)->flush |= 1;
2349 return NULL;
2350 }
2351
2352 return cb(sk, head, skb);
2353 }
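A hedged sketch of how an encapsulation's gro_receive callback might chain to an inner protocol handler through call_gro_receive(), so the recursion counter above bounds the nesting depth; the inner-handler lookup is hypothetical.

static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	gro_receive_t inner = example_lookup_inner_handler(skb);	/* hypothetical */

	if (!inner) {
		NAPI_GRO_CB(skb)->flush |= 1;	/* no handler: stop aggregating */
		return NULL;
	}

	return call_gro_receive(inner, head, skb);
}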
2354
2355 struct packet_type {
2356 __be16 type;
2357 bool ignore_outgoing;
2358 struct net_device *dev;
2359 int (*func) (struct sk_buff *,
2360 struct net_device *,
2361 struct packet_type *,
2362 struct net_device *);
2363 void (*list_func) (struct list_head *,
2364 struct packet_type *,
2365 struct net_device *);
2366 bool (*id_match)(struct packet_type *ptype,
2367 struct sock *sk);
2368 void *af_packet_priv;
2369 struct list_head list;
2370 };
2371
2372 struct offload_callbacks {
2373 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2374 netdev_features_t features);
2375 struct sk_buff *(*gro_receive)(struct list_head *head,
2376 struct sk_buff *skb);
2377 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2378 };
2379
2380 struct packet_offload {
2381 __be16 type;
2382 u16 priority;
2383 struct offload_callbacks callbacks;
2384 struct list_head list;
2385 };
2386
2387
2388 struct pcpu_sw_netstats {
2389 u64 rx_packets;
2390 u64 rx_bytes;
2391 u64 tx_packets;
2392 u64 tx_bytes;
2393 struct u64_stats_sync syncp;
2394 } __aligned(4 * sizeof(u64));
2395
2396 struct pcpu_lstats {
2397 u64 packets;
2398 u64 bytes;
2399 struct u64_stats_sync syncp;
2400 } __aligned(2 * sizeof(u64));
2401
2402 #define __netdev_alloc_pcpu_stats(type, gfp) \
2403 ({ \
2404 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2405 if (pcpu_stats) { \
2406 int __cpu; \
2407 for_each_possible_cpu(__cpu) { \
2408 typeof(type) *stat; \
2409 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2410 u64_stats_init(&stat->syncp); \
2411 } \
2412 } \
2413 pcpu_stats; \
2414 })
2415
2416 #define netdev_alloc_pcpu_stats(type) \
2417 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
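A sketch of the common per-CPU statistics pattern, assuming the device uses the tstats member of the union in struct net_device: allocate once in an init path, then bump counters on the hot path inside a u64_stats sequence. The example_* functions are hypothetical.

static int example_init_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}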
2418
2419 enum netdev_lag_tx_type {
2420 NETDEV_LAG_TX_TYPE_UNKNOWN,
2421 NETDEV_LAG_TX_TYPE_RANDOM,
2422 NETDEV_LAG_TX_TYPE_BROADCAST,
2423 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2424 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2425 NETDEV_LAG_TX_TYPE_HASH,
2426 };
2427
2428 enum netdev_lag_hash {
2429 NETDEV_LAG_HASH_NONE,
2430 NETDEV_LAG_HASH_L2,
2431 NETDEV_LAG_HASH_L34,
2432 NETDEV_LAG_HASH_L23,
2433 NETDEV_LAG_HASH_E23,
2434 NETDEV_LAG_HASH_E34,
2435 NETDEV_LAG_HASH_UNKNOWN,
2436 };
2437
2438 struct netdev_lag_upper_info {
2439 enum netdev_lag_tx_type tx_type;
2440 enum netdev_lag_hash hash_type;
2441 };
2442
2443 struct netdev_lag_lower_state_info {
2444 u8 link_up : 1,
2445 tx_enabled : 1;
2446 };
2447
2448 #include <linux/notifier.h>
2449
2450
2451
2452
2453
2454 enum netdev_cmd {
2455 NETDEV_UP = 1,
2456 NETDEV_DOWN,
2457 NETDEV_REBOOT,
2458
2459
2460
2461 NETDEV_CHANGE,
2462 NETDEV_REGISTER,
2463 NETDEV_UNREGISTER,
2464 NETDEV_CHANGEMTU,
2465 NETDEV_CHANGEADDR,
2466 NETDEV_PRE_CHANGEADDR,
2467 NETDEV_GOING_DOWN,
2468 NETDEV_CHANGENAME,
2469 NETDEV_FEAT_CHANGE,
2470 NETDEV_BONDING_FAILOVER,
2471 NETDEV_PRE_UP,
2472 NETDEV_PRE_TYPE_CHANGE,
2473 NETDEV_POST_TYPE_CHANGE,
2474 NETDEV_POST_INIT,
2475 NETDEV_RELEASE,
2476 NETDEV_NOTIFY_PEERS,
2477 NETDEV_JOIN,
2478 NETDEV_CHANGEUPPER,
2479 NETDEV_RESEND_IGMP,
2480 NETDEV_PRECHANGEMTU,
2481 NETDEV_CHANGEINFODATA,
2482 NETDEV_BONDING_INFO,
2483 NETDEV_PRECHANGEUPPER,
2484 NETDEV_CHANGELOWERSTATE,
2485 NETDEV_UDP_TUNNEL_PUSH_INFO,
2486 NETDEV_UDP_TUNNEL_DROP_INFO,
2487 NETDEV_CHANGE_TX_QUEUE_LEN,
2488 NETDEV_CVLAN_FILTER_PUSH_INFO,
2489 NETDEV_CVLAN_FILTER_DROP_INFO,
2490 NETDEV_SVLAN_FILTER_PUSH_INFO,
2491 NETDEV_SVLAN_FILTER_DROP_INFO,
2492 };
2493 const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2494
2495 int register_netdevice_notifier(struct notifier_block *nb);
2496 int unregister_netdevice_notifier(struct notifier_block *nb);
2497
2498 struct netdev_notifier_info {
2499 struct net_device *dev;
2500 struct netlink_ext_ack *extack;
2501 };
2502
2503 struct netdev_notifier_info_ext {
2504 struct netdev_notifier_info info;
2505 union {
2506 u32 mtu;
2507 } ext;
2508 };
2509
2510 struct netdev_notifier_change_info {
2511 struct netdev_notifier_info info;
2512 unsigned int flags_changed;
2513 };
2514
2515 struct netdev_notifier_changeupper_info {
2516 struct netdev_notifier_info info;
2517 struct net_device *upper_dev;
2518 bool master;
2519 bool linking;
2520 void *upper_info;
2521 };
2522
2523 struct netdev_notifier_changelowerstate_info {
2524 struct netdev_notifier_info info;
2525 void *lower_state_info;
2526 };
2527
2528 struct netdev_notifier_pre_changeaddr_info {
2529 struct netdev_notifier_info info;
2530 const unsigned char *dev_addr;
2531 };
2532
2533 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2534 struct net_device *dev)
2535 {
2536 info->dev = dev;
2537 info->extack = NULL;
2538 }
2539
2540 static inline struct net_device *
2541 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2542 {
2543 return info->dev;
2544 }
2545
2546 static inline struct netlink_ext_ack *
2547 netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2548 {
2549 return info->extack;
2550 }
2551
2552 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
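A minimal sketch of a netdevice notifier: the void pointer handed to the callback is a struct netdev_notifier_info, unpacked with the helpers above. The logging body is purely illustrative and the example_* names are hypothetical.

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP || event == NETDEV_DOWN)
		netdev_info(dev, "%s\n", netdev_cmd_to_name(event));

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* registered once with register_netdevice_notifier(&example_netdev_nb) */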
2553
2554
2555 extern rwlock_t dev_base_lock;
2556
2557 #define for_each_netdev(net, d) \
2558 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2559 #define for_each_netdev_reverse(net, d) \
2560 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2561 #define for_each_netdev_rcu(net, d) \
2562 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2563 #define for_each_netdev_safe(net, d, n) \
2564 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2565 #define for_each_netdev_continue(net, d) \
2566 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2567 #define for_each_netdev_continue_rcu(net, d) \
2568 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2569 #define for_each_netdev_in_bond_rcu(bond, slave) \
2570 for_each_netdev_rcu(&init_net, slave) \
2571 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2572 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2573
2574 static inline struct net_device *next_net_device(struct net_device *dev)
2575 {
2576 struct list_head *lh;
2577 struct net *net;
2578
2579 net = dev_net(dev);
2580 lh = dev->dev_list.next;
2581 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2582 }
2583
2584 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2585 {
2586 struct list_head *lh;
2587 struct net *net;
2588
2589 net = dev_net(dev);
2590 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2591 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2592 }
2593
2594 static inline struct net_device *first_net_device(struct net *net)
2595 {
2596 return list_empty(&net->dev_base_head) ? NULL :
2597 net_device_entry(net->dev_base_head.next);
2598 }
2599
2600 static inline struct net_device *first_net_device_rcu(struct net *net)
2601 {
2602 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2603
2604 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2605 }
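For example, a read-only walk over every device in a namespace can be done under RCU with the iterators above; a hedged sketch:

static void example_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}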
2606
2607 int netdev_boot_setup_check(struct net_device *dev);
2608 unsigned long netdev_boot_base(const char *prefix, int unit);
2609 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2610 const char *hwaddr);
2611 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2612 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2613 void dev_add_pack(struct packet_type *pt);
2614 void dev_remove_pack(struct packet_type *pt);
2615 void __dev_remove_pack(struct packet_type *pt);
2616 void dev_add_offload(struct packet_offload *po);
2617 void dev_remove_offload(struct packet_offload *po);
2618
2619 int dev_get_iflink(const struct net_device *dev);
2620 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2621 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2622 unsigned short mask);
2623 struct net_device *dev_get_by_name(struct net *net, const char *name);
2624 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2625 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2626 int dev_alloc_name(struct net_device *dev, const char *name);
2627 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2628 void dev_close(struct net_device *dev);
2629 void dev_close_many(struct list_head *head, bool unlink);
2630 void dev_disable_lro(struct net_device *dev);
2631 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2632 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2633 struct net_device *sb_dev);
2634 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2635 struct net_device *sb_dev);
2636 int dev_queue_xmit(struct sk_buff *skb);
2637 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2638 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2639 int register_netdevice(struct net_device *dev);
2640 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2641 void unregister_netdevice_many(struct list_head *head);
2642 static inline void unregister_netdevice(struct net_device *dev)
2643 {
2644 unregister_netdevice_queue(dev, NULL);
2645 }
2646
2647 int netdev_refcnt_read(const struct net_device *dev);
2648 void free_netdev(struct net_device *dev);
2649 void netdev_freemem(struct net_device *dev);
2650 void synchronize_net(void);
2651 int init_dummy_netdev(struct net_device *dev);
2652
2653 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2654 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2655 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2656 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2657 int netdev_get_name(struct net *net, char *name, int ifindex);
2658 int dev_restart(struct net_device *dev);
2659 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2660
2661 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2662 {
2663 return NAPI_GRO_CB(skb)->data_offset;
2664 }
2665
2666 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2667 {
2668 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2669 }
2670
2671 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2672 {
2673 NAPI_GRO_CB(skb)->data_offset += len;
2674 }
2675
2676 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2677 unsigned int offset)
2678 {
2679 return NAPI_GRO_CB(skb)->frag0 + offset;
2680 }
2681
2682 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2683 {
2684 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2685 }
2686
2687 static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2688 {
2689 NAPI_GRO_CB(skb)->frag0 = NULL;
2690 NAPI_GRO_CB(skb)->frag0_len = 0;
2691 }
2692
2693 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2694 unsigned int offset)
2695 {
2696 if (!pskb_may_pull(skb, hlen))
2697 return NULL;
2698
2699 skb_gro_frag0_invalidate(skb);
2700 return skb->data + offset;
2701 }
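
/* Illustrative sketch (not part of this header): the usual way a
 * gro_receive callback pulls its protocol header, trying the frag0 fast
 * path first and falling back to skb_gro_header_slow() only when frag0
 * does not hold enough bytes.  "example_gro_header" and "hlen" are
 * hypothetical names; TCP/UDP GRO follow this same pattern.
 */
static inline void *example_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int off = skb_gro_offset(skb);
	void *hdr = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, off + hlen)) {
		hdr = skb_gro_header_slow(skb, off + hlen, off);
		if (!hdr)
			return NULL;	/* not enough data; caller should flush */
	}
	return hdr;
}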
2702
2703 static inline void *skb_gro_network_header(struct sk_buff *skb)
2704 {
2705 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2706 skb_network_offset(skb);
2707 }
2708
2709 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2710 const void *start, unsigned int len)
2711 {
2712 if (NAPI_GRO_CB(skb)->csum_valid)
2713 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2714 csum_partial(start, len, 0));
2715 }
2716
2717 /* GRO checksum functions. These are logical equivalents of the normal
2718  * checksum functions (in skbuff.h) except that they operate on the GRO
2719  * offsets and fields in sk_buff.
2720  */
2721
2722 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2723
2724 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2725 {
2726 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2727 }
2728
2729 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2730 bool zero_okay,
2731 __sum16 check)
2732 {
2733 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2734 skb_checksum_start_offset(skb) <
2735 skb_gro_offset(skb)) &&
2736 !skb_at_gro_remcsum_start(skb) &&
2737 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2738 (!zero_okay || check));
2739 }
2740
2741 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2742 __wsum psum)
2743 {
2744 if (NAPI_GRO_CB(skb)->csum_valid &&
2745 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2746 return 0;
2747
2748 NAPI_GRO_CB(skb)->csum = psum;
2749
2750 return __skb_gro_checksum_complete(skb);
2751 }
2752
2753 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2754 {
2755 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2756 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2757 NAPI_GRO_CB(skb)->csum_cnt--;
2758 } else {
2759 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2760  * verified a new top level checksum or an encapsulated one
2761  * during GRO. This saves work if we fall back to the normal path.
2762  */
2763 __skb_incr_checksum_unnecessary(skb);
2764 }
2765 }
2766
2767 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2768 compute_pseudo) \
2769 ({ \
2770 __sum16 __ret = 0; \
2771 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2772 __ret = __skb_gro_checksum_validate_complete(skb, \
2773 compute_pseudo(skb, proto)); \
2774 if (!__ret) \
2775 skb_gro_incr_csum_unnecessary(skb); \
2776 __ret; \
2777 })
2778
2779 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2780 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2781
2782 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2783 compute_pseudo) \
2784 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2785
2786 #define skb_gro_checksum_simple_validate(skb) \
2787 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
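
/* Illustrative sketch (not part of this header): a protocol gro_receive
 * handler normally validates the checksum first and flushes the packet
 * when validation fails, much like the GRE/UDP offload code does.
 * "example_gro_receive" and its labels are hypothetical.
 */
static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	/* non-zero return means the checksum could not be validated */
	if (skb_gro_checksum_simple_validate(skb))
		goto flush;

	/* ... header comparison against the packets queued on @head ... */
	return NULL;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}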
2788
2789 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2790 {
2791 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2792 !NAPI_GRO_CB(skb)->csum_valid);
2793 }
2794
2795 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2796 __sum16 check, __wsum pseudo)
2797 {
2798 NAPI_GRO_CB(skb)->csum = ~pseudo;
2799 NAPI_GRO_CB(skb)->csum_valid = 1;
2800 }
2801
2802 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2803 do { \
2804 if (__skb_gro_checksum_convert_check(skb)) \
2805 __skb_gro_checksum_convert(skb, check, \
2806 compute_pseudo(skb, proto)); \
2807 } while (0)
2808
2809 struct gro_remcsum {
2810 int offset;
2811 __wsum delta;
2812 };
2813
2814 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2815 {
2816 grc->offset = 0;
2817 grc->delta = 0;
2818 }
2819
2820 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2821 unsigned int off, size_t hdrlen,
2822 int start, int offset,
2823 struct gro_remcsum *grc,
2824 bool nopartial)
2825 {
2826 __wsum delta;
2827 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2828
2829 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2830
2831 if (!nopartial) {
2832 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2833 return ptr;
2834 }
2835
2836 ptr = skb_gro_header_fast(skb, off);
2837 if (skb_gro_header_hard(skb, off + plen)) {
2838 ptr = skb_gro_header_slow(skb, off + plen, off);
2839 if (!ptr)
2840 return NULL;
2841 }
2842
2843 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2844 start, offset);
2845
2846 /* Adjust skb->csum since we changed the packet */
2847 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2848
2849 grc->offset = off + hdrlen + offset;
2850 grc->delta = delta;
2851
2852 return ptr;
2853 }
2854
2855 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2856 struct gro_remcsum *grc)
2857 {
2858 void *ptr;
2859 size_t plen = grc->offset + sizeof(u16);
2860
2861 if (!grc->delta)
2862 return;
2863
2864 ptr = skb_gro_header_fast(skb, grc->offset);
2865 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2866 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2867 if (!ptr)
2868 return;
2869 }
2870
2871 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2872 }
2873
2874 #ifdef CONFIG_XFRM_OFFLOAD
2875 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
2876 {
2877 if (PTR_ERR(pp) != -EINPROGRESS)
2878 NAPI_GRO_CB(skb)->flush |= flush;
2879 }
2880 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
2881 struct sk_buff *pp,
2882 int flush,
2883 struct gro_remcsum *grc)
2884 {
2885 if (PTR_ERR(pp) != -EINPROGRESS) {
2886 NAPI_GRO_CB(skb)->flush |= flush;
2887 skb_gro_remcsum_cleanup(skb, grc);
2888 skb->remcsum_offload = 0;
2889 }
2890 }
2891 #else
2892 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
2893 {
2894 NAPI_GRO_CB(skb)->flush |= flush;
2895 }
2896 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
2897 struct sk_buff *pp,
2898 int flush,
2899 struct gro_remcsum *grc)
2900 {
2901 NAPI_GRO_CB(skb)->flush |= flush;
2902 skb_gro_remcsum_cleanup(skb, grc);
2903 skb->remcsum_offload = 0;
2904 }
2905 #endif
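
/* Illustrative sketch (not part of this header): how a tunnel gro_receive
 * handler with remote checksum offload (VXLAN-style) typically strings the
 * remcsum helpers together.  Everything except the skb_gro_* helpers is a
 * hypothetical, heavily trimmed-down outline.
 */
static struct sk_buff *example_remcsum_gro_receive(struct sk_buff *skb,
						   void *hdr, unsigned int off,
						   size_t hdrlen, int start,
						   int offset, bool nopartial)
{
	struct sk_buff *pp = NULL;
	struct gro_remcsum grc;
	int flush = 1;

	skb_gro_remcsum_init(&grc);

	/* skb_gro_remcsum_process() requires a validated checksum */
	if (!NAPI_GRO_CB(skb)->csum_valid)
		goto out;

	/* patch the inner checksum and remember the delta for cleanup */
	hdr = skb_gro_remcsum_process(skb, hdr, off, hdrlen,
				      start, offset, &grc, nopartial);
	if (!hdr)
		goto out;

	/* ... normal GRO matching would run here and may clear "flush" ... */
	flush = 0;

out:
	/* sets the flush flag and reverts the remcsum adjustment (see above) */
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
	return pp;
}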
2906
2907 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2908 unsigned short type,
2909 const void *daddr, const void *saddr,
2910 unsigned int len)
2911 {
2912 if (!dev->header_ops || !dev->header_ops->create)
2913 return 0;
2914
2915 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2916 }
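
/* Illustrative sketch (not part of this header): pushing the link-layer
 * header onto an outgoing frame, roughly what neighbour output or a raw
 * packet path does.  "daddr" is the hypothetical destination hardware
 * address; passing NULL as the source lets the device fill in its own.
 */
static int example_push_ll_header(struct sk_buff *skb, struct net_device *dev,
				  const unsigned char *daddr)
{
	int err;

	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      daddr, NULL, skb->len);
	/* header_ops->create() returns the header length on success */
	return err < 0 ? err : 0;
}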
2917
2918 static inline int dev_parse_header(const struct sk_buff *skb,
2919 unsigned char *haddr)
2920 {
2921 const struct net_device *dev = skb->dev;
2922
2923 if (!dev->header_ops || !dev->header_ops->parse)
2924 return 0;
2925 return dev->header_ops->parse(skb, haddr);
2926 }
2927
2928 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
2929 {
2930 const struct net_device *dev = skb->dev;
2931
2932 if (!dev->header_ops || !dev->header_ops->parse_protocol)
2933 return 0;
2934 return dev->header_ops->parse_protocol(skb);
2935 }
2936
2937
2938 static inline bool dev_validate_header(const struct net_device *dev,
2939 char *ll_header, int len)
2940 {
2941 if (likely(len >= dev->hard_header_len))
2942 return true;
2943 if (len < dev->min_header_len)
2944 return false;
2945
2946 if (capable(CAP_SYS_RAWIO)) {
2947 memset(ll_header + len, 0, dev->hard_header_len - len);
2948 return true;
2949 }
2950
2951 if (dev->header_ops && dev->header_ops->validate)
2952 return dev->header_ops->validate(ll_header, len);
2953
2954 return false;
2955 }
2956
2957 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
2958 int len, int size);
2959 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2960 static inline int unregister_gifconf(unsigned int family)
2961 {
2962 return register_gifconf(family, NULL);
2963 }
2964
2965 #ifdef CONFIG_NET_FLOW_LIMIT
2966 #define FLOW_LIMIT_HISTORY (1 << 7)
2967 struct sd_flow_limit {
2968 u64 count;
2969 unsigned int num_buckets;
2970 unsigned int history_head;
2971 u16 history[FLOW_LIMIT_HISTORY];
2972 u8 buckets[];
2973 };
2974
2975 extern int netdev_flow_limit_table_len;
2976 #endif
2977
2978 /*
2979  * Incoming packets are placed on per-CPU queues
2980  */
2981 struct softnet_data {
2982 struct list_head poll_list;
2983 struct sk_buff_head process_queue;
2984
2985
2986 unsigned int processed;
2987 unsigned int time_squeeze;
2988 unsigned int received_rps;
2989 #ifdef CONFIG_RPS
2990 struct softnet_data *rps_ipi_list;
2991 #endif
2992 #ifdef CONFIG_NET_FLOW_LIMIT
2993 struct sd_flow_limit __rcu *flow_limit;
2994 #endif
2995 struct Qdisc *output_queue;
2996 struct Qdisc **output_queue_tailp;
2997 struct sk_buff *completion_queue;
2998 #ifdef CONFIG_XFRM_OFFLOAD
2999 struct sk_buff_head xfrm_backlog;
3000 #endif
3001
3002 struct {
3003 u16 recursion;
3004 u8 more;
3005 } xmit;
3006 #ifdef CONFIG_RPS
3007
3008
3009
3010 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3011
3012
3013 call_single_data_t csd ____cacheline_aligned_in_smp;
3014 struct softnet_data *rps_ipi_next;
3015 unsigned int cpu;
3016 unsigned int input_queue_tail;
3017 #endif
3018 unsigned int dropped;
3019 struct sk_buff_head input_pkt_queue;
3020 struct napi_struct backlog;
3021
3022 };
3023
3024 static inline void input_queue_head_incr(struct softnet_data *sd)
3025 {
3026 #ifdef CONFIG_RPS
3027 sd->input_queue_head++;
3028 #endif
3029 }
3030
3031 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3032 unsigned int *qtail)
3033 {
3034 #ifdef CONFIG_RPS
3035 *qtail = ++sd->input_queue_tail;
3036 #endif
3037 }
3038
3039 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3040
3041 static inline int dev_recursion_level(void)
3042 {
3043 return this_cpu_read(softnet_data.xmit.recursion);
3044 }
3045
3046 #define XMIT_RECURSION_LIMIT 10
3047 static inline bool dev_xmit_recursion(void)
3048 {
3049 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3050 XMIT_RECURSION_LIMIT);
3051 }
3052
3053 static inline void dev_xmit_recursion_inc(void)
3054 {
3055 __this_cpu_inc(softnet_data.xmit.recursion);
3056 }
3057
3058 static inline void dev_xmit_recursion_dec(void)
3059 {
3060 __this_cpu_dec(softnet_data.xmit.recursion);
3061 }
3062
3063 void __netif_schedule(struct Qdisc *q);
3064 void netif_schedule_queue(struct netdev_queue *txq);
3065
3066 static inline void netif_tx_schedule_all(struct net_device *dev)
3067 {
3068 unsigned int i;
3069
3070 for (i = 0; i < dev->num_tx_queues; i++)
3071 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3072 }
3073
3074 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3075 {
3076 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3077 }
3078
3079 /**
3080  *	netif_start_queue - allow transmit
3081  *	@dev: network device
3082  *
3083  *	Allow upper layers to call the device hard_start_xmit routine.
3084  */
3085 static inline void netif_start_queue(struct net_device *dev)
3086 {
3087 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3088 }
3089
3090 static inline void netif_tx_start_all_queues(struct net_device *dev)
3091 {
3092 unsigned int i;
3093
3094 for (i = 0; i < dev->num_tx_queues; i++) {
3095 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3096 netif_tx_start_queue(txq);
3097 }
3098 }
3099
3100 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3101
3102 /**
3103  *	netif_wake_queue - restart transmit
3104  *	@dev: network device
3105  *
3106  *	Allow upper layers to call the device hard_start_xmit routine.
3107  *	Used for flow control when transmit resources are available.
3108  */
3109 static inline void netif_wake_queue(struct net_device *dev)
3110 {
3111 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3112 }
3113
3114 static inline void netif_tx_wake_all_queues(struct net_device *dev)
3115 {
3116 unsigned int i;
3117
3118 for (i = 0; i < dev->num_tx_queues; i++) {
3119 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3120 netif_tx_wake_queue(txq);
3121 }
3122 }
3123
3124 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3125 {
3126 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3127 }
3128
3129 /**
3130  *	netif_stop_queue - stop transmitted packets
3131  *	@dev: network device
3132  *
3133  *	Stop upper layers calling the device hard_start_xmit routine.
3134  *	Used for flow control when transmit resources are unavailable.
3135  */
3136 static inline void netif_stop_queue(struct net_device *dev)
3137 {
3138 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3139 }
3140
3141 void netif_tx_stop_all_queues(struct net_device *dev);
3142 void netdev_update_lockdep_key(struct net_device *dev);
3143
3144 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3145 {
3146 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3147 }
3148
3149 /**
3150  *	netif_queue_stopped - test if transmit queue is flowblocked
3151  *	@dev: network device
3152  *
3153  *	Test if transmit queue on device is currently unable to send.
3154  */
3155 static inline bool netif_queue_stopped(const struct net_device *dev)
3156 {
3157 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3158 }
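
/* Illustrative sketch (not part of this header): the classic flow-control
 * dance between a driver's transmit and TX-completion paths.  When the
 * hardware ring fills up the queue is stopped, and the completion handler
 * wakes it once descriptors have been reclaimed.  The example_ring_*
 * helpers and function names are hypothetical.
 */
static bool example_ring_full(struct net_device *dev);
static bool example_ring_has_room(struct net_device *dev);

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... post skb to the hardware ring ... */

	if (example_ring_full(dev))
		netif_stop_queue(dev);	/* tell the stack to back off */

	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev)
{
	/* ... reclaim completed descriptors ... */

	if (netif_queue_stopped(dev) && example_ring_has_room(dev))
		netif_wake_queue(dev);	/* transmit resources are back */
}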
3159
3160 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3161 {
3162 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3163 }
3164
3165 static inline bool
3166 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3167 {
3168 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3169 }
3170
3171 static inline bool
3172 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3173 {
3174 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3175 }
3176
3177
3178
3179
3180
3181
3182
3183
3184 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3185 {
3186 #ifdef CONFIG_BQL
3187 prefetchw(&dev_queue->dql.num_queued);
3188 #endif
3189 }
3190
3191
3192
3193
3194
3195
3196
3197
3198 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3199 {
3200 #ifdef CONFIG_BQL
3201 prefetchw(&dev_queue->dql.limit);
3202 #endif
3203 }
3204
3205 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3206 unsigned int bytes)
3207 {
3208 #ifdef CONFIG_BQL
3209 dql_queued(&dev_queue->dql, bytes);
3210
3211 if (likely(dql_avail(&dev_queue->dql) >= 0))
3212 return;
3213
3214 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3215
3216 /*
3217  * The XOFF flag must be set before checking the dql_avail below,
3218  * because in netdev_tx_completed_queue we update the dql_completed
3219  * before checking the XOFF flag.
3220  */
3221 smp_mb();
3222
3223 /* check again in case another CPU has just made room available */
3224 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3225 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3226 #endif
3227 }
3228
3229
3230
3231
3232
3233
3234
3235 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3236 unsigned int bytes,
3237 bool xmit_more)
3238 {
3239 if (xmit_more) {
3240 #ifdef CONFIG_BQL
3241 dql_queued(&dev_queue->dql, bytes);
3242 #endif
3243 return netif_tx_queue_stopped(dev_queue);
3244 }
3245 netdev_tx_sent_queue(dev_queue, bytes);
3246 return true;
3247 }
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3259 {
3260 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3261 }
3262
3263 static inline bool __netdev_sent_queue(struct net_device *dev,
3264 unsigned int bytes,
3265 bool xmit_more)
3266 {
3267 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3268 xmit_more);
3269 }
3270
3271 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3272 unsigned int pkts, unsigned int bytes)
3273 {
3274 #ifdef CONFIG_BQL
3275 if (unlikely(!bytes))
3276 return;
3277
3278 dql_completed(&dev_queue->dql, bytes);
3279
3280
3281 /* Without the memory barrier there is a small possibility that
3282  * netdev_tx_sent_queue will miss the update and cause the queue to
3283  * be stopped forever
3284  */
3285 smp_mb();
3286
3287 if (unlikely(dql_avail(&dev_queue->dql) < 0))
3288 return;
3289
3290 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3291 netif_schedule_queue(dev_queue);
3292 #endif
3293 }
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305 static inline void netdev_completed_queue(struct net_device *dev,
3306 unsigned int pkts, unsigned int bytes)
3307 {
3308 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3309 }
3310
3311 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3312 {
3313 #ifdef CONFIG_BQL
3314 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3315 dql_reset(&q->dql);
3316 #endif
3317 }
3318
3319
3320
3321
3322
3323
3324
3325
3326 static inline void netdev_reset_queue(struct net_device *dev_queue)
3327 {
3328 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3329 }
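
/* Illustrative sketch (not part of this header): Byte Queue Limits in a
 * single-queue driver.  The transmit path accounts queued bytes, the
 * completion path accounts what the hardware has drained (which may wake
 * a __QUEUE_STATE_STACK_XOFF stopped queue), and a ring reset clears the
 * counters.  All names other than the netdev_* helpers are hypothetical.
 */
static netdev_tx_t example_bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... hand skb to the hardware ... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

static void example_bql_tx_complete(struct net_device *dev,
				    unsigned int pkts, unsigned int bytes)
{
	/* ... reclaim "pkts"/"bytes" worth of completed descriptors ... */
	netdev_completed_queue(dev, pkts, bytes);
}

static void example_bql_reset(struct net_device *dev)
{
	/* after a ring flush, forget any outstanding BQL accounting */
	netdev_reset_queue(dev);
}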
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3340 {
3341 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3342 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3343 dev->name, queue_index,
3344 dev->real_num_tx_queues);
3345 return 0;
3346 }
3347
3348 return queue_index;
3349 }
3350
3351
3352
3353
3354
3355
3356
3357 static inline bool netif_running(const struct net_device *dev)
3358 {
3359 return test_bit(__LINK_STATE_START, &dev->state);
3360 }
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3377 {
3378 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3379
3380 netif_tx_start_queue(txq);
3381 }
3382
3383
3384
3385
3386
3387
3388
3389
3390 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3391 {
3392 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3393 netif_tx_stop_queue(txq);
3394 }
3395
3396
3397
3398
3399
3400
3401
3402
3403 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3404 u16 queue_index)
3405 {
3406 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3407
3408 return netif_tx_queue_stopped(txq);
3409 }
3410
3411 static inline bool netif_subqueue_stopped(const struct net_device *dev,
3412 struct sk_buff *skb)
3413 {
3414 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3415 }
3416
3417
3418
3419
3420
3421
3422
3423
3424 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3425 {
3426 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3427
3428 netif_tx_wake_queue(txq);
3429 }
3430
3431 #ifdef CONFIG_XPS
3432 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3433 u16 index);
3434 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3435 u16 index, bool is_rxqs_map);
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445 static inline bool netif_attr_test_mask(unsigned long j,
3446 const unsigned long *mask,
3447 unsigned int nr_bits)
3448 {
3449 cpu_max_bits_warn(j, nr_bits);
3450 return test_bit(j, mask);
3451 }
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461 static inline bool netif_attr_test_online(unsigned long j,
3462 const unsigned long *online_mask,
3463 unsigned int nr_bits)
3464 {
3465 cpu_max_bits_warn(j, nr_bits);
3466
3467 if (online_mask)
3468 return test_bit(j, online_mask);
3469
3470 return (j < nr_bits);
3471 }
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3482 unsigned int nr_bits)
3483 {
3484 /* -1 is a legal arg here. */
3485 if (n != -1)
3486 cpu_max_bits_warn(n, nr_bits);
3487
3488 if (srcp)
3489 return find_next_bit(srcp, nr_bits, n + 1);
3490
3491 return n + 1;
3492 }
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503 static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3504 const unsigned long *src2p,
3505 unsigned int nr_bits)
3506 {
3507 /* -1 is a legal arg here. */
3508 if (n != -1)
3509 cpu_max_bits_warn(n, nr_bits);
3510
3511 if (src1p && src2p)
3512 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3513 else if (src1p)
3514 return find_next_bit(src1p, nr_bits, n + 1);
3515 else if (src2p)
3516 return find_next_bit(src2p, nr_bits, n + 1);
3517
3518 return n + 1;
3519 }
3520 #else
3521 static inline int netif_set_xps_queue(struct net_device *dev,
3522 const struct cpumask *mask,
3523 u16 index)
3524 {
3525 return 0;
3526 }
3527
3528 static inline int __netif_set_xps_queue(struct net_device *dev,
3529 const unsigned long *mask,
3530 u16 index, bool is_rxqs_map)
3531 {
3532 return 0;
3533 }
3534 #endif
3535
3536
3537
3538
3539
3540
3541
3542 static inline bool netif_is_multiqueue(const struct net_device *dev)
3543 {
3544 return dev->num_tx_queues > 1;
3545 }
3546
3547 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3548
3549 #ifdef CONFIG_SYSFS
3550 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3551 #else
3552 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3553 unsigned int rxqs)
3554 {
3555 dev->real_num_rx_queues = rxqs;
3556 return 0;
3557 }
3558 #endif
3559
3560 static inline struct netdev_rx_queue *
3561 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3562 {
3563 return dev->_rx + rxq;
3564 }
3565
3566 #ifdef CONFIG_SYSFS
3567 static inline unsigned int get_netdev_rx_queue_index(
3568 struct netdev_rx_queue *queue)
3569 {
3570 struct net_device *dev = queue->dev;
3571 int index = queue - dev->_rx;
3572
3573 BUG_ON(index >= dev->num_rx_queues);
3574 return index;
3575 }
3576 #endif
3577
3578 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3579 int netif_get_num_default_rss_queues(void);
3580
3581 enum skb_free_reason {
3582 SKB_REASON_CONSUMED,
3583 SKB_REASON_DROPPED,
3584 };
3585
3586 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3587 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3588
3589 /* It is not allowed to call kfree_skb() or consume_skb() from hardware
3590  * interrupt context or with hardware interrupts being disabled.
3591  * (in_irq() || irqs_disabled())
3592  *
3593  * We provide four helpers that can be used in following contexts :
3594  *
3595  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3596  *  replacing kfree_skb(skb)
3597  *
3598  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3599  *  Typically used in place of consume_skb(skb) in TX completion path
3600  *
3601  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3602  *  replacing kfree_skb(skb)
3603  *
3604  * dev_consume_skb_any(skb) when consuming a packet from any context.
3605  *  replacing consume_skb(skb)
3606  */
3607
3608 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3609 {
3610 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3611 }
3612
3613 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3614 {
3615 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3616 }
3617
3618 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3619 {
3620 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3621 }
3622
3623 static inline void dev_consume_skb_any(struct sk_buff *skb)
3624 {
3625 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3626 }
3627
3628 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3629 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3630 int netif_rx(struct sk_buff *skb);
3631 int netif_rx_ni(struct sk_buff *skb);
3632 int netif_receive_skb(struct sk_buff *skb);
3633 int netif_receive_skb_core(struct sk_buff *skb);
3634 void netif_receive_skb_list(struct list_head *head);
3635 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3636 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3637 struct sk_buff *napi_get_frags(struct napi_struct *napi);
3638 gro_result_t napi_gro_frags(struct napi_struct *napi);
3639 struct packet_offload *gro_find_receive_by_type(__be16 type);
3640 struct packet_offload *gro_find_complete_by_type(__be16 type);
3641
3642 static inline void napi_free_frags(struct napi_struct *napi)
3643 {
3644 kfree_skb(napi->skb);
3645 napi->skb = NULL;
3646 }
3647
3648 bool netdev_is_rx_handler_busy(struct net_device *dev);
3649 int netdev_rx_handler_register(struct net_device *dev,
3650 rx_handler_func_t *rx_handler,
3651 void *rx_handler_data);
3652 void netdev_rx_handler_unregister(struct net_device *dev);
3653
3654 bool dev_valid_name(const char *name);
3655 int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3656 bool *need_copyout);
3657 int dev_ifconf(struct net *net, struct ifconf *, int);
3658 int dev_ethtool(struct net *net, struct ifreq *);
3659 unsigned int dev_get_flags(const struct net_device *);
3660 int __dev_change_flags(struct net_device *dev, unsigned int flags,
3661 struct netlink_ext_ack *extack);
3662 int dev_change_flags(struct net_device *dev, unsigned int flags,
3663 struct netlink_ext_ack *extack);
3664 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3665 unsigned int gchanges);
3666 int dev_change_name(struct net_device *, const char *);
3667 int dev_set_alias(struct net_device *, const char *, size_t);
3668 int dev_get_alias(const struct net_device *, char *, size_t);
3669 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3670 int __dev_set_mtu(struct net_device *, int);
3671 int dev_validate_mtu(struct net_device *dev, int mtu,
3672 struct netlink_ext_ack *extack);
3673 int dev_set_mtu_ext(struct net_device *dev, int mtu,
3674 struct netlink_ext_ack *extack);
3675 int dev_set_mtu(struct net_device *, int);
3676 int dev_change_tx_queue_len(struct net_device *, unsigned long);
3677 void dev_set_group(struct net_device *, int);
3678 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3679 struct netlink_ext_ack *extack);
3680 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3681 struct netlink_ext_ack *extack);
3682 int dev_change_carrier(struct net_device *, bool new_carrier);
3683 int dev_get_phys_port_id(struct net_device *dev,
3684 struct netdev_phys_item_id *ppid);
3685 int dev_get_phys_port_name(struct net_device *dev,
3686 char *name, size_t len);
3687 int dev_get_port_parent_id(struct net_device *dev,
3688 struct netdev_phys_item_id *ppid, bool recurse);
3689 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3690 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3691 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3692 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3693 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3694 struct netdev_queue *txq, int *ret);
3695
3696 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3697 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3698 int fd, u32 flags);
3699 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3700 enum bpf_netdev_command cmd);
3701 int xdp_umem_query(struct net_device *dev, u16 queue_id);
3702
3703 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3704 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3705 bool is_skb_forwardable(const struct net_device *dev,
3706 const struct sk_buff *skb);
3707
3708 static __always_inline int ____dev_forward_skb(struct net_device *dev,
3709 struct sk_buff *skb)
3710 {
3711 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3712 unlikely(!is_skb_forwardable(dev, skb))) {
3713 atomic_long_inc(&dev->rx_dropped);
3714 kfree_skb(skb);
3715 return NET_RX_DROP;
3716 }
3717
3718 skb_scrub_packet(skb, true);
3719 skb->priority = 0;
3720 return 0;
3721 }
3722
3723 bool dev_nit_active(struct net_device *dev);
3724 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3725
3726 extern int netdev_budget;
3727 extern unsigned int netdev_budget_usecs;
3728
3729
3730 void netdev_run_todo(void);
3731
3732 /**
3733  *	dev_put - release reference to device
3734  *	@dev: network device
3735  *
3736  *	Release reference to device to allow it to be freed.
3737  */
3738 static inline void dev_put(struct net_device *dev)
3739 {
3740 this_cpu_dec(*dev->pcpu_refcnt);
3741 }
3742
3743 /**
3744  *	dev_hold - get reference to device
3745  *	@dev: network device
3746  *
3747  *	Hold reference to device to keep it from being freed.
3748  */
3749 static inline void dev_hold(struct net_device *dev)
3750 {
3751 this_cpu_inc(*dev->pcpu_refcnt);
3752 }
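
/* Illustrative sketch (not part of this header): pinning a device while an
 * asynchronous user keeps a pointer to it.  Every dev_hold() must be paired
 * with a dev_put() once the pointer is dropped, otherwise unregistration
 * will wait forever on the refcount.  "example_ctx" is hypothetical.
 */
struct example_ctx {
	struct net_device *dev;
};

static void example_ctx_attach(struct example_ctx *ctx, struct net_device *dev)
{
	dev_hold(dev);			/* pin the device */
	ctx->dev = dev;
}

static void example_ctx_detach(struct example_ctx *ctx)
{
	dev_put(ctx->dev);		/* unpin; device may now go away */
	ctx->dev = NULL;
}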
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763 void linkwatch_init_dev(struct net_device *dev);
3764 void linkwatch_fire_event(struct net_device *dev);
3765 void linkwatch_forget_dev(struct net_device *dev);
3766
3767 /**
3768  *	netif_carrier_ok - test if carrier present
3769  *	@dev: network device
3770  *
3771  *	Check if carrier is present on device
3772  */
3773 static inline bool netif_carrier_ok(const struct net_device *dev)
3774 {
3775 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3776 }
3777
3778 unsigned long dev_trans_start(struct net_device *dev);
3779
3780 void __netdev_watchdog_up(struct net_device *dev);
3781
3782 void netif_carrier_on(struct net_device *dev);
3783
3784 void netif_carrier_off(struct net_device *dev);
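
/* Illustrative sketch (not part of this header): a driver's link-status
 * handler propagating carrier changes to the stack; netif_carrier_on/off
 * also fire the linkwatch machinery.  "example_link_change" is hypothetical.
 */
static void example_link_change(struct net_device *dev, bool link_up)
{
	if (link_up && !netif_carrier_ok(dev))
		netif_carrier_on(dev);
	else if (!link_up && netif_carrier_ok(dev))
		netif_carrier_off(dev);
}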
3785
3786 /**
3787  *	netif_dormant_on - mark device as dormant.
3788  *	@dev: network device
3789  *
3790  *	Mark device as dormant (as per RFC2863).
3791  *
3792  *	The dormant state indicates that the relevant interface is not
3793  *	actually in a condition to pass packets (i.e., it is not 'up') but is
3794  *	in a "pending" state, waiting for some external event.  For "on-
3795  *	demand" interfaces, this new state identifies the situation where the
3796  *	interface is waiting for events to place it in the up state.
3797  */
3798 static inline void netif_dormant_on(struct net_device *dev)
3799 {
3800 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3801 linkwatch_fire_event(dev);
3802 }
3803
3804 /**
3805  *	netif_dormant_off - set device as not dormant.
3806  *	@dev: network device
3807  *
3808  *	Device is not in dormant state.
3809  */
3810 static inline void netif_dormant_off(struct net_device *dev)
3811 {
3812 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3813 linkwatch_fire_event(dev);
3814 }
3815
3816 /**
3817  *	netif_dormant - test if device is dormant
3818  *	@dev: network device
3819  *
3820  *	Check if device is dormant.
3821  */
3822 static inline bool netif_dormant(const struct net_device *dev)
3823 {
3824 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3825 }
3826
3827 /**
3828  *	netif_oper_up - test if device is operational
3829  *	@dev: network device
3830  *
3831  *	Check if carrier is operational
3832  */
3833
3834 static inline bool netif_oper_up(const struct net_device *dev)
3835 {
3836 return (dev->operstate == IF_OPER_UP ||
3837 dev->operstate == IF_OPER_UNKNOWN);
3838 }
3839
3840 /**
3841  *	netif_device_present - is device available or removed
3842  *	@dev: network device
3843  *
3844  *	Check if device has not been removed from system.
3845  */
3846 static inline bool netif_device_present(struct net_device *dev)
3847 {
3848 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3849 }
3850
3851 void netif_device_detach(struct net_device *dev);
3852
3853 void netif_device_attach(struct net_device *dev);
3854
3855
3856
3857
3858
3859 enum {
3860 NETIF_MSG_DRV = 0x0001,
3861 NETIF_MSG_PROBE = 0x0002,
3862 NETIF_MSG_LINK = 0x0004,
3863 NETIF_MSG_TIMER = 0x0008,
3864 NETIF_MSG_IFDOWN = 0x0010,
3865 NETIF_MSG_IFUP = 0x0020,
3866 NETIF_MSG_RX_ERR = 0x0040,
3867 NETIF_MSG_TX_ERR = 0x0080,
3868 NETIF_MSG_TX_QUEUED = 0x0100,
3869 NETIF_MSG_INTR = 0x0200,
3870 NETIF_MSG_TX_DONE = 0x0400,
3871 NETIF_MSG_RX_STATUS = 0x0800,
3872 NETIF_MSG_PKTDATA = 0x1000,
3873 NETIF_MSG_HW = 0x2000,
3874 NETIF_MSG_WOL = 0x4000,
3875 };
3876
3877 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3878 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3879 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3880 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3881 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3882 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3883 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3884 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3885 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3886 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3887 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3888 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3889 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3890 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3891 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3892
3893 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3894 {
3895 /* use default */
3896 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3897 return default_msg_enable_bits;
3898 if (debug_value == 0)
3899 return 0;
3900 /* set low N bits */
3901 return (1U << debug_value) - 1;
3902 }
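
/* Illustrative sketch (not part of this header): seeding a driver's
 * msg_enable mask from a "debug" module parameter and gating log output
 * with the netif_msg_*() tests.  "example_priv" and the function are
 * hypothetical; -1 is the conventional "use driver defaults" value.
 */
struct example_priv {
	u32 msg_enable;
};

static void example_init_msg_enable(struct example_priv *priv, int debug)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_drv(priv))
		pr_info("driver message logging enabled\n");
}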
3903
3904 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3905 {
3906 spin_lock(&txq->_xmit_lock);
3907 txq->xmit_lock_owner = cpu;
3908 }
3909
3910 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3911 {
3912 __acquire(&txq->_xmit_lock);
3913 return true;
3914 }
3915
3916 static inline void __netif_tx_release(struct netdev_queue *txq)
3917 {
3918 __release(&txq->_xmit_lock);
3919 }
3920
3921 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3922 {
3923 spin_lock_bh(&txq->_xmit_lock);
3924 txq->xmit_lock_owner = smp_processor_id();
3925 }
3926
3927 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3928 {
3929 bool ok = spin_trylock(&txq->_xmit_lock);
3930 if (likely(ok))
3931 txq->xmit_lock_owner = smp_processor_id();
3932 return ok;
3933 }
3934
3935 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3936 {
3937 txq->xmit_lock_owner = -1;
3938 spin_unlock(&txq->_xmit_lock);
3939 }
3940
3941 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3942 {
3943 txq->xmit_lock_owner = -1;
3944 spin_unlock_bh(&txq->_xmit_lock);
3945 }
3946
3947 static inline void txq_trans_update(struct netdev_queue *txq)
3948 {
3949 if (txq->xmit_lock_owner != -1)
3950 txq->trans_start = jiffies;
3951 }
3952
3953 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3954 static inline void netif_trans_update(struct net_device *dev)
3955 {
3956 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3957
3958 if (txq->trans_start != jiffies)
3959 txq->trans_start = jiffies;
3960 }
3961
3962 /**
3963  *	netif_tx_lock - grab network device transmit lock
3964  *	@dev: network device
3965  *
3966  *	Get network device transmit lock
3967  */
3968 static inline void netif_tx_lock(struct net_device *dev)
3969 {
3970 unsigned int i;
3971 int cpu;
3972
3973 spin_lock(&dev->tx_global_lock);
3974 cpu = smp_processor_id();
3975 for (i = 0; i < dev->num_tx_queues; i++) {
3976 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3977
3978 /* We are the only thread of execution doing a
3979  * freeze, but we have to grab the _xmit_lock in
3980  * order to synchronize with threads which are in
3981  * the ->hard_start_xmit() handler and already
3982  * checked the frozen bit.
3983  */
3984 __netif_tx_lock(txq, cpu);
3985 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3986 __netif_tx_unlock(txq);
3987 }
3988 }
3989
3990 static inline void netif_tx_lock_bh(struct net_device *dev)
3991 {
3992 local_bh_disable();
3993 netif_tx_lock(dev);
3994 }
3995
3996 static inline void netif_tx_unlock(struct net_device *dev)
3997 {
3998 unsigned int i;
3999
4000 for (i = 0; i < dev->num_tx_queues; i++) {
4001 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4002
4003 /* No need to grab the _xmit_lock here.  If the
4004  * queue is not stopped for another reason, we
4005  * force a schedule.
4006  */
4007 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
4008 netif_schedule_queue(txq);
4009 }
4010 spin_unlock(&dev->tx_global_lock);
4011 }
4012
4013 static inline void netif_tx_unlock_bh(struct net_device *dev)
4014 {
4015 netif_tx_unlock(dev);
4016 local_bh_enable();
4017 }
4018
4019 #define HARD_TX_LOCK(dev, txq, cpu) { \
4020 if ((dev->features & NETIF_F_LLTX) == 0) { \
4021 __netif_tx_lock(txq, cpu); \
4022 } else { \
4023 __netif_tx_acquire(txq); \
4024 } \
4025 }
4026
4027 #define HARD_TX_TRYLOCK(dev, txq) \
4028 (((dev->features & NETIF_F_LLTX) == 0) ? \
4029 __netif_tx_trylock(txq) : \
4030 __netif_tx_acquire(txq))
4031
4032 #define HARD_TX_UNLOCK(dev, txq) { \
4033 if ((dev->features & NETIF_F_LLTX) == 0) { \
4034 __netif_tx_unlock(txq); \
4035 } else { \
4036 __netif_tx_release(txq); \
4037 } \
4038 }
4039
4040 static inline void netif_tx_disable(struct net_device *dev)
4041 {
4042 unsigned int i;
4043 int cpu;
4044
4045 local_bh_disable();
4046 cpu = smp_processor_id();
4047 for (i = 0; i < dev->num_tx_queues; i++) {
4048 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4049
4050 __netif_tx_lock(txq, cpu);
4051 netif_tx_stop_queue(txq);
4052 __netif_tx_unlock(txq);
4053 }
4054 local_bh_enable();
4055 }
4056
4057 static inline void netif_addr_lock(struct net_device *dev)
4058 {
4059 spin_lock(&dev->addr_list_lock);
4060 }
4061
4062 static inline void netif_addr_lock_bh(struct net_device *dev)
4063 {
4064 spin_lock_bh(&dev->addr_list_lock);
4065 }
4066
4067 static inline void netif_addr_unlock(struct net_device *dev)
4068 {
4069 spin_unlock(&dev->addr_list_lock);
4070 }
4071
4072 static inline void netif_addr_unlock_bh(struct net_device *dev)
4073 {
4074 spin_unlock_bh(&dev->addr_list_lock);
4075 }
4076
4077
4078
4079
4080
4081 #define for_each_dev_addr(dev, ha) \
4082 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4083
4084
4085
4086 void ether_setup(struct net_device *dev);
4087
4088
4089 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4090 unsigned char name_assign_type,
4091 void (*setup)(struct net_device *),
4092 unsigned int txqs, unsigned int rxqs);
4093 int dev_get_valid_name(struct net *net, struct net_device *dev,
4094 const char *name);
4095
4096 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4097 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4098
4099 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4100 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4101 count)
4102
4103 int register_netdev(struct net_device *dev);
4104 void unregister_netdev(struct net_device *dev);
4105
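
/* Illustrative sketch (not part of this header): the minimal lifecycle of a
 * software net_device - allocate with a setup callback, register, and tear
 * down in the reverse order.  "example_setup", "example_netdev_ops" and the
 * "example%d" name template are hypothetical.
 */
static const struct net_device_ops example_netdev_ops;

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);			/* sane Ethernet defaults */
	dev->netdev_ops = &example_netdev_ops;
}

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "example%d", NET_NAME_UNKNOWN, example_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {		/* takes and drops RTNL itself */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);			/* must not be called under RTNL */
	free_netdev(dev);
}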
4106
4107 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4108 struct netdev_hw_addr_list *from_list, int addr_len);
4109 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4110 struct netdev_hw_addr_list *from_list, int addr_len);
4111 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4112 struct net_device *dev,
4113 int (*sync)(struct net_device *, const unsigned char *),
4114 int (*unsync)(struct net_device *,
4115 const unsigned char *));
4116 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4117 struct net_device *dev,
4118 int (*sync)(struct net_device *,
4119 const unsigned char *, int),
4120 int (*unsync)(struct net_device *,
4121 const unsigned char *, int));
4122 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4123 struct net_device *dev,
4124 int (*unsync)(struct net_device *,
4125 const unsigned char *, int));
4126 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4127 struct net_device *dev,
4128 int (*unsync)(struct net_device *,
4129 const unsigned char *));
4130 void __hw_addr_init(struct netdev_hw_addr_list *list);
4131
4132
4133 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4134 unsigned char addr_type);
4135 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4136 unsigned char addr_type);
4137 void dev_addr_flush(struct net_device *dev);
4138 int dev_addr_init(struct net_device *dev);
4139
4140
4141 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4142 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4143 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4144 int dev_uc_sync(struct net_device *to, struct net_device *from);
4145 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4146 void dev_uc_unsync(struct net_device *to, struct net_device *from);
4147 void dev_uc_flush(struct net_device *dev);
4148 void dev_uc_init(struct net_device *dev);
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159 static inline int __dev_uc_sync(struct net_device *dev,
4160 int (*sync)(struct net_device *,
4161 const unsigned char *),
4162 int (*unsync)(struct net_device *,
4163 const unsigned char *))
4164 {
4165 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4166 }
4167
4168
4169
4170
4171
4172
4173
4174
4175 static inline void __dev_uc_unsync(struct net_device *dev,
4176 int (*unsync)(struct net_device *,
4177 const unsigned char *))
4178 {
4179 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4180 }
4181
4182
4183 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4184 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4185 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4186 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4187 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4188 int dev_mc_sync(struct net_device *to, struct net_device *from);
4189 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4190 void dev_mc_unsync(struct net_device *to, struct net_device *from);
4191 void dev_mc_flush(struct net_device *dev);
4192 void dev_mc_init(struct net_device *dev);
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203 static inline int __dev_mc_sync(struct net_device *dev,
4204 int (*sync)(struct net_device *,
4205 const unsigned char *),
4206 int (*unsync)(struct net_device *,
4207 const unsigned char *))
4208 {
4209 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4210 }
4211
4212
4213
4214
4215
4216
4217
4218
4219 static inline void __dev_mc_unsync(struct net_device *dev,
4220 int (*unsync)(struct net_device *,
4221 const unsigned char *))
4222 {
4223 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4224 }
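
/* Illustrative sketch (not part of this header): an ndo_set_rx_mode
 * implementation that mirrors the kernel's unicast/multicast address lists
 * into hardware filters through the sync/unsync callbacks.  The example_*
 * names are hypothetical; the callbacks would program/remove one address.
 */
static int example_addr_sync(struct net_device *dev, const unsigned char *addr)
{
	/* ... add "addr" to the hardware filter table ... */
	return 0;
}

static int example_addr_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* ... remove "addr" from the hardware filter table ... */
	return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
	/* netif_addr_lock_bh() is already held by the caller */
	__dev_uc_sync(dev, example_addr_sync, example_addr_unsync);
	__dev_mc_sync(dev, example_addr_sync, example_addr_unsync);
}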
4225
4226
4227 void dev_set_rx_mode(struct net_device *dev);
4228 void __dev_set_rx_mode(struct net_device *dev);
4229 int dev_set_promiscuity(struct net_device *dev, int inc);
4230 int dev_set_allmulti(struct net_device *dev, int inc);
4231 void netdev_state_change(struct net_device *dev);
4232 void netdev_notify_peers(struct net_device *dev);
4233 void netdev_features_change(struct net_device *dev);
4234
4235 void dev_load(struct net *net, const char *name);
4236 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4237 struct rtnl_link_stats64 *storage);
4238 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4239 const struct net_device_stats *netdev_stats);
4240
4241 extern int netdev_max_backlog;
4242 extern int netdev_tstamp_prequeue;
4243 extern int weight_p;
4244 extern int dev_weight_rx_bias;
4245 extern int dev_weight_tx_bias;
4246 extern int dev_rx_weight;
4247 extern int dev_tx_weight;
4248 extern int gro_normal_batch;
4249
4250 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4251 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4252 struct list_head **iter);
4253 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4254 struct list_head **iter);
4255
4256
4257 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4258 for (iter = &(dev)->adj_list.upper, \
4259 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4260 updev; \
4261 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4262
4263 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4264 int (*fn)(struct net_device *upper_dev,
4265 void *data),
4266 void *data);
4267
4268 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4269 struct net_device *upper_dev);
4270
4271 bool netdev_has_any_upper_dev(struct net_device *dev);
4272
4273 void *netdev_lower_get_next_private(struct net_device *dev,
4274 struct list_head **iter);
4275 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4276 struct list_head **iter);
4277
4278 #define netdev_for_each_lower_private(dev, priv, iter) \
4279 for (iter = (dev)->adj_list.lower.next, \
4280 priv = netdev_lower_get_next_private(dev, &(iter)); \
4281 priv; \
4282 priv = netdev_lower_get_next_private(dev, &(iter)))
4283
4284 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4285 for (iter = &(dev)->adj_list.lower, \
4286 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4287 priv; \
4288 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4289
4290 void *netdev_lower_get_next(struct net_device *dev,
4291 struct list_head **iter);
4292
4293 #define netdev_for_each_lower_dev(dev, ldev, iter) \
4294 for (iter = (dev)->adj_list.lower.next, \
4295 ldev = netdev_lower_get_next(dev, &(iter)); \
4296 ldev; \
4297 ldev = netdev_lower_get_next(dev, &(iter)))
4298
4299 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4300 struct list_head **iter);
4301 int netdev_walk_all_lower_dev(struct net_device *dev,
4302 int (*fn)(struct net_device *lower_dev,
4303 void *data),
4304 void *data);
4305 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4306 int (*fn)(struct net_device *lower_dev,
4307 void *data),
4308 void *data);
4309
4310 void *netdev_adjacent_get_private(struct list_head *adj_list);
4311 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4312 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4313 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4314 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4315 struct netlink_ext_ack *extack);
4316 int netdev_master_upper_dev_link(struct net_device *dev,
4317 struct net_device *upper_dev,
4318 void *upper_priv, void *upper_info,
4319 struct netlink_ext_ack *extack);
4320 void netdev_upper_dev_unlink(struct net_device *dev,
4321 struct net_device *upper_dev);
4322 int netdev_adjacent_change_prepare(struct net_device *old_dev,
4323 struct net_device *new_dev,
4324 struct net_device *dev,
4325 struct netlink_ext_ack *extack);
4326 void netdev_adjacent_change_commit(struct net_device *old_dev,
4327 struct net_device *new_dev,
4328 struct net_device *dev);
4329 void netdev_adjacent_change_abort(struct net_device *old_dev,
4330 struct net_device *new_dev,
4331 struct net_device *dev);
4332 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4333 void *netdev_lower_dev_get_private(struct net_device *dev,
4334 struct net_device *lower_dev);
4335 void netdev_lower_state_changed(struct net_device *lower_dev,
4336 void *lower_state_info);
4337
4338
4339 #define NETDEV_RSS_KEY_LEN 52
4340 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4341 void netdev_rss_key_fill(void *buffer, size_t len);
4342
4343 int skb_checksum_help(struct sk_buff *skb);
4344 int skb_crc32c_csum_help(struct sk_buff *skb);
4345 int skb_csum_hwoffload_help(struct sk_buff *skb,
4346 const netdev_features_t features);
4347
4348 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4349 netdev_features_t features, bool tx_path);
4350 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4351 netdev_features_t features);
4352
4353 struct netdev_bonding_info {
4354 ifslave slave;
4355 ifbond master;
4356 };
4357
4358 struct netdev_notifier_bonding_info {
4359 struct netdev_notifier_info info;
4360 struct netdev_bonding_info bonding_info;
4361 };
4362
4363 void netdev_bonding_info_change(struct net_device *dev,
4364 struct netdev_bonding_info *bonding_info);
4365
4366 static inline
4367 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4368 {
4369 return __skb_gso_segment(skb, features, true);
4370 }
4371 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4372
4373 static inline bool can_checksum_protocol(netdev_features_t features,
4374 __be16 protocol)
4375 {
4376 if (protocol == htons(ETH_P_FCOE))
4377 return !!(features & NETIF_F_FCOE_CRC);
4378
4379
4380
4381 if (features & NETIF_F_HW_CSUM) {
4382 /* Can checksum everything? */
4383 return true;
4384 }
4385
4386 switch (protocol) {
4387 case htons(ETH_P_IP):
4388 return !!(features & NETIF_F_IP_CSUM);
4389 case htons(ETH_P_IPV6):
4390 return !!(features & NETIF_F_IPV6_CSUM);
4391 default:
4392 return false;
4393 }
4394 }
4395
4396 #ifdef CONFIG_BUG
4397 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4398 #else
4399 static inline void netdev_rx_csum_fault(struct net_device *dev,
4400 struct sk_buff *skb)
4401 {
4402 }
4403 #endif
4404
4405 void net_enable_timestamp(void);
4406 void net_disable_timestamp(void);
4407
4408 #ifdef CONFIG_PROC_FS
4409 int __init dev_proc_init(void);
4410 #else
4411 #define dev_proc_init() 0
4412 #endif
4413
4414 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
4415 struct sk_buff *skb, struct net_device *dev,
4416 bool more)
4417 {
4418 __this_cpu_write(softnet_data.xmit.more, more);
4419 return ops->ndo_start_xmit(skb, dev);
4420 }
4421
4422 static inline bool netdev_xmit_more(void)
4423 {
4424 return __this_cpu_read(softnet_data.xmit.more);
4425 }
4426
4427 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4428 struct netdev_queue *txq, bool more)
4429 {
4430 const struct net_device_ops *ops = dev->netdev_ops;
4431 netdev_tx_t rc;
4432
4433 rc = __netdev_start_xmit(ops, skb, dev, more);
4434 if (rc == NETDEV_TX_OK)
4435 txq_trans_update(txq);
4436
4437 return rc;
4438 }
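
/* Illustrative sketch (not part of this header): roughly how the core hands
 * one skb to the driver - grab the per-queue xmit lock (unless the device is
 * NETIF_F_LLTX), check the queue state and enter ndo_start_xmit() through
 * netdev_start_xmit().  Simplified; no requeueing or accounting.
 */
static netdev_tx_t example_xmit_one(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq)
{
	netdev_tx_t rc = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		rc = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}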
4439
4440 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4441 const void *ns);
4442 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4443 const void *ns);
4444
4445 static inline int netdev_class_create_file(const struct class_attribute *class_attr)
4446 {
4447 return netdev_class_create_file_ns(class_attr, NULL);
4448 }
4449
4450 static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
4451 {
4452 netdev_class_remove_file_ns(class_attr, NULL);
4453 }
4454
4455 extern const struct kobj_ns_type_operations net_ns_type_operations;
4456
4457 const char *netdev_drivername(const struct net_device *dev);
4458
4459 void linkwatch_run_queue(void);
4460
4461 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4462 netdev_features_t f2)
4463 {
4464 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4465 if (f1 & NETIF_F_HW_CSUM)
4466 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4467 else
4468 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4469 }
4470
4471 return f1 & f2;
4472 }
4473
4474 static inline netdev_features_t netdev_get_wanted_features(
4475 struct net_device *dev)
4476 {
4477 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4478 }
4479 netdev_features_t netdev_increment_features(netdev_features_t all,
4480 netdev_features_t one, netdev_features_t mask);
4481
4482
4483
4484
4485
4486 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4487 netdev_features_t mask)
4488 {
4489 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4490 }
4491
4492 int __netdev_update_features(struct net_device *dev);
4493 void netdev_update_features(struct net_device *dev);
4494 void netdev_change_features(struct net_device *dev);
4495
4496 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4497 struct net_device *dev);
4498
4499 netdev_features_t passthru_features_check(struct sk_buff *skb,
4500 struct net_device *dev,
4501 netdev_features_t features);
4502 netdev_features_t netif_skb_features(struct sk_buff *skb);
4503
4504 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4505 {
4506 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4507
4508 /* check flags correspondence */
4509 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4510 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4511 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4512 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4513 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4514 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4515 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4516 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4517 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4518 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4519 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4520 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4521 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4522 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4523 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4524 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4525 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
4526 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4527
4528 return (features & feature) == feature;
4529 }
4530
4531 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4532 {
4533 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4534 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4535 }
4536
4537 static inline bool netif_needs_gso(struct sk_buff *skb,
4538 netdev_features_t features)
4539 {
4540 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4541 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4542 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4543 }
4544
4545 static inline void netif_set_gso_max_size(struct net_device *dev,
4546 unsigned int size)
4547 {
4548 dev->gso_max_size = size;
4549 }
4550
4551 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4552 int pulled_hlen, u16 mac_offset,
4553 int mac_len)
4554 {
4555 skb->protocol = protocol;
4556 skb->encapsulation = 1;
4557 skb_push(skb, pulled_hlen);
4558 skb_reset_transport_header(skb);
4559 skb->mac_header = mac_offset;
4560 skb->network_header = skb->mac_header + mac_len;
4561 skb->mac_len = mac_len;
4562 }
4563
4564 static inline bool netif_is_macsec(const struct net_device *dev)
4565 {
4566 return dev->priv_flags & IFF_MACSEC;
4567 }
4568
4569 static inline bool netif_is_macvlan(const struct net_device *dev)
4570 {
4571 return dev->priv_flags & IFF_MACVLAN;
4572 }
4573
4574 static inline bool netif_is_macvlan_port(const struct net_device *dev)
4575 {
4576 return dev->priv_flags & IFF_MACVLAN_PORT;
4577 }
4578
4579 static inline bool netif_is_bond_master(const struct net_device *dev)
4580 {
4581 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4582 }
4583
4584 static inline bool netif_is_bond_slave(const struct net_device *dev)
4585 {
4586 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4587 }
4588
4589 static inline bool netif_supports_nofcs(struct net_device *dev)
4590 {
4591 return dev->priv_flags & IFF_SUPP_NOFCS;
4592 }
4593
4594 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4595 {
4596 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4597 }
4598
4599 static inline bool netif_is_l3_master(const struct net_device *dev)
4600 {
4601 return dev->priv_flags & IFF_L3MDEV_MASTER;
4602 }
4603
4604 static inline bool netif_is_l3_slave(const struct net_device *dev)
4605 {
4606 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4607 }
4608
4609 static inline bool netif_is_bridge_master(const struct net_device *dev)
4610 {
4611 return dev->priv_flags & IFF_EBRIDGE;
4612 }
4613
4614 static inline bool netif_is_bridge_port(const struct net_device *dev)
4615 {
4616 return dev->priv_flags & IFF_BRIDGE_PORT;
4617 }
4618
4619 static inline bool netif_is_ovs_master(const struct net_device *dev)
4620 {
4621 return dev->priv_flags & IFF_OPENVSWITCH;
4622 }
4623
4624 static inline bool netif_is_ovs_port(const struct net_device *dev)
4625 {
4626 return dev->priv_flags & IFF_OVS_DATAPATH;
4627 }
4628
4629 static inline bool netif_is_team_master(const struct net_device *dev)
4630 {
4631 return dev->priv_flags & IFF_TEAM;
4632 }
4633
4634 static inline bool netif_is_team_port(const struct net_device *dev)
4635 {
4636 return dev->priv_flags & IFF_TEAM_PORT;
4637 }
4638
4639 static inline bool netif_is_lag_master(const struct net_device *dev)
4640 {
4641 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4642 }
4643
4644 static inline bool netif_is_lag_port(const struct net_device *dev)
4645 {
4646 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4647 }
4648
4649 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4650 {
4651 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4652 }
4653
4654 static inline bool netif_is_failover(const struct net_device *dev)
4655 {
4656 return dev->priv_flags & IFF_FAILOVER;
4657 }
4658
4659 static inline bool netif_is_failover_slave(const struct net_device *dev)
4660 {
4661 return dev->priv_flags & IFF_FAILOVER_SLAVE;
4662 }
4663
4664
4665 static inline void netif_keep_dst(struct net_device *dev)
4666 {
4667 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4668 }
4669
4670
4671 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4672 {
4673
4674 return dev->priv_flags & IFF_MACSEC;
4675 }

extern struct pernet_operations __net_initdata	loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
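
/*
 * Illustrative sketch only, not part of the original header: typical use of
 * the message helpers above.  netdev_warn_once() keeps a recurring, expected
 * condition from flooding the log, while netdev_err() reports hard failures
 * every time.  The function name example_rx_error() is hypothetical.
 */
static inline void example_rx_error(struct net_device *dev, int err)
{
	if (err == -ENOMEM)
		/* Expected under memory pressure: warn only once. */
		netdev_warn_once(dev, "rx allocation failure\n");
	else
		netdev_err(dev, "rx error: %d\n", err);
}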

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/* netdev_WARN() reports like netdev_printk(), but uses WARN()/WARN_ONCE()
 * to get the message out, so it also includes the file/line information
 * and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif
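
/*
 * Illustrative sketch only, not part of the original header: the msg_enable
 * pattern the netif_* helpers above expect.  A driver keeps a msg_enable
 * bitmap in its private struct, fills it once with netif_msg_init(), and the
 * per-category helpers then filter on it.  The struct and function names
 * (example_priv, example_*) are hypothetical; "debug" would typically be a
 * module parameter.
 */
struct example_priv {
	struct net_device *dev;
	u32 msg_enable;
};

static inline void example_init_msg(struct example_priv *priv, int debug)
{
	/* Convert the module's "debug" level into msg_enable bits. */
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
}

static inline void example_link_change(struct example_priv *priv, bool up)
{
	/* Printed only when the NETIF_MSG_LINK bit is set in msg_enable. */
	netif_info(priv, link, priv->dev, "link %s\n", up ? "up" : "down");
	netif_dbg(priv, link, priv->dev, "msg_enable=%#x\n", priv->msg_enable);
}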

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
	do {                                                              \
		if (cond)                                                 \
			netif_dbg(priv, type, netdev, fmt, ##args);       \
		else                                                      \
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)
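
/*
 * Illustrative sketch only, not part of the original header: netif_cond_dbg()
 * demoting an expected -ENOMEM to a debug message while other failures stay
 * at error level.  Reuses the hypothetical example_priv from the sketch
 * above; example_rx_refill_report() is likewise hypothetical.
 */
static inline void example_rx_refill_report(struct example_priv *priv, int ret)
{
	netif_cond_dbg(priv, rx_err, priv->dev, ret == -ENOMEM, err,
		       "rx refill failed: %d\n", ret);
}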

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * Hash table size/mask for the ptype_base packet-type lists in
 * net/core/dev.c: handlers are bucketed on the low nibble of the
 * protocol (EtherType) value.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
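
/*
 * Illustrative sketch only, not part of the original header: how the core
 * selects a ptype_base bucket with this mask.  The function name
 * example_ptype_bucket() is hypothetical.
 */
static inline unsigned int example_ptype_bucket(__be16 protocol)
{
	/* e.g. ETH_P_IP (0x0800) hashes to bucket 0 */
	return ntohs(protocol) & PTYPE_HASH_MASK;
}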

/* Always-available device that drops all transmitted packets; dst entries
 * can be re-pointed at it when their original device goes away.
 */
extern struct net_device *blackhole_netdev;

#endif	/* _LINUX_NETDEVICE_H */