This source file includes following definitions.
- vlan_eth_hdr
- is_vlan_dev
- vlan_get_rx_ctag_filter_info
- vlan_drop_rx_ctag_filter_info
- vlan_get_rx_stag_filter_info
- vlan_drop_rx_stag_filter_info
- vlan_dev_get_egress_qos_mask
- __vlan_find_dev_deep_rcu
- vlan_for_each
- vlan_dev_real_dev
- vlan_dev_vlan_id
- vlan_dev_vlan_proto
- vlan_dev_get_egress_qos_mask
- vlan_do_receive
- vlan_vid_add
- vlan_vid_del
- vlan_vids_add_by_dev
- vlan_vids_del_by_dev
- vlan_uses_dev
- eth_type_vlan
- vlan_hw_offload_capable
- __vlan_insert_inner_tag
- __vlan_insert_tag
- vlan_insert_inner_tag
- vlan_insert_tag
- vlan_insert_tag_set_proto
- __vlan_hwaccel_clear_tag
- __vlan_hwaccel_copy_tag
- __vlan_hwaccel_push_inside
- __vlan_hwaccel_put_tag
- __vlan_get_tag
- __vlan_hwaccel_get_tag
- vlan_get_tag
- __vlan_get_protocol
- vlan_get_protocol
- vlan_set_encap_proto
- skb_vlan_tagged
- skb_vlan_tagged_multi
- vlan_features_check
- compare_vlan_header
1
2
3
4
5
6
7 #ifndef _LINUX_IF_VLAN_H_
8 #define _LINUX_IF_VLAN_H_
9
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/rtnetlink.h>
13 #include <linux/bug.h>
14 #include <uapi/linux/if_vlan.h>
15
#define VLAN_HLEN	4		/* additional bytes a VLAN tag adds
					 * on top of the Ethernet header
					 */
#define VLAN_ETH_HLEN	18		/* total octets in tagged header */
#define VLAN_ETH_ZLEN	64		/* min octets in tagged frame sans FCS */

/* Per 802.3ac a VLAN-tagged frame may be 4 bytes longer than untagged. */
#define VLAN_ETH_DATA_LEN	1500	/* max octets in payload */
#define VLAN_ETH_FRAME_LEN	1518	/* max octets in tagged frame sans FCS */
27
28
29
30
31
32
/**
 *	struct vlan_hdr - vlan header
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};
37
38
39
40
41
42
43
44
45
/**
 *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 *	@h_dest: destination ethernet address
 *	@h_source: source ethernet address
 *	@h_vlan_proto: ethernet protocol (the VLAN TPID)
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
53
54 #include <linux/skbuff.h>
55
/* Return the skb's MAC header viewed as an 802.1Q Ethernet header. */
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	unsigned char *mac = skb_mac_header(skb);

	return (struct vlan_ethhdr *)mac;
}
60
#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / DEI */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* hook for the VLAN module's private ioctl handler */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
69
70 static inline bool is_vlan_dev(const struct net_device *dev)
71 {
72 return dev->priv_flags & IFF_802_1Q_VLAN;
73 }
74
/* Accessors for the hardware-accelerated VLAN tag carried in skb metadata. */
#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
80
81 static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
82 {
83 ASSERT_RTNL();
84 return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
85 }
86
87 static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
88 {
89 ASSERT_RTNL();
90 call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
91 }
92
93 static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
94 {
95 ASSERT_RTNL();
96 return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
97 }
98
99 static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
100 {
101 ASSERT_RTNL();
102 call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
103 }
104
105
106
107
108
109
110
111
112
113
114
115
/**
 *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 *	@rx_packets: number of received packets
 *	@rx_bytes: number of received bytes
 *	@rx_multicast: number of received multicast packets
 *	@tx_packets: number of transmitted packets
 *	@tx_bytes: number of transmitted bytes
 *	@syncp: synchronization point for the 64-bit counters above
 *	@rx_errors: number of rx errors
 *	@tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};
126
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

/* Find the VLAN device stacked (possibly through bridges/bonds) on
 * @real_dev for (@vlan_proto, @vlan_id); RCU read lock must be held.
 */
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
/* Invoke @action for each VID configured on @dev; stops on non-zero return. */
extern int vlan_for_each(struct net_device *dev,
			 int (*action)(struct net_device *dev, int vid,
				       void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
137
138
139
140
141
142
143
/**
 *	struct vlan_priority_tci_mapping - vlan egress priority mappings
 *	@priority: skb priority to match
 *	@vlan_qos: resulting vlan_qos value (TCI priority bits,
 *		   see vlan_dev_get_egress_qos_mask())
 *	@next: pointer to next struct in the singly linked hash chain
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};
149
150 struct proc_dir_entry;
151 struct netpoll;
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
/**
 *	struct vlan_dev_priv - VLAN private device data
 *	@nr_ingress_mappings: number of ingress priority mappings
 *	@ingress_priority_map: ingress priority mappings
 *	@nr_egress_mappings: number of egress priority mappings
 *	@egress_priority_map: hash of egress priority mappings
 *	@vlan_proto: VLAN encapsulation protocol
 *	@vlan_id: VLAN identifier
 *	@flags: device flags
 *	@real_dev: underlying netdevice
 *	@real_dev_addr: address of underlying netdevice
 *	@dent: proc entry, pointer to a proc_dir_entry
 *	@vlan_pcpu_stats: ptr to percpu rx stats
 *	@netpoll: netpoll state (only with CONFIG_NET_POLL_CONTROLLER)
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
};
186
/* Accessor for the VLAN-specific private area of a VLAN net_device. */
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	struct vlan_dev_priv *priv = netdev_priv(dev);

	return priv;
}
191
192 static inline u16
193 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
194 {
195 struct vlan_priority_tci_mapping *mp;
196
197 smp_rmb();
198
199 mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
200 while (mp) {
201 if (mp->priority == skprio) {
202 return mp->vlan_qos;
203
204
205 }
206 mp = mp->next;
207 }
208 return 0;
209 }
210
/* Hand a received, tagged skb to the VLAN code; may replace *skb. */
extern bool vlan_do_receive(struct sk_buff **skb);

/* Add/remove a (proto, vid) filter entry on real device @dev. */
extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

/* Propagate every VID registered on @by_dev onto/off of @dev. */
extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);
222
223 #else
/* !CONFIG_VLAN_8021Q: inert stubs so callers build without the VLAN core.
 * Functions that are only meaningful on a VLAN device BUG(), since they can
 * never legitimately be reached when VLAN support is compiled out.
 */
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline int
vlan_for_each(struct net_device *dev,
	      int (*action)(struct net_device *dev, int vid, void *arg),
	      void *arg)
{
	return 0;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
292 #endif
293
294
295
296
297
298
299
300 static inline bool eth_type_vlan(__be16 ethertype)
301 {
302 switch (ethertype) {
303 case htons(ETH_P_8021Q):
304 case htons(ETH_P_8021AD):
305 return true;
306 default:
307 return false;
308 }
309 }
310
311 static inline bool vlan_hw_offload_capable(netdev_features_t features,
312 __be16 proto)
313 {
314 if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
315 return true;
316 if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
317 return true;
318 return false;
319 }
320
321
322
323
324
325
326
327
328
329
330
331
332
/**
 * __vlan_insert_inner_tag - inner VLAN tag insertion
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len.
 * Returns error (-ENOMEM) if skb_cow_head() fails, 0 on success.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}
369
370
371
372
373
374
375
376
377
378
379
380
/**
 * __vlan_insert_tag - regular VLAN tag insertion
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns error (-ENOMEM) if skb_cow_head() fails, 0 on success.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402 static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
403 __be16 vlan_proto,
404 u16 vlan_tci,
405 unsigned int mac_len)
406 {
407 int err;
408
409 err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
410 if (err) {
411 dev_kfree_skb_any(skb);
412 return NULL;
413 }
414 return skb;
415 }
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
/**
 * vlan_insert_tag - regular VLAN tag insertion
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns a VLAN tagged skb. In case of error, the skb is freed and NULL
 * is returned.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
436
437
438
439
440
441
442
443
444
445
446
447
448
449 static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
450 __be16 vlan_proto,
451 u16 vlan_tci)
452 {
453 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
454 if (skb)
455 skb->protocol = vlan_proto;
456 return skb;
457 }
458
459
460
461
462
463
464
/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_present = 0;
}
469
470
471
472
473
474
475
476
/**
 * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
 * @dst: skbuff to copy to
 * @src: skbuff to copy from
 *
 * Copies VLAN information from @src to @dst (for branchless code)
 */
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->vlan_present = src->vlan_present;
	dst->vlan_proto = src->vlan_proto;
	dst->vlan_tci = src->vlan_tci;
}
483
484
485
486
487
488
489
490
491
492
493 static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
494 {
495 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
496 skb_vlan_tag_get(skb));
497 if (likely(skb))
498 __vlan_hwaccel_clear_tag(skb);
499 return skb;
500 }
501
502
503
504
505
506
507
508
509
/**
 * __vlan_hwaccel_put_tag - attach hardware accelerated VLAN info to skb
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = vlan_tci;
	skb->vlan_present = 1;
}
517
518
519
520
521
522
523
524
525 static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
526 {
527 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
528
529 if (!eth_type_vlan(veth->h_vlan_proto))
530 return -EINVAL;
531
532 *vlan_tci = ntohs(veth->h_vlan_TCI);
533 return 0;
534 }
535
536
537
538
539
540
541
542
543 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
544 u16 *vlan_tci)
545 {
546 if (skb_vlan_tag_present(skb)) {
547 *vlan_tci = skb_vlan_tag_get(skb);
548 return 0;
549 } else {
550 *vlan_tci = 0;
551 return -EINVAL;
552 }
553 }
554
555
556
557
558
559
560
561
562 static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
563 {
564 if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
565 return __vlan_hwaccel_get_tag(skb, vlan_tci);
566 } else {
567 return __vlan_get_tag(skb, vlan_tci);
568 }
569 }
570
571
572
573
574
575
576
577
578
579
/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 * Returns 0 on a malformed packet (truncated or over-deep vlan nesting).
 */
static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		/* walk nested vlan headers until a non-vlan EtherType */
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}
615
616
617
618
619
620
621
622
/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}
627
/* Set skb->protocol from the EtherType/length field that follows a VLAN
 * header, falling back to 802.3/802.2 heuristics for length-style values.
 */
static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
661
662
663
664
665
666
667
668
669 static inline bool skb_vlan_tagged(const struct sk_buff *skb)
670 {
671 if (!skb_vlan_tag_present(skb) &&
672 likely(!eth_type_vlan(skb->protocol)))
673 return false;
674
675 return true;
676 }
677
678
679
680
681
682
683
684
/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		/* need the first tag's header in the linear area to read
		 * the inner protocol
		 */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	/* tagged once (metadata or payload); multi iff the next type is
	 * again a vlan TPID
	 */
	if (!eth_type_vlan(protocol))
		return false;

	return true;
}
707
708
709
710
711
712
713
714
/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make sure
		 * that only devices supporting NETIF_F_HW_CSUM will have
		 * checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
731
732
733
734
735
736
737
738
739
740
/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal, non-zero otherwise.
 *
 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* both fields at once; safe only where unaligned u32 loads are cheap */
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
752 #endif