This source file includes the following definitions.
- is_link_local_ether_addr
- is_zero_ether_addr
- is_multicast_ether_addr
- is_multicast_ether_addr_64bits
- is_local_ether_addr
- is_broadcast_ether_addr
- is_unicast_ether_addr
- is_valid_ether_addr
- eth_proto_is_802_3
- eth_random_addr
- eth_broadcast_addr
- eth_zero_addr
- eth_hw_addr_random
- ether_addr_copy
- eth_hw_addr_inherit
- ether_addr_equal
- ether_addr_equal_64bits
- ether_addr_equal_unaligned
- ether_addr_equal_masked
- ether_addr_to_u64
- u64_to_ether_addr
- eth_addr_dec
- eth_addr_inc
- is_etherdev_addr
- compare_ether_header
- eth_skb_pad
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 #ifndef _LINUX_ETHERDEVICE_H
18 #define _LINUX_ETHERDEVICE_H
19
20 #include <linux/if_ether.h>
21 #include <linux/netdevice.h>
22 #include <linux/random.h>
23 #include <asm/unaligned.h>
24 #include <asm/bitsperlong.h>
25
26 #ifdef __KERNEL__
27 struct device;
28 int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
29 unsigned char *arch_get_platform_mac_address(void);
30 int nvmem_get_mac_address(struct device *dev, void *addrbuf);
31 u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
32 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
33 extern const struct header_ops eth_header_ops;
34
35 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
36 const void *daddr, const void *saddr, unsigned len);
37 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
38 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
39 __be16 type);
40 void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
41 const unsigned char *haddr);
42 __be16 eth_header_parse_protocol(const struct sk_buff *skb);
43 int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
44 void eth_commit_mac_addr_change(struct net_device *dev, void *p);
45 int eth_mac_addr(struct net_device *dev, void *p);
46 int eth_change_mtu(struct net_device *dev, int new_mtu);
47 int eth_validate_addr(struct net_device *dev);
48
49 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
50 unsigned int rxqs);
51 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
52 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
53
54 struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
55 unsigned int txqs,
56 unsigned int rxqs);
57 #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
58
59 struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
60 int eth_gro_complete(struct sk_buff *skb, int nhoff);
61
62
/* Reserved Ethernet Addresses per IEEE 802.1Q: the 01:80:c2:00:00:00 block
 * (base of the link-local range matched by is_link_local_ether_addr()).
 * __aligned(2) so it can be read through u16/u32 pointers below.
 */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
66
67
68
69
70
71
72
73
74
75
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
 * IEEE 802.1Q 8.6.3 Frame filtering.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	/* mask keeps all but the low nibble of the last octet (the "X") */
	static const __be16 m = cpu_to_be16(0xfff0);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* compare first 4 bytes as one u32, then the masked last u16 */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
89
90
91
92
93
94
95
96
97
/**
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* one u32 read plus one u16 read covers all six octets */
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	return (*(const u16 *)(addr + 0) |
		*(const u16 *)(addr + 2) |
		*(const u16 *)(addr + 4)) == 0;
#endif
}
108
109
110
111
112
113
114
115
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address.
 * By definition the broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	u32 a = *(const u32 *)addr;
#else
	u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
	/* the group (multicast) bit is the LSB of the first octet, which on
	 * big-endian ends up in the most significant byte of the word read
	 */
	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
	return 0x01 & a;
#endif
}
129
/**
 * is_multicast_ether_addr_64bits - Same as is_multicast_ether_addr() but
 * optimized for 64-bit word reads.
 * @addr: Pointer to an array of 8 bytes: the six-byte Ethernet address
 *        followed by two bytes of readable padding (the fast path below
 *        reads a full u64 starting at addr)
 */
static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
	/* first octet is in the top byte of the u64 */
	return 0x01 & ((*(const u64 *)addr) >> 56);
#else
	return 0x01 & (*(const u64 *)addr);
#endif
#else
	return is_multicast_ether_addr(addr);
#endif
}
142
143
144
145
146
147
148
149 static inline bool is_local_ether_addr(const u8 *addr)
150 {
151 return 0x02 & addr[0];
152 }
153
154
155
156
157
158
159
160
161
/**
 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is the broadcast address (ff:ff:ff:ff:ff:ff).
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
	/* AND of the three halfwords is 0xffff only if every bit is set */
	return (*(const u16 *)(addr + 0) &
		*(const u16 *)(addr + 2) &
		*(const u16 *)(addr + 4)) == 0xffff;
}
168
169
170
171
172
173
174
175 static inline bool is_unicast_ether_addr(const u8 *addr)
176 {
177 return !is_multicast_ether_addr(addr);
178 }
179
180
181
182
183
184
185
186
187
188
189
190
191 static inline bool is_valid_ether_addr(const u8 *addr)
192 {
193
194
195 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
196 }
197
198
199
200
201
202
203
204
205
/**
 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
 * @proto: Ethertype/length value to be tested
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype,
 * i.e. >= ETH_P_802_3_MIN in host comparison terms.
 *
 * Return true if the value is an 802.3 supported Ethertype.
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
215
216
217
218
219
220
221
222
223 static inline void eth_random_addr(u8 *addr)
224 {
225 get_random_bytes(addr, ETH_ALEN);
226 addr[0] &= 0xfe;
227 addr[0] |= 0x02;
228 }
229
230 #define random_ether_addr(addr) eth_random_addr(addr)
231
232
233
234
235
236
237
238 static inline void eth_broadcast_addr(u8 *addr)
239 {
240 memset(addr, 0xff, ETH_ALEN);
241 }
242
243
244
245
246
247
248
249 static inline void eth_zero_addr(u8 *addr)
250 {
251 memset(addr, 0x00, ETH_ALEN);
252 }
253
254
255
256
257
258
259
260
261
262
/**
 * eth_hw_addr_random - Generate software assigned random Ethernet and
 * set device flag
 * @dev: pointer to net_device structure
 *
 * Generate a random Ethernet address (MAC) to be used by a net device
 * and set addr_assign_type so the origin of the address is visible
 * (e.g. to userspace).
 */
static inline void eth_hw_addr_random(struct net_device *dev)
{
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->dev_addr);
}
268
269
270
271
272
273
274
275
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* one 32-bit plus one 16-bit store covers all six octets */
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
290
291
292
293
294
295
296
297
298
/**
 * eth_hw_addr_inherit - Copy dev_addr from another net_device
 * @dst: pointer to net_device to copy dev_addr to
 * @src: pointer to net_device to copy dev_addr from
 *
 * Copy the Ethernet address from one net_device to another along with
 * the address attributes (addr_assign_type).
 */
static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	dst->addr_assign_type = src->addr_assign_type;
	ether_addr_copy(dst->dev_addr, src->dev_addr);
}
305
306
307
308
309
310
311
312
313
314
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: addr1 & addr2 must both be aligned to u16.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR-fold all six octets; zero iff the addresses are identical */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The fast path reads a full u64 from each pointer, so both arrays must
 * be 8 bytes long: { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }.
 * The two padding bytes are excluded from the comparison by the 16-bit
 * shift of the folded difference.
 */
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
					   const u8 addr2[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

#ifdef __BIG_ENDIAN
	/* the 2 padding bytes occupy the low 16 bits -- shift them out */
	return (fold >> 16) == 0;
#else
	/* the 2 padding bytes occupy the high 16 bits -- shift them out */
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
359
360
361
362
363
364
365
366
367
368
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer to other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: Use only when any Ethernet address may not be u16 aligned.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* unaligned word reads are cheap here, reuse the word-wise compare */
	return ether_addr_equal(addr1, addr2);
#else
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
377
378
379
380
381
382
383
384
385
386
387
388 static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
389 const u8 *mask)
390 {
391 int i;
392
393 for (i = 0; i < ETH_ALEN; i++) {
394 if ((addr1[i] ^ addr2[i]) & mask[i])
395 return false;
396 }
397
398 return true;
399 }
400
401
402
403
404
405
406
407 static inline u64 ether_addr_to_u64(const u8 *addr)
408 {
409 u64 u = 0;
410 int i;
411
412 for (i = 0; i < ETH_ALEN; i++)
413 u = u << 8 | addr[i];
414
415 return u;
416 }
417
418
419
420
421
422
423 static inline void u64_to_ether_addr(u64 u, u8 *addr)
424 {
425 int i;
426
427 for (i = ETH_ALEN - 1; i >= 0; i--) {
428 addr[i] = u & 0xff;
429 u = u >> 8;
430 }
431 }
432
433
434
435
436
437
438 static inline void eth_addr_dec(u8 *addr)
439 {
440 u64 u = ether_addr_to_u64(addr);
441
442 u--;
443 u64_to_ether_addr(u, addr);
444 }
445
446
447
448
449
450 static inline void eth_addr_inc(u8 *addr)
451 {
452 u64 u = ether_addr_to_u64(addr);
453
454 u++;
455 u64_to_ether_addr(u, addr);
456 }
457
458
459
460
461
462
463
464
465
466
467
468
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address,
 *        followed by two bytes of readable padding (required by
 *        ether_addr_equal_64bits())
 *
 * Compare the passed address with all addresses of the device. Return true
 * if the address is one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits(), so take care of
 * the right padding.
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* the device address list is walked under RCU protection */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
484 #endif
485
486
487
488
489
490
491
492
493
494
495
496
497
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal.
 * This assumes the headers are at least 2-byte aligned (the fallback path
 * reads u16/u32 words); the 64-bit fast path additionally relies on
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS for its unaligned long reads.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare 14 bytes:
	 *  [a0 ... a13] ^ [b0 ... b13]
	 * Use two long XOR, ORed together, with an overlap of two bytes.
	 *  [a0  a1  a2  a3  a4  a5  a6  a7 ] ^ [b0  b1  b2  b3  b4  b5  b6  b7 ] |
	 *  [a6  a7  a8  a9  a10 a11 a12 a13] ^ [b6  b7  b8  b9  b10 b11 b12 b13]
	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	/* 2 + 4 + 4 + 4 bytes = the full 14-byte Ethernet header */
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
522
523
524
525
526
527
528
529
/**
 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
 * @skb: Buffer to pad
 *
 * Pads short frames up to ETH_ZLEN octets via skb_put_padto() and returns
 * its result (0 on success, negative error otherwise).
 * NOTE(review): skb_put_padto() is expected to free @skb on failure --
 * confirm against its definition before relying on @skb after an error.
 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
534
535 #endif