This source file includes the following definitions; a short usage sketch follows the list.
- eth_header
- eth_get_headlen
- eth_type_trans
- eth_header_parse
- eth_header_cache
- eth_header_cache_update
- eth_header_parse_protocol
- eth_prepare_mac_addr_change
- eth_commit_mac_addr_change
- eth_mac_addr
- eth_change_mtu
- eth_validate_addr
- ether_setup
- alloc_etherdev_mqs
- devm_free_netdev
- devm_alloc_etherdev_mqs
- sysfs_format_mac
- eth_gro_receive
- eth_gro_complete
- eth_offload_init
- arch_get_platform_mac_address
- eth_platform_get_mac_address
- nvmem_get_mac_address
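
For orientation, a minimal sketch of how a driver typically consumes these helpers. The dummydrv names and struct dummydrv_priv are hypothetical; only the alloc_etherdev()/eth_*()/register_netdev() calls are real APIs, and a real driver would also provide at least .ndo_open and .ndo_start_xmit.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

/* Hypothetical per-device private data. */
struct dummydrv_priv {
	int dummy;
};

static const struct net_device_ops dummydrv_netdev_ops = {
	/* Default helpers from this file; a real driver also needs
	 * at least .ndo_open and .ndo_start_xmit.
	 */
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct net_device *dummydrv_dev;

static int __init dummydrv_init(void)
{
	int err;

	/* alloc_etherdev() wraps alloc_etherdev_mqs(sizeof_priv, 1, 1)
	 * and runs ether_setup() on the new device.
	 */
	dummydrv_dev = alloc_etherdev(sizeof(struct dummydrv_priv));
	if (!dummydrv_dev)
		return -ENOMEM;

	dummydrv_dev->netdev_ops = &dummydrv_netdev_ops;
	eth_hw_addr_random(dummydrv_dev);	/* random locally administered MAC */

	err = register_netdev(dummydrv_dev);
	if (err) {
		free_netdev(dummydrv_dev);
		return err;
	}
	return 0;
}

static void __exit dummydrv_exit(void)
{
	unregister_netdev(dummydrv_dev);
	free_netdev(dummydrv_dev);
}

module_init(dummydrv_init);
module_exit(dummydrv_exit);
MODULE_LICENSE("GPL");
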
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
#include <net/flow_dissector.h>
#include <linux/uaccess.h>
#include <net/pkt_sched.h>

__setup("ether=", netdev_boot_setup);

/*
 * eth_header - create the Ethernet header
 *
 * Fills in the protocol/length field and the source and destination
 * addresses.  Returns ETH_HLEN when the header is complete, or
 * -ETH_HLEN when the destination address still has to be resolved
 * (e.g. by ARP) before the frame can be sent.
 */
int eth_header(struct sk_buff *skb, struct net_device *dev,
	       unsigned short type,
	       const void *daddr, const void *saddr, unsigned int len)
{
	struct ethhdr *eth = skb_push(skb, ETH_HLEN);

	/* 802.3/802.2 frames carry the payload length here, not a type. */
	if (type != ETH_P_802_3 && type != ETH_P_802_2)
		eth->h_proto = htons(type);
	else
		eth->h_proto = htons(len);

	/* Set the source hardware address. */
	if (!saddr)
		saddr = dev->dev_addr;
	memcpy(eth->h_source, saddr, ETH_ALEN);

	if (daddr) {
		memcpy(eth->h_dest, daddr, ETH_ALEN);
		return ETH_HLEN;
	}

	/* Loopback and NOARP devices need no destination address. */
	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		eth_zero_addr(eth->h_dest);
		return ETH_HLEN;
	}

	return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);

/*
 * eth_get_headlen - determine the length of the headers in a frame
 *
 * Uses the flow dissector to work out how much of the buffer at @data
 * holds protocol headers, so a driver can pull just the header portion
 * into the skb's linear area.
 */
u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len)
{
	const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
	const struct ethhdr *eth = (const struct ethhdr *)data;
	struct flow_keys_basic keys;

	/* this should never happen, but better safe than sorry */
	if (unlikely(len < sizeof(*eth)))
		return len;

	/* parse any remaining L2/L3 headers, check for L4 */
	if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data,
					      eth->h_proto, sizeof(*eth),
					      len, flags))
		return max_t(u32, keys.control.thoff, sizeof(*eth));

	/* parse for any L4 headers */
	return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
}
EXPORT_SYMBOL(eth_get_headlen);

/*
 * eth_type_trans - determine the packet's protocol ID
 *
 * Sets skb->dev and skb->pkt_type, pulls the Ethernet header and
 * returns the protocol ID.  The type field is assumed to be a length
 * (802.3) when it is small enough; that is normal practice and works
 * for any protocol in use today.
 */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short _service_access_point;
	const unsigned short *sap;
	const struct ethhdr *eth;

	skb->dev = dev;
	skb_reset_mac_header(skb);

	eth = (struct ethhdr *)skb->data;
	skb_pull_inline(skb, ETH_HLEN);

	if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
					      dev->dev_addr))) {
		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
				skb->pkt_type = PACKET_BROADCAST;
			else
				skb->pkt_type = PACKET_MULTICAST;
		} else {
			skb->pkt_type = PACKET_OTHERHOST;
		}
	}

	/*
	 * Some variants of DSA tagging don't have an ethertype field at
	 * all, so if a tagging protocol is in use on the receiving
	 * interface, set the protocol without looking at the packet.
	 */
	if (unlikely(netdev_uses_dsa(dev)) && dsa_can_decode(skb, dev))
		return htons(ETH_P_XDSA);

	if (likely(eth_proto_is_802_3(eth->h_proto)))
		return eth->h_proto;

	/*
	 * Spot raw 802.3 IPX frames: older Novell stacks run IPX over
	 * 802.3 without an 802.2 LLC layer, marked by an FFFF checksum
	 * word where the SSAP/DSAP would otherwise be.
	 */
	sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
	if (sap && *sap == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Real 802.2 LLC. */
	return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);

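A sketch of the usual receive-path call site (the dummydrv_rx() name and the linear-buffer assumption are illustrative only): the driver lets eth_type_trans() set skb->dev, skb->pkt_type and the protocol before handing the frame to the stack.

/* Illustrative only: deliver one received frame of @len bytes at @data. */
static void dummydrv_rx(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, data, len);

	/* Sets skb->dev, skb->pkt_type and returns the protocol ID. */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
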
/*
 * eth_header_parse - extract hardware address from packet
 *
 * Copies the frame's source MAC address into @haddr.
 */
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct ethhdr *eth = eth_hdr(skb);

	memcpy(haddr, eth->h_source, ETH_ALEN);
	return ETH_ALEN;
}
EXPORT_SYMBOL(eth_header_parse);

/*
 * eth_header_cache - fill cache entry from neighbour
 *
 * Pre-builds the Ethernet header in the hardware header cache so it
 * can simply be copied in front of outgoing packets.
 */
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
{
	struct ethhdr *eth;
	const struct net_device *dev = neigh->dev;

	eth = (struct ethhdr *)
	    (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));

	if (type == htons(ETH_P_802_3))
		return -1;

	eth->h_proto = type;
	memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, neigh->ha, ETH_ALEN);

	/* Publish hh_len only after the cached header is fully written. */
	smp_store_release(&hh->hh_len, ETH_HLEN);

	return 0;
}
EXPORT_SYMBOL(eth_header_cache);

/*
 * eth_header_cache_update - update cache entry
 *
 * Called by the address resolution code when the hardware address of
 * the cached destination changes.
 */
void eth_header_cache_update(struct hh_cache *hh,
			     const struct net_device *dev,
			     const unsigned char *haddr)
{
	memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
	       haddr, ETH_ALEN);
}
EXPORT_SYMBOL(eth_header_cache_update);

/* eth_header_parse_protocol - extract the protocol from the Ethernet header */
__be16 eth_header_parse_protocol(const struct sk_buff *skb)
{
	const struct ethhdr *eth = eth_hdr(skb);

	return eth->h_proto;
}
EXPORT_SYMBOL(eth_header_parse_protocol);

/*
 * eth_prepare_mac_addr_change - prepare for a MAC address change
 *
 * Rejects the change if the device is running (unless it supports live
 * address changes) or if the new address is not a valid unicast
 * Ethernet address.
 */
int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	return 0;
}
EXPORT_SYMBOL(eth_prepare_mac_addr_change);

/* eth_commit_mac_addr_change - copy the new address into dev->dev_addr */
void eth_commit_mac_addr_change(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
}
EXPORT_SYMBOL(eth_commit_mac_addr_change);

/*
 * eth_mac_addr - set new Ethernet hardware address
 *
 * Default implementation of the ndo_set_mac_address callback: validate
 * the request with eth_prepare_mac_addr_change() and, if acceptable,
 * commit it.
 */
int eth_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret < 0)
		return ret;
	eth_commit_mac_addr_change(dev, p);
	return 0;
}
EXPORT_SYMBOL(eth_mac_addr);

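The prepare/commit split used by eth_mac_addr() above exists so that drivers which must also program the address into hardware can validate first and commit only if the hardware write succeeds. A sketch, assuming a hypothetical dummydrv_write_mac_to_hw() helper:

static int dummydrv_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret < 0)
		return ret;

	/* Hypothetical: program the address into the NIC registers. */
	ret = dummydrv_write_mac_to_hw(dev, addr->sa_data);
	if (ret)
		return ret;

	eth_commit_mac_addr_change(dev, p);
	return 0;
}
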
/*
 * eth_change_mtu - deprecated MTU change helper
 *
 * The networking core now range-checks MTU changes against
 * dev->min_mtu/dev->max_mtu, so a driver only needs an ndo_change_mtu
 * callback for device-specific work, not merely to assign dev->mtu.
 */
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
	netdev_warn(dev, "%s is deprecated\n", __func__);
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(eth_change_mtu);

/* eth_validate_addr - check that dev->dev_addr is a valid unicast address */
int eth_validate_addr(struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	return 0;
}
EXPORT_SYMBOL(eth_validate_addr);

const struct header_ops eth_header_ops ____cacheline_aligned = {
	.create		= eth_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
	.parse_protocol	= eth_header_parse_protocol,
};

/*
 * ether_setup - setup Ethernet network device
 *
 * Fill in the fields of the device structure with Ethernet-generic
 * values: header ops, MTU limits, address length, flags and the
 * broadcast address.
 */
void ether_setup(struct net_device *dev)
{
	dev->header_ops		= &eth_header_ops;
	dev->type		= ARPHRD_ETHER;
	dev->hard_header_len	= ETH_HLEN;
	dev->min_header_len	= ETH_HLEN;
	dev->mtu		= ETH_DATA_LEN;
	dev->min_mtu		= ETH_MIN_MTU;
	dev->max_mtu		= ETH_DATA_LEN;
	dev->addr_len		= ETH_ALEN;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;
	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
	dev->priv_flags		|= IFF_TX_SKB_SHARING;

	eth_broadcast_addr(dev->broadcast);
}
EXPORT_SYMBOL(ether_setup);

/*
 * alloc_etherdev_mqs - allocate and set up an Ethernet device
 *
 * Allocates a net_device with @sizeof_priv bytes of private data and
 * @txqs/@rxqs queues, named "eth%d" and initialised by ether_setup().
 */
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs)
{
	return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
				ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);

static void devm_free_netdev(struct device *dev, void *res)
{
	free_netdev(*(struct net_device **)res);
}

/*
 * devm_alloc_etherdev_mqs - device-managed alloc_etherdev_mqs()
 *
 * The returned net_device is freed automatically (via free_netdev())
 * when the owning device is unbound; it is not automatically
 * unregistered.
 */
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
					   unsigned int txqs, unsigned int rxqs)
{
	struct net_device **dr;
	struct net_device *netdev;

	dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
	if (!netdev) {
		devres_free(dr);
		return NULL;
	}

	*dr = netdev;
	devres_add(dev, dr);

	return netdev;
}
EXPORT_SYMBOL(devm_alloc_etherdev_mqs);

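A sketch of how a platform driver might consume the devm variant in its probe path; the dummydrv names and struct dummydrv_priv are hypothetical, and the usual <linux/platform_device.h> / <linux/etherdevice.h> includes are assumed. Note that devres only frees the net_device; unregistering it on remove remains the driver's responsibility.

/* Illustrative platform-driver probe; dummydrv names are hypothetical. */
static int dummydrv_probe(struct platform_device *pdev)
{
	struct net_device *netdev;

	/* devm_alloc_etherdev(dev, priv) wraps
	 * devm_alloc_etherdev_mqs(dev, priv, 1, 1).
	 */
	netdev = devm_alloc_etherdev(&pdev->dev, sizeof(struct dummydrv_priv));
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	platform_set_drvdata(pdev, netdev);

	/* ... set netdev_ops, MAC address, etc. ... */

	return register_netdev(netdev);
}

static int dummydrv_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);	/* free_netdev() happens via devres */
	return 0;
}
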
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
	return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);

/* GRO receive handler for Ethernet-encapsulated (ETH_P_TEB) frames. */
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_eth;
	struct sk_buff *pp = NULL;
	struct ethhdr *eh, *eh2;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_eth = skb_gro_offset(skb);
	hlen = off_eth + sizeof(*eh);
	eh = skb_gro_header_fast(skb, off_eth);
	if (skb_gro_header_hard(skb, hlen)) {
		eh = skb_gro_header_slow(skb, hlen, off_eth);
		if (unlikely(!eh))
			goto out;
	}

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		eh2 = (struct ethhdr *)(p->data + off_eth);
		if (compare_ether_header(eh, eh2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, sizeof(*eh));
	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
EXPORT_SYMBOL(eth_gro_receive);

/* GRO completion handler: fix up the inner MAC header and hand off to
 * the offload matching the encapsulated protocol.
 */
int eth_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
	__be16 type = eh->h_proto;
	struct packet_offload *ptype;
	int err = -ENOSYS;

	if (skb->encapsulation)
		skb_set_inner_mac_header(skb, nhoff);

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype != NULL)
		err = ptype->callbacks.gro_complete(skb, nhoff +
						    sizeof(struct ethhdr));

	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(eth_gro_complete);

static struct packet_offload eth_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_TEB),
	.priority = 10,
	.callbacks = {
		.gro_receive = eth_gro_receive,
		.gro_complete = eth_gro_complete,
	},
};

static int __init eth_offload_init(void)
{
	dev_add_offload(&eth_packet_offload);

	return 0;
}

fs_initcall(eth_offload_init);

/* Weak default: architectures may override this to supply a MAC address. */
unsigned char * __weak arch_get_platform_mac_address(void)
{
	return NULL;
}

/*
 * eth_platform_get_mac_address - obtain a MAC address from platform data
 *
 * Tries the device's DT node first (the "mac-address"/"local-mac-address"
 * properties via of_get_mac_address()), then falls back to an
 * architecture-provided address.
 */
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
{
	const unsigned char *addr = NULL;

	if (dev->of_node)
		addr = of_get_mac_address(dev->of_node);
	if (IS_ERR_OR_NULL(addr))
		addr = arch_get_platform_mac_address();

	if (!addr)
		return -ENODEV;

	ether_addr_copy(mac_addr, addr);

	return 0;
}
EXPORT_SYMBOL(eth_platform_get_mac_address);

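A sketch of a common consumption pattern at probe time, with a hypothetical helper name: try the platform/DT-provided address first and fall back to a random locally administered one.

/* Illustrative: pick a MAC for @netdev whose parent device is @dev. */
static void dummydrv_init_mac(struct device *dev, struct net_device *netdev)
{
	u8 addr[ETH_ALEN];

	if (!eth_platform_get_mac_address(dev, addr))
		ether_addr_copy(netdev->dev_addr, addr);
	else
		eth_hw_addr_random(netdev);	/* no platform MAC available */
}
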
/*
 * nvmem_get_mac_address - obtain a MAC address from an nvmem provider
 *
 * Reads the "mac-address" nvmem cell of @dev and, if it holds a valid
 * unicast Ethernet address, copies it into @addrbuf.
 */
int nvmem_get_mac_address(struct device *dev, void *addrbuf)
{
	struct nvmem_cell *cell;
	const void *mac;
	size_t len;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	mac = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
		kfree(mac);
		return -EINVAL;
	}

	ether_addr_copy(addrbuf, mac);
	kfree(mac);

	return 0;
}
EXPORT_SYMBOL(nvmem_get_mac_address);