/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>

#include "internal.h"

/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the
     ll header inside itself. In this case the ll header is invisible outside
     the device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnels); others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header. PPP does this, which is wrong because it
		 introduces asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
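
/* To make the receive layout above concrete, a minimal userspace sketch
 * (kept in this comment, not compiled here) of the SOCK_RAW case: received
 * frames carry the link-layer header at the front of the buffer. It only
 * uses the standard AF_PACKET API from packet(7); the buffer size and the
 * absence of error handling are illustrative assumptions.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// With SOCK_RAW, buf[0..13] is the Ethernet header (dst, src,
 *	// ethertype); with SOCK_DGRAM the same recv() would start at the
 *	// network-layer payload instead.
 */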

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
				   struct packet_ring_buffer *rb,
				   int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			       struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
				     struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
				     struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
			   struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
			       struct tpacket_kbdq_core *,
			       void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
			     struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
			       struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
396
__packet_set_status(struct packet_sock * po,void * frame,int status)397 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
398 {
399 union tpacket_uhdr h;
400
401 h.raw = frame;
402 switch (po->tp_version) {
403 case TPACKET_V1:
404 h.h1->tp_status = status;
405 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
406 break;
407 case TPACKET_V2:
408 h.h2->tp_status = status;
409 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
410 break;
411 case TPACKET_V3:
412 default:
413 WARN(1, "TPACKET version not supported.\n");
414 BUG();
415 }
416
417 smp_wmb();
418 }
419
__packet_get_status(struct packet_sock * po,void * frame)420 static int __packet_get_status(struct packet_sock *po, void *frame)
421 {
422 union tpacket_uhdr h;
423
424 smp_rmb();
425
426 h.raw = frame;
427 switch (po->tp_version) {
428 case TPACKET_V1:
429 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
430 return h.h1->tp_status;
431 case TPACKET_V2:
432 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
433 return h.h2->tp_status;
434 case TPACKET_V3:
435 default:
436 WARN(1, "TPACKET version not supported.\n");
437 BUG();
438 return 0;
439 }
440 }
441
tpacket_get_timestamp(struct sk_buff * skb,struct timespec * ts,unsigned int flags)442 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
443 unsigned int flags)
444 {
445 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
446
447 if (shhwtstamps &&
448 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
449 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
450 return TP_STATUS_TS_RAW_HARDWARE;
451
452 if (ktime_to_timespec_cond(skb->tstamp, ts))
453 return TP_STATUS_TS_SOFTWARE;
454
455 return 0;
456 }
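
/* For reference, a hedged userspace sketch (comment only, not compiled
 * here) of how the flags checked above are requested. PACKET_TIMESTAMP
 * with SOF_TIMESTAMPING_RAW_HARDWARE asks for NIC timestamps, which are
 * then reported via TP_STATUS_TS_RAW_HARDWARE; software timestamps are
 * the fallback. The socket setup around this call is assumed.
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/if_packet.h>
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 */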

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
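
/* A worked example of the indexing above, with illustrative (assumed)
 * ring parameters: frame_size = 2048 in 4 KiB blocks gives
 * frames_per_block = 2. Looking up frame position 5 then yields
 * pg_vec_pos = 5 / 2 = 2 and frame_offset = 5 % 2 = 1, i.e. the frame
 * header lives at pg_vec[2].buffer + 1 * 2048. The status word at that
 * address decides whether the caller may use the frame.
 */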

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
					  struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
			       struct tpacket_kbdq_core *pkc,
			       void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is that slow, you don't really
		 * need to worry about perf anyway.
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
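
/* Worked example of the computation above (numbers assumed for
 * illustration): for a 1 MiB block on a 1 Gb/s link, ethtool reports
 * speed = 1000, so msec = 1 and div = 1000 / 1000 = 1. Then
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, tmo = 8 * 1 = 8, and the
 * "+1" rounds up to a 9 ms retire timeout - roughly the time it takes
 * line-rate traffic to fill one block, as the timer-logic comment
 * below also notes.
 */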

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
		  jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. The queue was frozen, user-space
				 * caught up, now the link went idle and the
				 * timer fired. We don't have a block to
				 * close, so we open this block and restart
				 * the timer. Opening a block thaws the queue
				 * and restarts the timer; thawing/timer-
				 * refresh is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 * because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we would lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space
 *    left, it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Otherwise, we will freeze the queue,
 * so the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
			       struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			    struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			     struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
				     struct sk_buff *skb,
				     int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					   po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
			      struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
				      struct packet_ring_buffer *rb,
				      int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
				     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
				   struct packet_ring_buffer *rb,
				   int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
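
/* A hedged userspace sketch (comment only, not compiled here) of how a
 * socket joins one of the fanout groups dispatched above, per packet(7).
 * The group id 42 and the HASH mode are illustrative assumptions; the
 * socket must already be bound before PACKET_FANOUT succeeds (see
 * fanout_add() below, which rejects sockets that are not running).
 *
 *	#include <linux/if_packet.h>
 *
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */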

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get(fd);
	if (IS_ERR(new))
		return PTR_ERR(new);
	if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(new);
		return -EINVAL;
	}

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		/* fall through */
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
		if (!po->rollover)
			return -ENOMEM;
		atomic_long_set(&po->rollover->num, 0);
		atomic_long_set(&po->rollover->num_huge, 0);
		atomic_long_set(&po->rollover->num_failed, 0);
	}

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	if (err) {
		kfree(po->rollover);
		po->rollover = NULL;
	}
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		fanout_release_data(f);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);

	if (po->rollover)
		kfree_rcu(po->rollover, rcu);
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}
1757
1758 static const struct proto_ops packet_ops;
1759
1760 static const struct proto_ops packet_ops_spkt;
1761
packet_rcv_spkt(struct sk_buff * skb,struct net_device * dev,struct packet_type * pt,struct net_device * orig_dev)1762 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1763 struct packet_type *pt, struct net_device *orig_dev)
1764 {
1765 struct sock *sk;
1766 struct sockaddr_pkt *spkt;
1767
1768 /*
1769 * When we registered the protocol we saved the socket in the data
1770 * field for just this event.
1771 */
1772
1773 sk = pt->af_packet_priv;
1774
1775 /*
1776 * Yank back the headers [hope the device set this
1777 * right or kerboom...]
1778 *
1779 * Incoming packets have ll header pulled,
1780 * push it back.
1781 *
1782 * For outgoing ones skb->data == skb_mac_header(skb)
1783 * so that this procedure is noop.
1784 */
1785
1786 if (skb->pkt_type == PACKET_LOOPBACK)
1787 goto out;
1788
1789 if (!net_eq(dev_net(dev), sock_net(sk)))
1790 goto out;
1791
1792 skb = skb_share_check(skb, GFP_ATOMIC);
1793 if (skb == NULL)
1794 goto oom;
1795
1796 /* drop any routing info */
1797 skb_dst_drop(skb);
1798
1799 /* drop conntrack reference */
1800 nf_reset(skb);
1801
1802 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1803
1804 skb_push(skb, skb->data - skb_mac_header(skb));
1805
1806 /*
1807 * The SOCK_PACKET socket receives _all_ frames.
1808 */
1809
1810 spkt->spkt_family = dev->type;
1811 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1812 spkt->spkt_protocol = skb->protocol;
1813
1814 /*
1815 * Charge the memory to the socket. This is done specifically
1816 * to prevent sockets from using up all the memory.
1817 */
1818
1819 if (sock_queue_rcv_skb(sk, skb) == 0)
1820 return 0;
1821
1822 out:
1823 kfree_skb(skb);
1824 oom:
1825 return 0;
1826 }
1827
1828
1829 /*
1830 * Output a raw packet to a device layer. This bypasses all the other
1831 * protocol layers and you must therefore supply it with a complete frame
1832 */
1833
1834 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1835 size_t len)
1836 {
1837 struct sock *sk = sock->sk;
1838 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1839 struct sk_buff *skb = NULL;
1840 struct net_device *dev;
1841 __be16 proto = 0;
1842 int err;
1843 int extra_len = 0;
1844
1845 /*
1846 * Get and verify the address.
1847 */
1848
1849 if (saddr) {
1850 if (msg->msg_namelen < sizeof(struct sockaddr))
1851 return -EINVAL;
1852 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1853 proto = saddr->spkt_protocol;
1854 } else
1855 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1856
1857 /*
1858 * Find the device first to size check it
1859 */
1860
1861 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1862 retry:
1863 rcu_read_lock();
1864 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1865 err = -ENODEV;
1866 if (dev == NULL)
1867 goto out_unlock;
1868
1869 err = -ENETDOWN;
1870 if (!(dev->flags & IFF_UP))
1871 goto out_unlock;
1872
1873 /*
1874 * You may not queue a frame bigger than the mtu. This is the lowest level
1875 * raw protocol and you must do your own fragmentation at this level.
1876 */
1877
1878 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1879 if (!netif_supports_nofcs(dev)) {
1880 err = -EPROTONOSUPPORT;
1881 goto out_unlock;
1882 }
1883 extra_len = 4; /* We're doing our own CRC */
1884 }
1885
1886 err = -EMSGSIZE;
1887 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1888 goto out_unlock;
1889
1890 if (!skb) {
1891 size_t reserved = LL_RESERVED_SPACE(dev);
1892 int tlen = dev->needed_tailroom;
1893 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1894
1895 rcu_read_unlock();
1896 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1897 if (skb == NULL)
1898 return -ENOBUFS;
1899 /* FIXME: Save some space for broken drivers that write a hard
1900 * header at transmission time by themselves. PPP is the notable
1901 * one here. This should really be fixed at the driver level.
1902 */
1903 skb_reserve(skb, reserved);
1904 skb_reset_network_header(skb);
1905
1906 /* Try to align data part correctly */
1907 if (hhlen) {
1908 skb->data -= hhlen;
1909 skb->tail -= hhlen;
1910 if (len < hhlen)
1911 skb_reset_network_header(skb);
1912 }
1913 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1914 if (err)
1915 goto out_free;
1916 goto retry;
1917 }
1918
1919 if (!dev_validate_header(dev, skb->data, len)) {
1920 err = -EINVAL;
1921 goto out_unlock;
1922 }
1923 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1924 !packet_extra_vlan_len_allowed(dev, skb)) {
1925 err = -EMSGSIZE;
1926 goto out_unlock;
1927 }
1928
1929 skb->protocol = proto;
1930 skb->dev = dev;
1931 skb->priority = sk->sk_priority;
1932 skb->mark = sk->sk_mark;
1933
1934 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1935
1936 if (unlikely(extra_len == 4))
1937 skb->no_fcs = 1;
1938
1939 skb_probe_transport_header(skb, 0);
1940
1941 dev_queue_xmit(skb);
1942 rcu_read_unlock();
1943 return len;
1944
1945 out_unlock:
1946 rcu_read_unlock();
1947 out_free:
1948 kfree_skb(skb);
1949 return err;
1950 }
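
/*
 * Illustrative userspace sketch (not part of this file): driving the
 * legacy transmit path above, which requires a destination address on
 * every send (-ENOTCONN otherwise).  The device name and protocol are
 * placeholders; the caller must supply a complete frame, link-layer
 * header included.
 */
#if 0
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t spkt_send(int fd, const void *frame, size_t len)
{
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0",
		sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);

	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif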
1951
1952 static unsigned int run_filter(struct sk_buff *skb,
1953 const struct sock *sk,
1954 unsigned int res)
1955 {
1956 struct sk_filter *filter;
1957
1958 rcu_read_lock();
1959 filter = rcu_dereference(sk->sk_filter);
1960 if (filter != NULL)
1961 res = bpf_prog_run_clear_cb(filter->prog, skb);
1962 rcu_read_unlock();
1963
1964 return res;
1965 }
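
/*
 * Illustrative userspace sketch (not part of this file): the filter
 * consulted by run_filter() is installed with SO_ATTACH_FILTER.  A
 * return value of 0 drops the packet, and a non-zero value becomes the
 * snap length (see the pskb_trim() in packet_rcv() below).  This
 * one-instruction classic-BPF program keeps the first 96 bytes of
 * every packet; the length is an arbitrary choice.
 */
#if 0
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_snap_filter(int fd)
{
	struct sock_filter code[] = {
		{ BPF_RET | BPF_K, 0, 0, 96 },	/* accept, snap to 96 bytes */
	};
	struct sock_fprog prog = {
		.len	= 1,
		.filter	= code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif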
1966
1967 /*
1968 * This function performs lazy skb cloning in the hope that most
1969 * packets are discarded by BPF.
1970 *
1971 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1972 * and skb->cb are mangled. It works because (and until) packets
1973 * falling here are owned by current CPU. Output packets are cloned
1974 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1975 * sequentially, so that if we return the skb to its original state on exit,
1976 * we will not harm anyone.
1977 */
1978
1979 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1980 struct packet_type *pt, struct net_device *orig_dev)
1981 {
1982 struct sock *sk;
1983 struct sockaddr_ll *sll;
1984 struct packet_sock *po;
1985 u8 *skb_head = skb->data;
1986 int skb_len = skb->len;
1987 unsigned int snaplen, res;
1988
1989 if (skb->pkt_type == PACKET_LOOPBACK)
1990 goto drop;
1991
1992 sk = pt->af_packet_priv;
1993 po = pkt_sk(sk);
1994
1995 if (!net_eq(dev_net(dev), sock_net(sk)))
1996 goto drop;
1997
1998 skb->dev = dev;
1999
2000 if (dev->header_ops) {
2001 /* The device has an explicit notion of ll header,
2002 * exported to higher levels.
2003 *
2004 * Otherwise, the device hides details of its frame
2005 * structure, so that corresponding packet head is
2006 * never delivered to user.
2007 */
2008 if (sk->sk_type != SOCK_DGRAM)
2009 skb_push(skb, skb->data - skb_mac_header(skb));
2010 else if (skb->pkt_type == PACKET_OUTGOING) {
2011 /* Special case: outgoing packets have ll header at head */
2012 skb_pull(skb, skb_network_offset(skb));
2013 }
2014 }
2015
2016 snaplen = skb->len;
2017
2018 res = run_filter(skb, sk, snaplen);
2019 if (!res)
2020 goto drop_n_restore;
2021 if (snaplen > res)
2022 snaplen = res;
2023
2024 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2025 goto drop_n_acct;
2026
2027 if (skb_shared(skb)) {
2028 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2029 if (nskb == NULL)
2030 goto drop_n_acct;
2031
2032 if (skb_head != skb->data) {
2033 skb->data = skb_head;
2034 skb->len = skb_len;
2035 }
2036 consume_skb(skb);
2037 skb = nskb;
2038 }
2039
2040 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2041
2042 sll = &PACKET_SKB_CB(skb)->sa.ll;
2043 sll->sll_hatype = dev->type;
2044 sll->sll_pkttype = skb->pkt_type;
2045 if (unlikely(po->origdev))
2046 sll->sll_ifindex = orig_dev->ifindex;
2047 else
2048 sll->sll_ifindex = dev->ifindex;
2049
2050 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2051
2052 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2053 * Use their space for storing the original skb length.
2054 */
2055 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2056
2057 if (pskb_trim(skb, snaplen))
2058 goto drop_n_acct;
2059
2060 skb_set_owner_r(skb, sk);
2061 skb->dev = NULL;
2062 skb_dst_drop(skb);
2063
2064 /* drop conntrack reference */
2065 nf_reset(skb);
2066
2067 spin_lock(&sk->sk_receive_queue.lock);
2068 po->stats.stats1.tp_packets++;
2069 sock_skb_set_dropcount(sk, skb);
2070 __skb_queue_tail(&sk->sk_receive_queue, skb);
2071 spin_unlock(&sk->sk_receive_queue.lock);
2072 sk->sk_data_ready(sk);
2073 return 0;
2074
2075 drop_n_acct:
2076 spin_lock(&sk->sk_receive_queue.lock);
2077 po->stats.stats1.tp_drops++;
2078 atomic_inc(&sk->sk_drops);
2079 spin_unlock(&sk->sk_receive_queue.lock);
2080
2081 drop_n_restore:
2082 if (skb_head != skb->data && skb_shared(skb)) {
2083 skb->data = skb_head;
2084 skb->len = skb_len;
2085 }
2086 drop:
2087 consume_skb(skb);
2088 return 0;
2089 }
2090
2091 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2092 struct packet_type *pt, struct net_device *orig_dev)
2093 {
2094 struct sock *sk;
2095 struct packet_sock *po;
2096 struct sockaddr_ll *sll;
2097 union tpacket_uhdr h;
2098 u8 *skb_head = skb->data;
2099 int skb_len = skb->len;
2100 unsigned int snaplen, res;
2101 unsigned long status = TP_STATUS_USER;
2102 unsigned short macoff, netoff, hdrlen;
2103 struct sk_buff *copy_skb = NULL;
2104 struct timespec ts;
2105 __u32 ts_status;
2106
2107 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2108 * We can add members to them up to the current aligned size without forcing
2109 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2110 */
2111 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2112 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2113
2114 if (skb->pkt_type == PACKET_LOOPBACK)
2115 goto drop;
2116
2117 sk = pt->af_packet_priv;
2118 po = pkt_sk(sk);
2119
2120 if (!net_eq(dev_net(dev), sock_net(sk)))
2121 goto drop;
2122
2123 if (dev->header_ops) {
2124 if (sk->sk_type != SOCK_DGRAM)
2125 skb_push(skb, skb->data - skb_mac_header(skb));
2126 else if (skb->pkt_type == PACKET_OUTGOING) {
2127 /* Special case: outgoing packets have ll header at head */
2128 skb_pull(skb, skb_network_offset(skb));
2129 }
2130 }
2131
2132 snaplen = skb->len;
2133
2134 res = run_filter(skb, sk, snaplen);
2135 if (!res)
2136 goto drop_n_restore;
2137
2138 if (skb->ip_summed == CHECKSUM_PARTIAL)
2139 status |= TP_STATUS_CSUMNOTREADY;
2140 else if (skb->pkt_type != PACKET_OUTGOING &&
2141 (skb->ip_summed == CHECKSUM_COMPLETE ||
2142 skb_csum_unnecessary(skb)))
2143 status |= TP_STATUS_CSUM_VALID;
2144
2145 if (snaplen > res)
2146 snaplen = res;
2147
2148 if (sk->sk_type == SOCK_DGRAM) {
2149 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2150 po->tp_reserve;
2151 } else {
2152 unsigned int maclen = skb_network_offset(skb);
2153 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2154 (maclen < 16 ? 16 : maclen)) +
2155 po->tp_reserve;
2156 macoff = netoff - maclen;
2157 }
2158 if (po->tp_version <= TPACKET_V2) {
2159 if (macoff + snaplen > po->rx_ring.frame_size) {
2160 if (po->copy_thresh &&
2161 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2162 if (skb_shared(skb)) {
2163 copy_skb = skb_clone(skb, GFP_ATOMIC);
2164 } else {
2165 copy_skb = skb_get(skb);
2166 skb_head = skb->data;
2167 }
2168 if (copy_skb)
2169 skb_set_owner_r(copy_skb, sk);
2170 }
2171 snaplen = po->rx_ring.frame_size - macoff;
2172 if ((int)snaplen < 0)
2173 snaplen = 0;
2174 }
2175 } else if (unlikely(macoff + snaplen >
2176 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2177 u32 nval;
2178
2179 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2180 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2181 snaplen, nval, macoff);
2182 snaplen = nval;
2183 if (unlikely((int)snaplen < 0)) {
2184 snaplen = 0;
2185 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2186 }
2187 }
2188 spin_lock(&sk->sk_receive_queue.lock);
2189 h.raw = packet_current_rx_frame(po, skb,
2190 TP_STATUS_KERNEL, (macoff+snaplen));
2191 if (!h.raw)
2192 goto ring_is_full;
2193 if (po->tp_version <= TPACKET_V2) {
2194 packet_increment_rx_head(po, &po->rx_ring);
2195 /*
2196 * TP_STATUS_LOSING will be reported until you read the stats,
2197 * because the counter is COR - Clear On Read.
2198 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2199 * at the packet level.
2200 */
2201 if (po->stats.stats1.tp_drops)
2202 status |= TP_STATUS_LOSING;
2203 }
2204 po->stats.stats1.tp_packets++;
2205 if (copy_skb) {
2206 status |= TP_STATUS_COPY;
2207 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2208 }
2209 spin_unlock(&sk->sk_receive_queue.lock);
2210
2211 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2212
2213 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2214 getnstimeofday(&ts);
2215
2216 status |= ts_status;
2217
2218 switch (po->tp_version) {
2219 case TPACKET_V1:
2220 h.h1->tp_len = skb->len;
2221 h.h1->tp_snaplen = snaplen;
2222 h.h1->tp_mac = macoff;
2223 h.h1->tp_net = netoff;
2224 h.h1->tp_sec = ts.tv_sec;
2225 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2226 hdrlen = sizeof(*h.h1);
2227 break;
2228 case TPACKET_V2:
2229 h.h2->tp_len = skb->len;
2230 h.h2->tp_snaplen = snaplen;
2231 h.h2->tp_mac = macoff;
2232 h.h2->tp_net = netoff;
2233 h.h2->tp_sec = ts.tv_sec;
2234 h.h2->tp_nsec = ts.tv_nsec;
2235 if (skb_vlan_tag_present(skb)) {
2236 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2237 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2238 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2239 } else {
2240 h.h2->tp_vlan_tci = 0;
2241 h.h2->tp_vlan_tpid = 0;
2242 }
2243 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2244 hdrlen = sizeof(*h.h2);
2245 break;
2246 case TPACKET_V3:
2247 /* tp_next_offset and the vlan fields are already populated above,
2248 * so DON'T clear those fields here.
2249 */
2250 h.h3->tp_status |= status;
2251 h.h3->tp_len = skb->len;
2252 h.h3->tp_snaplen = snaplen;
2253 h.h3->tp_mac = macoff;
2254 h.h3->tp_net = netoff;
2255 h.h3->tp_sec = ts.tv_sec;
2256 h.h3->tp_nsec = ts.tv_nsec;
2257 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2258 hdrlen = sizeof(*h.h3);
2259 break;
2260 default:
2261 BUG();
2262 }
2263
2264 sll = h.raw + TPACKET_ALIGN(hdrlen);
2265 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2266 sll->sll_family = AF_PACKET;
2267 sll->sll_hatype = dev->type;
2268 sll->sll_protocol = skb->protocol;
2269 sll->sll_pkttype = skb->pkt_type;
2270 if (unlikely(po->origdev))
2271 sll->sll_ifindex = orig_dev->ifindex;
2272 else
2273 sll->sll_ifindex = dev->ifindex;
2274
2275 smp_mb();
2276
2277 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2278 if (po->tp_version <= TPACKET_V2) {
2279 u8 *start, *end;
2280
2281 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2282 macoff + snaplen);
2283
2284 for (start = h.raw; start < end; start += PAGE_SIZE)
2285 flush_dcache_page(pgv_to_page(start));
2286 }
2287 smp_wmb();
2288 #endif
2289
2290 if (po->tp_version <= TPACKET_V2) {
2291 __packet_set_status(po, h.raw, status);
2292 sk->sk_data_ready(sk);
2293 } else {
2294 prb_clear_blk_fill_status(&po->rx_ring);
2295 }
2296
2297 drop_n_restore:
2298 if (skb_head != skb->data && skb_shared(skb)) {
2299 skb->data = skb_head;
2300 skb->len = skb_len;
2301 }
2302 drop:
2303 kfree_skb(skb);
2304 return 0;
2305
2306 ring_is_full:
2307 po->stats.stats1.tp_drops++;
2308 spin_unlock(&sk->sk_receive_queue.lock);
2309
2310 sk->sk_data_ready(sk);
2311 kfree_skb(copy_skb);
2312 goto drop_n_restore;
2313 }
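
/*
 * Illustrative userspace sketch (not part of this file): consuming the
 * TPACKET_V2 ring that tpacket_rcv() fills.  The kernel hands a slot to
 * userspace by setting TP_STATUS_USER (via __packet_set_status() above);
 * userspace returns it by writing TP_STATUS_KERNEL back.  The ring
 * geometry is an arbitrary example and handle_frame() is a placeholder;
 * error handling and the read barrier a real consumer needs are omitted.
 */
#if 0
#include <linux/if_packet.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>

#define FRAME_SIZE	2048
#define FRAME_NR	64

static void handle_frame(const void *data, unsigned int len);

static void rx_ring_loop(int fd)
{
	struct tpacket_req req = {
		.tp_block_size	= FRAME_SIZE * FRAME_NR,	/* one block */
		.tp_block_nr	= 1,
		.tp_frame_size	= FRAME_SIZE,
		.tp_frame_nr	= FRAME_NR,
	};
	int ver = TPACKET_V2;
	unsigned int i = 0;
	char *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		struct tpacket2_hdr *hdr = (void *)(ring + i * FRAME_SIZE);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			poll(&pfd, 1, -1);	/* wait for sk_data_ready() */
			continue;
		}
		/* Frame data starts tp_mac bytes into the slot. */
		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand slot back */
		i = (i + 1) % FRAME_NR;
	}
}
#endif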
2314
2315 static void tpacket_destruct_skb(struct sk_buff *skb)
2316 {
2317 struct packet_sock *po = pkt_sk(skb->sk);
2318
2319 if (likely(po->tx_ring.pg_vec)) {
2320 void *ph;
2321 __u32 ts;
2322
2323 ph = skb_shinfo(skb)->destructor_arg;
2324 packet_dec_pending(&po->tx_ring);
2325
2326 ts = __packet_set_timestamp(po, ph, skb);
2327 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2328 }
2329
2330 sock_wfree(skb);
2331 }
2332
2333 static void tpacket_set_protocol(const struct net_device *dev,
2334 struct sk_buff *skb)
2335 {
2336 if (dev->type == ARPHRD_ETHER) {
2337 skb_reset_mac_header(skb);
2338 skb->protocol = eth_hdr(skb)->h_proto;
2339 }
2340 }
2341
2342 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2343 void *frame, struct net_device *dev, int size_max,
2344 __be16 proto, unsigned char *addr, int hlen)
2345 {
2346 union tpacket_uhdr ph;
2347 int to_write, offset, len, tp_len, nr_frags, len_max;
2348 struct socket *sock = po->sk.sk_socket;
2349 struct page *page;
2350 void *data;
2351 int err;
2352
2353 ph.raw = frame;
2354
2355 skb->protocol = proto;
2356 skb->dev = dev;
2357 skb->priority = po->sk.sk_priority;
2358 skb->mark = po->sk.sk_mark;
2359 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2360 skb_shinfo(skb)->destructor_arg = ph.raw;
2361
2362 switch (po->tp_version) {
2363 case TPACKET_V2:
2364 tp_len = ph.h2->tp_len;
2365 break;
2366 default:
2367 tp_len = ph.h1->tp_len;
2368 break;
2369 }
2370 if (unlikely(tp_len > size_max)) {
2371 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2372 return -EMSGSIZE;
2373 }
2374
2375 skb_reserve(skb, hlen);
2376 skb_reset_network_header(skb);
2377
2378 if (unlikely(po->tp_tx_has_off)) {
2379 int off_min, off_max, off;
2380 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2381 off_max = po->tx_ring.frame_size - tp_len;
2382 if (sock->type == SOCK_DGRAM) {
2383 switch (po->tp_version) {
2384 case TPACKET_V2:
2385 off = ph.h2->tp_net;
2386 break;
2387 default:
2388 off = ph.h1->tp_net;
2389 break;
2390 }
2391 } else {
2392 switch (po->tp_version) {
2393 case TPACKET_V2:
2394 off = ph.h2->tp_mac;
2395 break;
2396 default:
2397 off = ph.h1->tp_mac;
2398 break;
2399 }
2400 }
2401 if (unlikely((off < off_min) || (off_max < off)))
2402 return -EINVAL;
2403 data = ph.raw + off;
2404 } else {
2405 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2406 }
2407 to_write = tp_len;
2408
2409 if (sock->type == SOCK_DGRAM) {
2410 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2411 NULL, tp_len);
2412 if (unlikely(err < 0))
2413 return -EINVAL;
2414 } else if (dev->hard_header_len) {
2415 int hdrlen = min_t(int, dev->hard_header_len, tp_len);
2416
2417 skb_push(skb, dev->hard_header_len);
2418 err = skb_store_bits(skb, 0, data, hdrlen);
2419 if (unlikely(err))
2420 return err;
2421 if (!dev_validate_header(dev, skb->data, hdrlen))
2422 return -EINVAL;
2423 if (!skb->protocol)
2424 tpacket_set_protocol(dev, skb);
2425
2426 data += hdrlen;
2427 to_write -= hdrlen;
2428 }
2429
2430 offset = offset_in_page(data);
2431 len_max = PAGE_SIZE - offset;
2432 len = ((to_write > len_max) ? len_max : to_write);
2433
2434 skb->data_len = to_write;
2435 skb->len += to_write;
2436 skb->truesize += to_write;
2437 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2438
2439 while (likely(to_write)) {
2440 nr_frags = skb_shinfo(skb)->nr_frags;
2441
2442 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2443 pr_err("Packet exceed the number of skb frags(%lu)\n",
2444 MAX_SKB_FRAGS);
2445 return -EFAULT;
2446 }
2447
2448 page = pgv_to_page(data);
2449 data += len;
2450 flush_dcache_page(page);
2451 get_page(page);
2452 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2453 to_write -= len;
2454 offset = 0;
2455 len_max = PAGE_SIZE;
2456 len = ((to_write > len_max) ? len_max : to_write);
2457 }
2458
2459 skb_probe_transport_header(skb, 0);
2460
2461 return tp_len;
2462 }
2463
2464 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2465 {
2466 struct sk_buff *skb;
2467 struct net_device *dev;
2468 __be16 proto;
2469 int err, reserve = 0;
2470 void *ph;
2471 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2472 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2473 int tp_len, size_max;
2474 unsigned char *addr;
2475 int len_sum = 0;
2476 int status = TP_STATUS_AVAILABLE;
2477 int hlen, tlen;
2478
2479 mutex_lock(&po->pg_vec_lock);
2480
2481 if (likely(saddr == NULL)) {
2482 dev = packet_cached_dev_get(po);
2483 proto = po->num;
2484 addr = NULL;
2485 } else {
2486 err = -EINVAL;
2487 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2488 goto out;
2489 if (msg->msg_namelen < (saddr->sll_halen
2490 + offsetof(struct sockaddr_ll,
2491 sll_addr)))
2492 goto out;
2493 proto = saddr->sll_protocol;
2494 addr = saddr->sll_addr;
2495 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2496 }
2497
2498 err = -ENXIO;
2499 if (unlikely(dev == NULL))
2500 goto out;
2501 err = -ENETDOWN;
2502 if (unlikely(!(dev->flags & IFF_UP)))
2503 goto out_put;
2504
2505 if (po->sk.sk_socket->type == SOCK_RAW)
2506 reserve = dev->hard_header_len;
2507 size_max = po->tx_ring.frame_size
2508 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2509
2510 if (size_max > dev->mtu + reserve + VLAN_HLEN)
2511 size_max = dev->mtu + reserve + VLAN_HLEN;
2512
2513 do {
2514 ph = packet_current_frame(po, &po->tx_ring,
2515 TP_STATUS_SEND_REQUEST);
2516 if (unlikely(ph == NULL)) {
2517 if (need_wait && need_resched())
2518 schedule();
2519 continue;
2520 }
2521
2522 status = TP_STATUS_SEND_REQUEST;
2523 hlen = LL_RESERVED_SPACE(dev);
2524 tlen = dev->needed_tailroom;
2525 skb = sock_alloc_send_skb(&po->sk,
2526 hlen + tlen + sizeof(struct sockaddr_ll),
2527 !need_wait, &err);
2528
2529 if (unlikely(skb == NULL)) {
2530 /* we assume the socket was initially writeable ... */
2531 if (likely(len_sum > 0))
2532 err = len_sum;
2533 goto out_status;
2534 }
2535 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2536 addr, hlen);
2537 if (likely(tp_len >= 0) &&
2538 tp_len > dev->mtu + reserve &&
2539 !packet_extra_vlan_len_allowed(dev, skb))
2540 tp_len = -EMSGSIZE;
2541
2542 if (unlikely(tp_len < 0)) {
2543 if (po->tp_loss) {
2544 __packet_set_status(po, ph,
2545 TP_STATUS_AVAILABLE);
2546 packet_increment_head(&po->tx_ring);
2547 kfree_skb(skb);
2548 continue;
2549 } else {
2550 status = TP_STATUS_WRONG_FORMAT;
2551 err = tp_len;
2552 goto out_status;
2553 }
2554 }
2555
2556 packet_pick_tx_queue(dev, skb);
2557
2558 skb->destructor = tpacket_destruct_skb;
2559 __packet_set_status(po, ph, TP_STATUS_SENDING);
2560 packet_inc_pending(&po->tx_ring);
2561
2562 status = TP_STATUS_SEND_REQUEST;
2563 err = po->xmit(skb);
2564 if (unlikely(err > 0)) {
2565 err = net_xmit_errno(err);
2566 if (err && __packet_get_status(po, ph) ==
2567 TP_STATUS_AVAILABLE) {
2568 /* skb was destructed already */
2569 skb = NULL;
2570 goto out_status;
2571 }
2572 /*
2573 * skb was dropped but not destructed yet;
2574 * let's treat it like congestion or err < 0
2575 */
2576 err = 0;
2577 }
2578 packet_increment_head(&po->tx_ring);
2579 len_sum += tp_len;
2580 } while (likely((ph != NULL) ||
2581 /* Note: packet_read_pending() might be slow if we have
2582 * to call it, as it's a per-CPU variable, but in the fast path
2583 * we already short-circuit the loop with the first
2584 * condition, and luckily don't have to go that path
2585 * anyway.
2586 */
2587 (need_wait && packet_read_pending(&po->tx_ring))));
2588
2589 err = len_sum;
2590 goto out_put;
2591
2592 out_status:
2593 __packet_set_status(po, ph, status);
2594 kfree_skb(skb);
2595 out_put:
2596 dev_put(dev);
2597 out:
2598 mutex_unlock(&po->pg_vec_lock);
2599 return err;
2600 }
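
/*
 * Illustrative userspace sketch (not part of this file): the producer
 * side of the tpacket_snd() loop above.  Userspace copies a complete
 * frame into a ring slot, marks it TP_STATUS_SEND_REQUEST and kicks
 * transmission with an empty send(); tpacket_destruct_skb() flips the
 * slot back to TP_STATUS_AVAILABLE once the skb is freed.  Ring setup
 * (PACKET_TX_RING + mmap) mirrors the RX sketch above and is omitted.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int tx_one_frame(int fd, char *ring, unsigned int slot,
			unsigned int frame_size,
			const void *frame, unsigned int len)
{
	struct tpacket2_hdr *hdr = (void *)(ring + slot * frame_size);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;		/* slot still owned by the kernel */

	/* Data lives right after the aligned header, as in
	 * tpacket_fill_skb() when PACKET_TX_HAS_OFF is not set. */
	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
	       frame, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	return send(fd, NULL, 0, 0);	/* enters tpacket_snd() */
}
#endif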
2601
2602 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2603 size_t reserve, size_t len,
2604 size_t linear, int noblock,
2605 int *err)
2606 {
2607 struct sk_buff *skb;
2608
2609 /* Under a page? Don't bother with paged skb. */
2610 if (prepad + len < PAGE_SIZE || !linear)
2611 linear = len;
2612
2613 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2614 err, 0);
2615 if (!skb)
2616 return NULL;
2617
2618 skb_reserve(skb, reserve);
2619 skb_put(skb, linear);
2620 skb->data_len = len - linear;
2621 skb->len += len - linear;
2622
2623 return skb;
2624 }
2625
2626 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2627 {
2628 struct sock *sk = sock->sk;
2629 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2630 struct sk_buff *skb;
2631 struct net_device *dev;
2632 __be16 proto;
2633 unsigned char *addr;
2634 int err, reserve = 0;
2635 struct sockcm_cookie sockc;
2636 struct virtio_net_hdr vnet_hdr = { 0 };
2637 int offset = 0;
2638 int vnet_hdr_len;
2639 struct packet_sock *po = pkt_sk(sk);
2640 unsigned short gso_type = 0;
2641 int hlen, tlen;
2642 int extra_len = 0;
2643 ssize_t n;
2644
2645 /*
2646 * Get and verify the address.
2647 */
2648
2649 if (likely(saddr == NULL)) {
2650 dev = packet_cached_dev_get(po);
2651 proto = po->num;
2652 addr = NULL;
2653 } else {
2654 err = -EINVAL;
2655 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2656 goto out;
2657 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2658 goto out;
2659 proto = saddr->sll_protocol;
2660 addr = saddr->sll_addr;
2661 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2662 }
2663
2664 err = -ENXIO;
2665 if (unlikely(dev == NULL))
2666 goto out_unlock;
2667 err = -ENETDOWN;
2668 if (unlikely(!(dev->flags & IFF_UP)))
2669 goto out_unlock;
2670
2671 sockc.mark = sk->sk_mark;
2672 if (msg->msg_controllen) {
2673 err = sock_cmsg_send(sk, msg, &sockc);
2674 if (unlikely(err))
2675 goto out_unlock;
2676 }
2677
2678 if (sock->type == SOCK_RAW)
2679 reserve = dev->hard_header_len;
2680 if (po->has_vnet_hdr) {
2681 vnet_hdr_len = sizeof(vnet_hdr);
2682
2683 err = -EINVAL;
2684 if (len < vnet_hdr_len)
2685 goto out_unlock;
2686
2687 len -= vnet_hdr_len;
2688
2689 err = -EFAULT;
2690 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2691 if (n != vnet_hdr_len)
2692 goto out_unlock;
2693
2694 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2695 (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2696 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
2697 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
2698 vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
2699 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2700 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
2701
2702 err = -EINVAL;
2703 if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
2704 goto out_unlock;
2705
2706 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2707 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2708 case VIRTIO_NET_HDR_GSO_TCPV4:
2709 gso_type = SKB_GSO_TCPV4;
2710 break;
2711 case VIRTIO_NET_HDR_GSO_TCPV6:
2712 gso_type = SKB_GSO_TCPV6;
2713 break;
2714 case VIRTIO_NET_HDR_GSO_UDP:
2715 gso_type = SKB_GSO_UDP;
2716 break;
2717 default:
2718 goto out_unlock;
2719 }
2720
2721 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2722 gso_type |= SKB_GSO_TCP_ECN;
2723
2724 if (vnet_hdr.gso_size == 0)
2725 goto out_unlock;
2726
2727 }
2728 }
2729
2730 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2731 if (!netif_supports_nofcs(dev)) {
2732 err = -EPROTONOSUPPORT;
2733 goto out_unlock;
2734 }
2735 extra_len = 4; /* We're doing our own CRC */
2736 }
2737
2738 err = -EMSGSIZE;
2739 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2740 goto out_unlock;
2741
2742 err = -ENOBUFS;
2743 hlen = LL_RESERVED_SPACE(dev);
2744 tlen = dev->needed_tailroom;
2745 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2746 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2747 msg->msg_flags & MSG_DONTWAIT, &err);
2748 if (skb == NULL)
2749 goto out_unlock;
2750
2751 skb_set_network_header(skb, reserve);
2752
2753 err = -EINVAL;
2754 if (sock->type == SOCK_DGRAM) {
2755 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2756 if (unlikely(offset < 0))
2757 goto out_free;
2758 }
2759
2760 /* Returns -EFAULT on error */
2761 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2762 if (err)
2763 goto out_free;
2764
2765 if (sock->type == SOCK_RAW &&
2766 !dev_validate_header(dev, skb->data, len)) {
2767 err = -EINVAL;
2768 goto out_free;
2769 }
2770
2771 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2772
2773 if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2774 !packet_extra_vlan_len_allowed(dev, skb)) {
2775 err = -EMSGSIZE;
2776 goto out_free;
2777 }
2778
2779 skb->protocol = proto;
2780 skb->dev = dev;
2781 skb->priority = sk->sk_priority;
2782 skb->mark = sockc.mark;
2783
2784 packet_pick_tx_queue(dev, skb);
2785
2786 if (po->has_vnet_hdr) {
2787 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2788 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
2789 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
2790 if (!skb_partial_csum_set(skb, s, o)) {
2791 err = -EINVAL;
2792 goto out_free;
2793 }
2794 }
2795
2796 skb_shinfo(skb)->gso_size =
2797 __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
2798 skb_shinfo(skb)->gso_type = gso_type;
2799
2800 /* Header must be checked, and gso_segs computed. */
2801 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2802 skb_shinfo(skb)->gso_segs = 0;
2803
2804 len += vnet_hdr_len;
2805 }
2806
2807 skb_probe_transport_header(skb, reserve);
2808
2809 if (unlikely(extra_len == 4))
2810 skb->no_fcs = 1;
2811
2812 err = po->xmit(skb);
2813 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2814 goto out_unlock;
2815
2816 dev_put(dev);
2817
2818 return len;
2819
2820 out_free:
2821 kfree_skb(skb);
2822 out_unlock:
2823 if (dev)
2824 dev_put(dev);
2825 out:
2826 return err;
2827 }
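
/*
 * Illustrative userspace sketch (not part of this file): the non-ring
 * transmit path handled by packet_snd() above.  On a SOCK_DGRAM packet
 * socket the kernel builds the link-layer header from the sockaddr_ll
 * via dev_hard_header(), so only the payload is passed.  The ifindex
 * and destination MAC are placeholders supplied by the caller.
 */
#if 0
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t dgram_send(int fd, int ifindex,
			  const unsigned char dst[ETH_ALEN],
			  const void *payload, size_t len)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_ifindex	 = ifindex;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_halen	 = ETH_ALEN;
	memcpy(sll.sll_addr, dst, ETH_ALEN);

	return sendto(fd, payload, len, 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}
#endif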
2828
2829 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2830 {
2831 struct sock *sk = sock->sk;
2832 struct packet_sock *po = pkt_sk(sk);
2833
2834 if (po->tx_ring.pg_vec)
2835 return tpacket_snd(po, msg);
2836 else
2837 return packet_snd(sock, msg, len);
2838 }
2839
2840 /*
2841 * Close a PACKET socket. This is fairly simple. We immediately go
2842 * to 'closed' state and remove our protocol entry in the device list.
2843 */
2844
2845 static int packet_release(struct socket *sock)
2846 {
2847 struct sock *sk = sock->sk;
2848 struct packet_sock *po;
2849 struct net *net;
2850 union tpacket_req_u req_u;
2851
2852 if (!sk)
2853 return 0;
2854
2855 net = sock_net(sk);
2856 po = pkt_sk(sk);
2857
2858 mutex_lock(&net->packet.sklist_lock);
2859 sk_del_node_init_rcu(sk);
2860 mutex_unlock(&net->packet.sklist_lock);
2861
2862 preempt_disable();
2863 sock_prot_inuse_add(net, sk->sk_prot, -1);
2864 preempt_enable();
2865
2866 spin_lock(&po->bind_lock);
2867 unregister_prot_hook(sk, false);
2868 packet_cached_dev_reset(po);
2869
2870 if (po->prot_hook.dev) {
2871 dev_put(po->prot_hook.dev);
2872 po->prot_hook.dev = NULL;
2873 }
2874 spin_unlock(&po->bind_lock);
2875
2876 packet_flush_mclist(sk);
2877
2878 if (po->rx_ring.pg_vec) {
2879 memset(&req_u, 0, sizeof(req_u));
2880 packet_set_ring(sk, &req_u, 1, 0);
2881 }
2882
2883 if (po->tx_ring.pg_vec) {
2884 memset(&req_u, 0, sizeof(req_u));
2885 packet_set_ring(sk, &req_u, 1, 1);
2886 }
2887
2888 fanout_release(sk);
2889
2890 synchronize_net();
2891 /*
2892 * Now the socket is dead. No more input will appear.
2893 */
2894 sock_orphan(sk);
2895 sock->sk = NULL;
2896
2897 /* Purge queues */
2898
2899 skb_queue_purge(&sk->sk_receive_queue);
2900 packet_free_pending(po);
2901 sk_refcnt_debug_release(sk);
2902
2903 sock_put(sk);
2904 return 0;
2905 }
2906
2907 /*
2908 * Attach a packet hook.
2909 */
2910
2911 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2912 __be16 proto)
2913 {
2914 struct packet_sock *po = pkt_sk(sk);
2915 struct net_device *dev_curr;
2916 __be16 proto_curr;
2917 bool need_rehook;
2918 struct net_device *dev = NULL;
2919 int ret = 0;
2920 bool unlisted = false;
2921
2922 if (po->fanout)
2923 return -EINVAL;
2924
2925 lock_sock(sk);
2926 spin_lock(&po->bind_lock);
2927 rcu_read_lock();
2928
2929 if (name) {
2930 dev = dev_get_by_name_rcu(sock_net(sk), name);
2931 if (!dev) {
2932 ret = -ENODEV;
2933 goto out_unlock;
2934 }
2935 } else if (ifindex) {
2936 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2937 if (!dev) {
2938 ret = -ENODEV;
2939 goto out_unlock;
2940 }
2941 }
2942
2943 if (dev)
2944 dev_hold(dev);
2945
2946 proto_curr = po->prot_hook.type;
2947 dev_curr = po->prot_hook.dev;
2948
2949 need_rehook = proto_curr != proto || dev_curr != dev;
2950
2951 if (need_rehook) {
2952 if (po->running) {
2953 rcu_read_unlock();
2954 __unregister_prot_hook(sk, true);
2955 rcu_read_lock();
2956 dev_curr = po->prot_hook.dev;
2957 if (dev)
2958 unlisted = !dev_get_by_index_rcu(sock_net(sk),
2959 dev->ifindex);
2960 }
2961
2962 po->num = proto;
2963 po->prot_hook.type = proto;
2964
2965 if (unlikely(unlisted)) {
2966 dev_put(dev);
2967 po->prot_hook.dev = NULL;
2968 po->ifindex = -1;
2969 packet_cached_dev_reset(po);
2970 } else {
2971 po->prot_hook.dev = dev;
2972 po->ifindex = dev ? dev->ifindex : 0;
2973 packet_cached_dev_assign(po, dev);
2974 }
2975 }
2976 if (dev_curr)
2977 dev_put(dev_curr);
2978
2979 if (proto == 0 || !need_rehook)
2980 goto out_unlock;
2981
2982 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
2983 register_prot_hook(sk);
2984 } else {
2985 sk->sk_err = ENETDOWN;
2986 if (!sock_flag(sk, SOCK_DEAD))
2987 sk->sk_error_report(sk);
2988 }
2989
2990 out_unlock:
2991 rcu_read_unlock();
2992 spin_unlock(&po->bind_lock);
2993 release_sock(sk);
2994 return ret;
2995 }
2996
2997 /*
2998 * Bind a packet socket to a device
2999 */
3000
3001 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3002 int addr_len)
3003 {
3004 struct sock *sk = sock->sk;
3005 char name[15];
3006
3007 /*
3008 * Check legality
3009 */
3010
3011 if (addr_len != sizeof(struct sockaddr))
3012 return -EINVAL;
3013 strlcpy(name, uaddr->sa_data, sizeof(name));
3014
3015 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3016 }
3017
3018 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3019 {
3020 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3021 struct sock *sk = sock->sk;
3022
3023 /*
3024 * Check legality
3025 */
3026
3027 if (addr_len < sizeof(struct sockaddr_ll))
3028 return -EINVAL;
3029 if (sll->sll_family != AF_PACKET)
3030 return -EINVAL;
3031
3032 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3033 sll->sll_protocol ? : pkt_sk(sk)->num);
3034 }
3035
3036 static struct proto packet_proto = {
3037 .name = "PACKET",
3038 .owner = THIS_MODULE,
3039 .obj_size = sizeof(struct packet_sock),
3040 };
3041
3042 /*
3043 * Create a packet socket (SOCK_RAW, SOCK_DGRAM or the legacy SOCK_PACKET).
3044 */
3045
3046 static int packet_create(struct net *net, struct socket *sock, int protocol,
3047 int kern)
3048 {
3049 struct sock *sk;
3050 struct packet_sock *po;
3051 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3052 int err;
3053
3054 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3055 return -EPERM;
3056 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3057 sock->type != SOCK_PACKET)
3058 return -ESOCKTNOSUPPORT;
3059
3060 sock->state = SS_UNCONNECTED;
3061
3062 err = -ENOBUFS;
3063 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3064 if (sk == NULL)
3065 goto out;
3066
3067 sock->ops = &packet_ops;
3068 if (sock->type == SOCK_PACKET)
3069 sock->ops = &packet_ops_spkt;
3070
3071 sock_init_data(sock, sk);
3072
3073 po = pkt_sk(sk);
3074 sk->sk_family = PF_PACKET;
3075 po->num = proto;
3076 po->xmit = dev_queue_xmit;
3077
3078 err = packet_alloc_pending(po);
3079 if (err)
3080 goto out2;
3081
3082 packet_cached_dev_reset(po);
3083
3084 sk->sk_destruct = packet_sock_destruct;
3085 sk_refcnt_debug_inc(sk);
3086
3087 /*
3088 * Attach a protocol block
3089 */
3090
3091 spin_lock_init(&po->bind_lock);
3092 mutex_init(&po->pg_vec_lock);
3093 po->rollover = NULL;
3094 po->prot_hook.func = packet_rcv;
3095
3096 if (sock->type == SOCK_PACKET)
3097 po->prot_hook.func = packet_rcv_spkt;
3098
3099 po->prot_hook.af_packet_priv = sk;
3100
3101 if (proto) {
3102 po->prot_hook.type = proto;
3103 register_prot_hook(sk);
3104 }
3105
3106 mutex_lock(&net->packet.sklist_lock);
3107 sk_add_node_rcu(sk, &net->packet.sklist);
3108 mutex_unlock(&net->packet.sklist_lock);
3109
3110 preempt_disable();
3111 sock_prot_inuse_add(net, &packet_proto, 1);
3112 preempt_enable();
3113
3114 return 0;
3115 out2:
3116 sk_free(sk);
3117 out:
3118 return err;
3119 }
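
/*
 * Illustrative userspace sketch (not part of this file): creating and
 * binding the socket served by packet_create() and packet_bind() above.
 * CAP_NET_RAW is required (see the ns_capable() check), and "eth0" is a
 * placeholder; an ifindex of 0 would listen on all devices instead.
 */
#if 0
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_bound_packet_socket(void)
{
	struct sockaddr_ll sll;
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0)
		return -1;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex	 = if_nametoindex("eth0");

	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif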
3120
3121 /*
3122 * Pull a packet from our receive queue and hand it to the user.
3123 * If necessary we block.
3124 */
3125
3126 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3127 int flags)
3128 {
3129 struct sock *sk = sock->sk;
3130 struct sk_buff *skb;
3131 int copied, err;
3132 int vnet_hdr_len = 0;
3133 unsigned int origlen = 0;
3134
3135 err = -EINVAL;
3136 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3137 goto out;
3138
3139 #if 0
3140 /* What error should we return now? EUNATTACH? */
3141 if (pkt_sk(sk)->ifindex < 0)
3142 return -ENODEV;
3143 #endif
3144
3145 if (flags & MSG_ERRQUEUE) {
3146 err = sock_recv_errqueue(sk, msg, len,
3147 SOL_PACKET, PACKET_TX_TIMESTAMP);
3148 goto out;
3149 }
3150
3151 /*
3152 * Call the generic datagram receiver. This handles all sorts
3153 * of horrible races and re-entrancy so we can forget about it
3154 * in the protocol layers.
3155 *
3156 * Now it will return ENETDOWN if the device has just gone down,
3157 * but then it will block.
3158 */
3159
3160 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3161
3162 /*
3163 * If an error occurred, return it. skb_recv_datagram()
3164 * handles the blocking, so we don't need to see or worry
3165 * about blocking retries.
3166 */
3167
3168 if (skb == NULL)
3169 goto out;
3170
3171 if (pkt_sk(sk)->pressure)
3172 packet_rcv_has_room(pkt_sk(sk), NULL);
3173
3174 if (pkt_sk(sk)->has_vnet_hdr) {
3175 struct virtio_net_hdr vnet_hdr = { 0 };
3176
3177 err = -EINVAL;
3178 vnet_hdr_len = sizeof(vnet_hdr);
3179 if (len < vnet_hdr_len)
3180 goto out_free;
3181
3182 len -= vnet_hdr_len;
3183
3184 if (skb_is_gso(skb)) {
3185 struct skb_shared_info *sinfo = skb_shinfo(skb);
3186
3187 /* This is a hint as to how much should be linear. */
3188 vnet_hdr.hdr_len =
3189 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
3190 vnet_hdr.gso_size =
3191 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
3192 if (sinfo->gso_type & SKB_GSO_TCPV4)
3193 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3194 else if (sinfo->gso_type & SKB_GSO_TCPV6)
3195 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
3196 else if (sinfo->gso_type & SKB_GSO_UDP)
3197 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
3198 else if (sinfo->gso_type & SKB_GSO_FCOE)
3199 goto out_free;
3200 else
3201 BUG();
3202 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
3203 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
3204 } else
3205 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
3206
3207 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3208 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3209 vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
3210 skb_checksum_start_offset(skb));
3211 vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
3212 skb->csum_offset);
3213 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3214 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
3215 } /* else everything is zero */
3216
3217 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
3218 if (err < 0)
3219 goto out_free;
3220 }
3221
3222 /* You lose any data beyond the buffer you gave. If this worries
3223 * a user program, it can ask the device for its MTU
3224 * anyway.
3225 */
3226 copied = skb->len;
3227 if (copied > len) {
3228 copied = len;
3229 msg->msg_flags |= MSG_TRUNC;
3230 }
3231
3232 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3233 if (err)
3234 goto out_free;
3235
3236 if (sock->type != SOCK_PACKET) {
3237 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3238
3239 /* Original length was stored in sockaddr_ll fields */
3240 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3241 sll->sll_family = AF_PACKET;
3242 sll->sll_protocol = skb->protocol;
3243 }
3244
3245 sock_recv_ts_and_drops(msg, sk, skb);
3246
3247 if (msg->msg_name) {
3248 /* If the address length field is there to be filled
3249 * in, we fill it in now.
3250 */
3251 if (sock->type == SOCK_PACKET) {
3252 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3253 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3254 } else {
3255 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3256
3257 msg->msg_namelen = sll->sll_halen +
3258 offsetof(struct sockaddr_ll, sll_addr);
3259 }
3260 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3261 msg->msg_namelen);
3262 }
3263
3264 if (pkt_sk(sk)->auxdata) {
3265 struct tpacket_auxdata aux;
3266
3267 aux.tp_status = TP_STATUS_USER;
3268 if (skb->ip_summed == CHECKSUM_PARTIAL)
3269 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3270 else if (skb->pkt_type != PACKET_OUTGOING &&
3271 (skb->ip_summed == CHECKSUM_COMPLETE ||
3272 skb_csum_unnecessary(skb)))
3273 aux.tp_status |= TP_STATUS_CSUM_VALID;
3274
3275 aux.tp_len = origlen;
3276 aux.tp_snaplen = skb->len;
3277 aux.tp_mac = 0;
3278 aux.tp_net = skb_network_offset(skb);
3279 if (skb_vlan_tag_present(skb)) {
3280 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3281 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3282 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3283 } else {
3284 aux.tp_vlan_tci = 0;
3285 aux.tp_vlan_tpid = 0;
3286 }
3287 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3288 }
3289
3290 /*
3291 * Free or return the buffer as appropriate. Again this
3292 * hides all the races and re-entrancy issues from us.
3293 */
3294 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3295
3296 out_free:
3297 skb_free_datagram(sk, skb);
3298 out:
3299 return err;
3300 }
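
/*
 * Illustrative userspace sketch (not part of this file): retrieving the
 * tpacket_auxdata control message that packet_recvmsg() emits once
 * PACKET_AUXDATA is enabled.  Buffer sizes are arbitrary and error
 * handling is trimmed.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void recv_with_auxdata(int fd)
{
	char frame[2048];
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET ||
		    cmsg->cmsg_type != PACKET_AUXDATA)
			continue;
		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		/* aux.tp_len holds the original, untruncated length. */
	}
}
#endif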
3301
3302 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3303 int *uaddr_len, int peer)
3304 {
3305 struct net_device *dev;
3306 struct sock *sk = sock->sk;
3307
3308 if (peer)
3309 return -EOPNOTSUPP;
3310
3311 uaddr->sa_family = AF_PACKET;
3312 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3313 rcu_read_lock();
3314 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3315 if (dev)
3316 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3317 rcu_read_unlock();
3318 *uaddr_len = sizeof(*uaddr);
3319
3320 return 0;
3321 }
3322
3323 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3324 int *uaddr_len, int peer)
3325 {
3326 struct net_device *dev;
3327 struct sock *sk = sock->sk;
3328 struct packet_sock *po = pkt_sk(sk);
3329 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3330
3331 if (peer)
3332 return -EOPNOTSUPP;
3333
3334 sll->sll_family = AF_PACKET;
3335 sll->sll_ifindex = po->ifindex;
3336 sll->sll_protocol = po->num;
3337 sll->sll_pkttype = 0;
3338 rcu_read_lock();
3339 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3340 if (dev) {
3341 sll->sll_hatype = dev->type;
3342 sll->sll_halen = dev->addr_len;
3343 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3344 } else {
3345 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3346 sll->sll_halen = 0;
3347 }
3348 rcu_read_unlock();
3349 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3350
3351 return 0;
3352 }
3353
3354 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3355 int what)
3356 {
3357 switch (i->type) {
3358 case PACKET_MR_MULTICAST:
3359 if (i->alen != dev->addr_len)
3360 return -EINVAL;
3361 if (what > 0)
3362 return dev_mc_add(dev, i->addr);
3363 else
3364 return dev_mc_del(dev, i->addr);
3365 break;
3366 case PACKET_MR_PROMISC:
3367 return dev_set_promiscuity(dev, what);
3368 case PACKET_MR_ALLMULTI:
3369 return dev_set_allmulti(dev, what);
3370 case PACKET_MR_UNICAST:
3371 if (i->alen != dev->addr_len)
3372 return -EINVAL;
3373 if (what > 0)
3374 return dev_uc_add(dev, i->addr);
3375 else
3376 return dev_uc_del(dev, i->addr);
3377 break;
3378 default:
3379 break;
3380 }
3381 return 0;
3382 }
3383
3384 static void packet_dev_mclist_delete(struct net_device *dev,
3385 struct packet_mclist **mlp)
3386 {
3387 struct packet_mclist *ml;
3388
3389 while ((ml = *mlp) != NULL) {
3390 if (ml->ifindex == dev->ifindex) {
3391 packet_dev_mc(dev, ml, -1);
3392 *mlp = ml->next;
3393 kfree(ml);
3394 } else
3395 mlp = &ml->next;
3396 }
3397 }
3398
3399 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3400 {
3401 struct packet_sock *po = pkt_sk(sk);
3402 struct packet_mclist *ml, *i;
3403 struct net_device *dev;
3404 int err;
3405
3406 rtnl_lock();
3407
3408 err = -ENODEV;
3409 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3410 if (!dev)
3411 goto done;
3412
3413 err = -EINVAL;
3414 if (mreq->mr_alen > dev->addr_len)
3415 goto done;
3416
3417 err = -ENOBUFS;
3418 i = kmalloc(sizeof(*i), GFP_KERNEL);
3419 if (i == NULL)
3420 goto done;
3421
3422 err = 0;
3423 for (ml = po->mclist; ml; ml = ml->next) {
3424 if (ml->ifindex == mreq->mr_ifindex &&
3425 ml->type == mreq->mr_type &&
3426 ml->alen == mreq->mr_alen &&
3427 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3428 ml->count++;
3429 /* Free the new element ... */
3430 kfree(i);
3431 goto done;
3432 }
3433 }
3434
3435 i->type = mreq->mr_type;
3436 i->ifindex = mreq->mr_ifindex;
3437 i->alen = mreq->mr_alen;
3438 memcpy(i->addr, mreq->mr_address, i->alen);
3439 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3440 i->count = 1;
3441 i->next = po->mclist;
3442 po->mclist = i;
3443 err = packet_dev_mc(dev, i, 1);
3444 if (err) {
3445 po->mclist = i->next;
3446 kfree(i);
3447 }
3448
3449 done:
3450 rtnl_unlock();
3451 return err;
3452 }
3453
3454 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3455 {
3456 struct packet_mclist *ml, **mlp;
3457
3458 rtnl_lock();
3459
3460 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3461 if (ml->ifindex == mreq->mr_ifindex &&
3462 ml->type == mreq->mr_type &&
3463 ml->alen == mreq->mr_alen &&
3464 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3465 if (--ml->count == 0) {
3466 struct net_device *dev;
3467 *mlp = ml->next;
3468 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3469 if (dev)
3470 packet_dev_mc(dev, ml, -1);
3471 kfree(ml);
3472 }
3473 break;
3474 }
3475 }
3476 rtnl_unlock();
3477 return 0;
3478 }
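
/*
 * Illustrative userspace sketch (not part of this file): the
 * PACKET_ADD_MEMBERSHIP path into packet_mc_add() above.
 * PACKET_MR_PROMISC needs no hardware address; the ifindex is a
 * placeholder supplied by the caller.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type	= PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif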
3479
3480 static void packet_flush_mclist(struct sock *sk)
3481 {
3482 struct packet_sock *po = pkt_sk(sk);
3483 struct packet_mclist *ml;
3484
3485 if (!po->mclist)
3486 return;
3487
3488 rtnl_lock();
3489 while ((ml = po->mclist) != NULL) {
3490 struct net_device *dev;
3491
3492 po->mclist = ml->next;
3493 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3494 if (dev != NULL)
3495 packet_dev_mc(dev, ml, -1);
3496 kfree(ml);
3497 }
3498 rtnl_unlock();
3499 }
3500
3501 static int
3502 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3503 {
3504 struct sock *sk = sock->sk;
3505 struct packet_sock *po = pkt_sk(sk);
3506 int ret;
3507
3508 if (level != SOL_PACKET)
3509 return -ENOPROTOOPT;
3510
3511 switch (optname) {
3512 case PACKET_ADD_MEMBERSHIP:
3513 case PACKET_DROP_MEMBERSHIP:
3514 {
3515 struct packet_mreq_max mreq;
3516 int len = optlen;
3517 memset(&mreq, 0, sizeof(mreq));
3518 if (len < sizeof(struct packet_mreq))
3519 return -EINVAL;
3520 if (len > sizeof(mreq))
3521 len = sizeof(mreq);
3522 if (copy_from_user(&mreq, optval, len))
3523 return -EFAULT;
3524 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3525 return -EINVAL;
3526 if (optname == PACKET_ADD_MEMBERSHIP)
3527 ret = packet_mc_add(sk, &mreq);
3528 else
3529 ret = packet_mc_drop(sk, &mreq);
3530 return ret;
3531 }
3532
3533 case PACKET_RX_RING:
3534 case PACKET_TX_RING:
3535 {
3536 union tpacket_req_u req_u;
3537 int len;
3538
3539 switch (po->tp_version) {
3540 case TPACKET_V1:
3541 case TPACKET_V2:
3542 len = sizeof(req_u.req);
3543 break;
3544 case TPACKET_V3:
3545 default:
3546 len = sizeof(req_u.req3);
3547 break;
3548 }
3549 if (optlen < len)
3550 return -EINVAL;
3551 if (pkt_sk(sk)->has_vnet_hdr)
3552 return -EINVAL;
3553 if (copy_from_user(&req_u.req, optval, len))
3554 return -EFAULT;
3555 return packet_set_ring(sk, &req_u, 0,
3556 optname == PACKET_TX_RING);
3557 }
3558 case PACKET_COPY_THRESH:
3559 {
3560 int val;
3561
3562 if (optlen != sizeof(val))
3563 return -EINVAL;
3564 if (copy_from_user(&val, optval, sizeof(val)))
3565 return -EFAULT;
3566
3567 pkt_sk(sk)->copy_thresh = val;
3568 return 0;
3569 }
3570 case PACKET_VERSION:
3571 {
3572 int val;
3573
3574 if (optlen != sizeof(val))
3575 return -EINVAL;
3576 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3577 return -EBUSY;
3578 if (copy_from_user(&val, optval, sizeof(val)))
3579 return -EFAULT;
3580 switch (val) {
3581 case TPACKET_V1:
3582 case TPACKET_V2:
3583 case TPACKET_V3:
3584 po->tp_version = val;
3585 return 0;
3586 default:
3587 return -EINVAL;
3588 }
3589 }
3590 case PACKET_RESERVE:
3591 {
3592 unsigned int val;
3593
3594 if (optlen != sizeof(val))
3595 return -EINVAL;
3596 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3597 return -EBUSY;
3598 if (copy_from_user(&val, optval, sizeof(val)))
3599 return -EFAULT;
3600 po->tp_reserve = val;
3601 return 0;
3602 }
3603 case PACKET_LOSS:
3604 {
3605 unsigned int val;
3606
3607 if (optlen != sizeof(val))
3608 return -EINVAL;
3609 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3610 return -EBUSY;
3611 if (copy_from_user(&val, optval, sizeof(val)))
3612 return -EFAULT;
3613 po->tp_loss = !!val;
3614 return 0;
3615 }
3616 case PACKET_AUXDATA:
3617 {
3618 int val;
3619
3620 if (optlen < sizeof(val))
3621 return -EINVAL;
3622 if (copy_from_user(&val, optval, sizeof(val)))
3623 return -EFAULT;
3624
3625 po->auxdata = !!val;
3626 return 0;
3627 }
3628 case PACKET_ORIGDEV:
3629 {
3630 int val;
3631
3632 if (optlen < sizeof(val))
3633 return -EINVAL;
3634 if (copy_from_user(&val, optval, sizeof(val)))
3635 return -EFAULT;
3636
3637 po->origdev = !!val;
3638 return 0;
3639 }
3640 case PACKET_VNET_HDR:
3641 {
3642 int val;
3643
3644 if (sock->type != SOCK_RAW)
3645 return -EINVAL;
3646 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3647 return -EBUSY;
3648 if (optlen < sizeof(val))
3649 return -EINVAL;
3650 if (copy_from_user(&val, optval, sizeof(val)))
3651 return -EFAULT;
3652
3653 po->has_vnet_hdr = !!val;
3654 return 0;
3655 }
3656 case PACKET_TIMESTAMP:
3657 {
3658 int val;
3659
3660 if (optlen != sizeof(val))
3661 return -EINVAL;
3662 if (copy_from_user(&val, optval, sizeof(val)))
3663 return -EFAULT;
3664
3665 po->tp_tstamp = val;
3666 return 0;
3667 }
3668 case PACKET_FANOUT:
3669 {
3670 int val;
3671
3672 if (optlen != sizeof(val))
3673 return -EINVAL;
3674 if (copy_from_user(&val, optval, sizeof(val)))
3675 return -EFAULT;
3676
3677 return fanout_add(sk, val & 0xffff, val >> 16);
3678 }
3679 case PACKET_FANOUT_DATA:
3680 {
3681 if (!po->fanout)
3682 return -EINVAL;
3683
3684 return fanout_set_data(po, optval, optlen);
3685 }
3686 case PACKET_TX_HAS_OFF:
3687 {
3688 unsigned int val;
3689
3690 if (optlen != sizeof(val))
3691 return -EINVAL;
3692 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3693 return -EBUSY;
3694 if (copy_from_user(&val, optval, sizeof(val)))
3695 return -EFAULT;
3696 po->tp_tx_has_off = !!val;
3697 return 0;
3698 }
3699 case PACKET_QDISC_BYPASS:
3700 {
3701 int val;
3702
3703 if (optlen != sizeof(val))
3704 return -EINVAL;
3705 if (copy_from_user(&val, optval, sizeof(val)))
3706 return -EFAULT;
3707
3708 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3709 return 0;
3710 }
3711 default:
3712 return -ENOPROTOOPT;
3713 }
3714 }
3715
3716 static int packet_getsockopt(struct socket *sock, int level, int optname,
3717 char __user *optval, int __user *optlen)
3718 {
3719 int len;
3720 int val, lv = sizeof(val);
3721 struct sock *sk = sock->sk;
3722 struct packet_sock *po = pkt_sk(sk);
3723 void *data = &val;
3724 union tpacket_stats_u st;
3725 struct tpacket_rollover_stats rstats;
3726
3727 if (level != SOL_PACKET)
3728 return -ENOPROTOOPT;
3729
3730 if (get_user(len, optlen))
3731 return -EFAULT;
3732
3733 if (len < 0)
3734 return -EINVAL;
3735
3736 switch (optname) {
3737 case PACKET_STATISTICS:
3738 spin_lock_bh(&sk->sk_receive_queue.lock);
3739 memcpy(&st, &po->stats, sizeof(st));
3740 memset(&po->stats, 0, sizeof(po->stats));
3741 spin_unlock_bh(&sk->sk_receive_queue.lock);
3742
3743 if (po->tp_version == TPACKET_V3) {
3744 lv = sizeof(struct tpacket_stats_v3);
3745 st.stats3.tp_packets += st.stats3.tp_drops;
3746 data = &st.stats3;
3747 } else {
3748 lv = sizeof(struct tpacket_stats);
3749 st.stats1.tp_packets += st.stats1.tp_drops;
3750 data = &st.stats1;
3751 }
3752
3753 break;
3754 case PACKET_AUXDATA:
3755 val = po->auxdata;
3756 break;
3757 case PACKET_ORIGDEV:
3758 val = po->origdev;
3759 break;
3760 case PACKET_VNET_HDR:
3761 val = po->has_vnet_hdr;
3762 break;
3763 case PACKET_VERSION:
3764 val = po->tp_version;
3765 break;
3766 case PACKET_HDRLEN:
3767 if (len > sizeof(int))
3768 len = sizeof(int);
3769 if (copy_from_user(&val, optval, len))
3770 return -EFAULT;
3771 switch (val) {
3772 case TPACKET_V1:
3773 val = sizeof(struct tpacket_hdr);
3774 break;
3775 case TPACKET_V2:
3776 val = sizeof(struct tpacket2_hdr);
3777 break;
3778 case TPACKET_V3:
3779 val = sizeof(struct tpacket3_hdr);
3780 break;
3781 default:
3782 return -EINVAL;
3783 }
3784 break;
3785 case PACKET_RESERVE:
3786 val = po->tp_reserve;
3787 break;
3788 case PACKET_LOSS:
3789 val = po->tp_loss;
3790 break;
3791 case PACKET_TIMESTAMP:
3792 val = po->tp_tstamp;
3793 break;
3794 case PACKET_FANOUT:
3795 val = (po->fanout ?
3796 ((u32)po->fanout->id |
3797 ((u32)po->fanout->type << 16) |
3798 ((u32)po->fanout->flags << 24)) :
3799 0);
3800 break;
3801 case PACKET_ROLLOVER_STATS:
3802 if (!po->rollover)
3803 return -EINVAL;
3804 rstats.tp_all = atomic_long_read(&po->rollover->num);
3805 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3806 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3807 data = &rstats;
3808 lv = sizeof(rstats);
3809 break;
3810 case PACKET_TX_HAS_OFF:
3811 val = po->tp_tx_has_off;
3812 break;
3813 case PACKET_QDISC_BYPASS:
3814 val = packet_use_direct_xmit(po);
3815 break;
3816 default:
3817 return -ENOPROTOOPT;
3818 }
3819
3820 if (len > lv)
3821 len = lv;
3822 if (put_user(len, optlen))
3823 return -EFAULT;
3824 if (copy_to_user(optval, data, len))
3825 return -EFAULT;
3826 return 0;
3827 }
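
/*
 * Illustrative userspace sketch (not part of this file): reading the
 * clear-on-read counters served by the PACKET_STATISTICS branch above
 * (V1/V2 layout; a TPACKET_V3 socket returns struct tpacket_stats_v3
 * instead).
 */
#if 0
#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets %u, drops %u\n", st.tp_packets, st.tp_drops);
}
#endif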
3828
3829
3830 static int packet_notifier(struct notifier_block *this,
3831 unsigned long msg, void *ptr)
3832 {
3833 struct sock *sk;
3834 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3835 struct net *net = dev_net(dev);
3836
3837 rcu_read_lock();
3838 sk_for_each_rcu(sk, &net->packet.sklist) {
3839 struct packet_sock *po = pkt_sk(sk);
3840
3841 switch (msg) {
3842 case NETDEV_UNREGISTER:
3843 if (po->mclist)
3844 packet_dev_mclist_delete(dev, &po->mclist);
3845 /* fallthrough */
3846
3847 case NETDEV_DOWN:
3848 if (dev->ifindex == po->ifindex) {
3849 spin_lock(&po->bind_lock);
3850 if (po->running) {
3851 __unregister_prot_hook(sk, false);
3852 sk->sk_err = ENETDOWN;
3853 if (!sock_flag(sk, SOCK_DEAD))
3854 sk->sk_error_report(sk);
3855 }
3856 if (msg == NETDEV_UNREGISTER) {
3857 packet_cached_dev_reset(po);
3858 po->ifindex = -1;
3859 if (po->prot_hook.dev)
3860 dev_put(po->prot_hook.dev);
3861 po->prot_hook.dev = NULL;
3862 }
3863 spin_unlock(&po->bind_lock);
3864 }
3865 break;
3866 case NETDEV_UP:
3867 if (dev->ifindex == po->ifindex) {
3868 spin_lock(&po->bind_lock);
3869 if (po->num)
3870 register_prot_hook(sk);
3871 spin_unlock(&po->bind_lock);
3872 }
3873 break;
3874 }
3875 }
3876 rcu_read_unlock();
3877 return NOTIFY_DONE;
3878 }
3879
3880
3881 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3882 unsigned long arg)
3883 {
3884 struct sock *sk = sock->sk;
3885
3886 switch (cmd) {
3887 case SIOCOUTQ:
3888 {
3889 int amount = sk_wmem_alloc_get(sk);
3890
3891 return put_user(amount, (int __user *)arg);
3892 }
3893 case SIOCINQ:
3894 {
3895 struct sk_buff *skb;
3896 int amount = 0;
3897
3898 spin_lock_bh(&sk->sk_receive_queue.lock);
3899 skb = skb_peek(&sk->sk_receive_queue);
3900 if (skb)
3901 amount = skb->len;
3902 spin_unlock_bh(&sk->sk_receive_queue.lock);
3903 return put_user(amount, (int __user *)arg);
3904 }
3905 case SIOCGSTAMP:
3906 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3907 case SIOCGSTAMPNS:
3908 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3909
3910 #ifdef CONFIG_INET
3911 case SIOCADDRT:
3912 case SIOCDELRT:
3913 case SIOCDARP:
3914 case SIOCGARP:
3915 case SIOCSARP:
3916 case SIOCGIFADDR:
3917 case SIOCSIFADDR:
3918 case SIOCGIFBRDADDR:
3919 case SIOCSIFBRDADDR:
3920 case SIOCGIFNETMASK:
3921 case SIOCSIFNETMASK:
3922 case SIOCGIFDSTADDR:
3923 case SIOCSIFDSTADDR:
3924 case SIOCSIFFLAGS:
3925 return inet_dgram_ops.ioctl(sock, cmd, arg);
3926 #endif
3927
3928 default:
3929 return -ENOIOCTLCMD;
3930 }
3931 return 0;
3932 }
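
/*
 * Usage note (illustrative userspace sketch): SIOCINQ reports the size
 * of the *next* queued packet, not the total of all queued bytes;
 * SIOCOUTQ reports bytes still accounted to the send queue.
 *
 *	int next_len = 0;
 *	ioctl(fd, SIOCINQ, &next_len);	// 0 when the queue is empty
 */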

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		po->pressure = 0;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
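
/*
 * Usage note (illustrative userspace sketch): with a mapped RX ring,
 * POLLIN is signalled as soon as at least one ring frame has been
 * handed to user space, so a reader can block in poll() instead of
 * issuing a syscall per packet:
 *
 *	struct pollfd pfd = {
 *		.fd     = fd,
 *		.events = POLLIN | POLLERR,
 *	};
 *	poll(&pfd, 1, -1);	// returns once a frame is ready
 */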

/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
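
/*
 * Worked example for the order passed in here: with tp_block_size =
 * 65536 and 4 KiB pages, get_order(65536) = 4, so each block is one
 * physically contiguous 16-page (2^4) allocation, with the vmalloc
 * fallback above if that cannot be satisfied.
 */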

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Alias added to keep the code churn minimal */
	struct tpacket_req *req = &req_u->req;

	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
		WARN(1, "Tx-ring is not supported.\n");
		goto out;
	}

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		if (po->tp_version >= TPACKET_V3 &&
		    (int)(req->tp_block_size -
			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Tx-ring is not supported with TPACKET_V3. We
			 * checked this above, but be paranoid.
			 */
			if (!tx_ring)
				init_prb_bdqc(po, rb, pg_vec, req_u);
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (closing && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}
	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
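
/*
 * Usage example (illustrative userspace sketch; the values are
 * arbitrary but satisfy the sanity checks above: block size
 * page-aligned and positive, frame size a multiple of
 * TPACKET_ALIGNMENT, and frames_per_block * tp_block_nr ==
 * tp_frame_nr). Error handling elided.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 16,	// 64 KiB, page aligned
 *		.tp_block_nr   = 4,
 *		.tp_frame_size = 1 << 11,	// 2 KiB -> 32 frames/block
 *		.tp_frame_nr   = 4 * 32,	// blocks * frames/block
 *	};
 *	int version = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version,
 *		   sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */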

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
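
/*
 * Usage example (illustrative userspace sketch, continuing the
 * PACKET_RX_RING sketch above): both rings are mapped by a single
 * mmap() whose length must equal their combined size, RX ring first.
 * With tp_frame_size dividing tp_block_size, frame i starts at a
 * fixed linear offset. Memory barriers and error handling elided.
 *
 *	size_t maplen = (size_t)req.tp_block_size * req.tp_block_nr;
 *	char *ring = mmap(NULL, maplen, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *			(ring + (size_t)i * req.tp_frame_size);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// frame belongs to user space; consume, then return it:
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */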

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};
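
/*
 * This table is what routes socket(AF_PACKET, ...) to packet_create();
 * SOCK_RAW and SOCK_DGRAM sockets are then served by packet_ops above,
 * SOCK_PACKET ones by packet_ops_spkt. Illustrative userspace call:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */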

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}
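
/*
 * Illustrative /proc/net/packet line produced by the format above
 * (all values hypothetical):
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff8880045d8000 3      3    0003   2     1 0      1000   16678
 */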

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);