1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		PACKET - implements raw packet sockets.
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *		Alan Cox	:	verify_area() now used correctly
14  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
15  *		Alan Cox	:	tidied skbuff lists.
16  *		Alan Cox	:	Now uses generic datagram routines I
17  *					added. Also fixed the peek/read crash
18  *					from all old Linux datagram code.
19  *		Alan Cox	:	Uses the improved datagram code.
20  *		Alan Cox	:	Added NULL's for socket options.
21  *		Alan Cox	:	Re-commented the code.
22  *		Alan Cox	:	Use new kernel side addressing
23  *		Rob Janssen	:	Correct MTU usage.
24  *		Dave Platt	:	Counter leaks caused by incorrect
25  *					interrupt locking and some slightly
26  *					dubious gcc output. Can you read
27  *					compiler: it said _VOLATILE_
28  *	Richard Kooijman	:	Timestamp fixes.
29  *		Alan Cox	:	New buffers. Use sk->mac.raw.
30  *		Alan Cox	:	sendmsg/recvmsg support.
31  *		Alan Cox	:	Protocol setting support
32  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
33  *	Cyrus Durgin		:	Fixed kerneld for kmod.
34  *	Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
38  *					The convention is that longer addresses
39  *					will simply extend the hardware address
40  *					byte arrays at the end of sockaddr_ll
41  *					and packet_mreq.
42  *		Johann Baudy	:	Added TX RING.
43  *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
44  *					layer.
45  *					Copyright (C) 2011, <lokec@ccs.neu.edu>
46  *
47  *
48  *		This program is free software; you can redistribute it and/or
49  *		modify it under the terms of the GNU General Public License
50  *		as published by the Free Software Foundation; either version
51  *		2 of the License, or (at your option) any later version.
52  *
53  */
54 
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/kmod.h>
67 #include <linux/slab.h>
68 #include <linux/vmalloc.h>
69 #include <net/net_namespace.h>
70 #include <net/ip.h>
71 #include <net/protocol.h>
72 #include <linux/skbuff.h>
73 #include <net/sock.h>
74 #include <linux/errno.h>
75 #include <linux/timer.h>
76 #include <asm/uaccess.h>
77 #include <asm/ioctls.h>
78 #include <asm/page.h>
79 #include <asm/cacheflush.h>
80 #include <asm/io.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/poll.h>
84 #include <linux/module.h>
85 #include <linux/init.h>
86 #include <linux/mutex.h>
87 #include <linux/if_vlan.h>
88 #include <linux/virtio_net.h>
89 #include <linux/errqueue.h>
90 #include <linux/net_tstamp.h>
91 #include <linux/percpu.h>
92 #ifdef CONFIG_INET
93 #include <net/inet_common.h>
94 #endif
95 
96 #include "internal.h"
97 
98 /*
99    Assumptions:
100    - if device has no dev->hard_header routine, it adds and removes ll header
101      inside itself. In this case ll header is invisible outside of device,
102      but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     does not fit into the reserved space (tunnels); others are not
     (PPP).
   - a packet socket receives packets with the ll header already pulled,
     so SOCK_RAW sockets must push it back.
108 
109 On receive:
110 -----------
111 
112 Incoming, dev->hard_header!=NULL
113    mac_header -> ll header
114    data       -> data
115 
116 Outgoing, dev->hard_header!=NULL
117    mac_header -> ll header
118    data       -> ll header
119 
120 Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header.  PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
124    data       -> data
125 
126 Outgoing, dev->hard_header==NULL
127    mac_header -> data. ll header is still not built!
128    data       -> data
129 
Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
132 
133 
134 On transmit:
135 ------------
136 
137 dev->hard_header != NULL
138    mac_header -> ll header
139    data       -> ll header
140 
141 dev->hard_header == NULL (ll header is added by device, we cannot control it)
142    mac_header -> data
143    data       -> data
144 
   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
147  */
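
/* Illustration only (user space, not part of the kernel build; the interface
 * name below is just an example).  With SOCK_RAW the caller sees the
 * link-layer header described above and must build it on transmit, while
 * with SOCK_DGRAM the device header is stripped/built by the kernel:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */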
148 
149 /* Private packet socket structures. */
150 
151 /* identical to struct packet_mreq except it has
152  * a longer address field.
153  */
154 struct packet_mreq_max {
155 	int		mr_ifindex;
156 	unsigned short	mr_type;
157 	unsigned short	mr_alen;
158 	unsigned char	mr_address[MAX_ADDR_LEN];
159 };
160 
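/* A frame in the mmap()ed ring starts with one of three header layouts
 * (tpacket_hdr, tpacket2_hdr or tpacket3_hdr).  Which one is in use is
 * selected per socket with the PACKET_VERSION option, so the ring code
 * accesses the header through this union.
 */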
161 union tpacket_uhdr {
162 	struct tpacket_hdr  *h1;
163 	struct tpacket2_hdr *h2;
164 	struct tpacket3_hdr *h3;
165 	void *raw;
166 };
167 
168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169 		int closing, int tx_ring);
170 
171 #define V3_ALIGNMENT	(8)
172 
173 #define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
174 
175 #define BLK_PLUS_PRIV(sz_of_priv) \
176 	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
177 
178 #define PGV_FROM_VMALLOC 1
179 
180 #define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
181 #define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
182 #define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
183 #define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
184 #define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
185 #define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
186 #define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
187 
188 struct packet_sock;
189 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
190 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
191 		       struct packet_type *pt, struct net_device *orig_dev);
192 
193 static void *packet_previous_frame(struct packet_sock *po,
194 		struct packet_ring_buffer *rb,
195 		int status);
196 static void packet_increment_head(struct packet_ring_buffer *buff);
197 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
198 			struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200 			struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202 		struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205 		struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(unsigned long);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_init_blk_timer(struct packet_sock *,
209 		struct tpacket_kbdq_core *,
210 		void (*func) (unsigned long));
211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
212 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
213 		struct tpacket3_hdr *);
214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
215 		struct tpacket3_hdr *);
216 static void packet_flush_mclist(struct sock *sk);
217 
218 struct packet_skb_cb {
219 	union {
220 		struct sockaddr_pkt pkt;
221 		union {
222 			/* Trick: alias skb original length with
223 			 * ll.sll_family and ll.protocol in order
224 			 * to save room.
225 			 */
226 			unsigned int origlen;
227 			struct sockaddr_ll ll;
228 		};
229 	} sa;
230 };
231 
232 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
233 
234 #define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid)	\
236 	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
238 	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240 	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241 	((x)->kactive_blk_num+1) : 0)
242 
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245 
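/* Transmit path used when the socket requested qdisc bypass (the
 * PACKET_QDISC_BYPASS option): the skb is handed straight to the driver's
 * transmit routine on the selected tx queue, skipping the qdisc layer.
 * If the device is down or the skb cannot be linearized, the frame is
 * dropped and accounted in dev->tx_dropped.
 */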
static int packet_direct_xmit(struct sk_buff *skb)
247 {
248 	struct net_device *dev = skb->dev;
249 	netdev_features_t features;
250 	struct netdev_queue *txq;
251 	int ret = NETDEV_TX_BUSY;
252 
253 	if (unlikely(!netif_running(dev) ||
254 		     !netif_carrier_ok(dev)))
255 		goto drop;
256 
257 	features = netif_skb_features(skb);
258 	if (skb_needs_linearize(skb, features) &&
259 	    __skb_linearize(skb))
260 		goto drop;
261 
262 	txq = skb_get_tx_queue(dev, skb);
263 
264 	local_bh_disable();
265 
266 	HARD_TX_LOCK(dev, txq, smp_processor_id());
267 	if (!netif_xmit_frozen_or_drv_stopped(txq))
268 		ret = netdev_start_xmit(skb, dev, txq, false);
269 	HARD_TX_UNLOCK(dev, txq);
270 
271 	local_bh_enable();
272 
273 	if (!dev_xmit_complete(ret))
274 		kfree_skb(skb);
275 
276 	return ret;
277 drop:
278 	atomic_long_inc(&dev->tx_dropped);
279 	kfree_skb(skb);
280 	return NET_XMIT_DROP;
281 }
282 
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
284 {
285 	struct net_device *dev;
286 
287 	rcu_read_lock();
288 	dev = rcu_dereference(po->cached_dev);
289 	if (likely(dev))
290 		dev_hold(dev);
291 	rcu_read_unlock();
292 
293 	return dev;
294 }
295 
static void packet_cached_dev_assign(struct packet_sock *po,
297 				     struct net_device *dev)
298 {
299 	rcu_assign_pointer(po->cached_dev, dev);
300 }
301 
static void packet_cached_dev_reset(struct packet_sock *po)
303 {
304 	RCU_INIT_POINTER(po->cached_dev, NULL);
305 }
306 
static bool packet_use_direct_xmit(const struct packet_sock *po)
308 {
309 	return po->xmit == packet_direct_xmit;
310 }
311 
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
313 {
314 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
315 }
316 
static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
318 {
319 	const struct net_device_ops *ops = dev->netdev_ops;
320 	u16 queue_index;
321 
322 	if (ops->ndo_select_queue) {
323 		queue_index = ops->ndo_select_queue(dev, skb, NULL,
324 						    __packet_pick_tx_queue);
325 		queue_index = netdev_cap_txqueue(dev, queue_index);
326 	} else {
327 		queue_index = __packet_pick_tx_queue(dev, skb);
328 	}
329 
330 	skb_set_queue_mapping(skb, queue_index);
331 }
332 
333 /* register_prot_hook must be invoked with the po->bind_lock held,
334  * or from a context in which asynchronous accesses to the packet
335  * socket is not possible (packet_create()).
336  */
static void register_prot_hook(struct sock *sk)
338 {
339 	struct packet_sock *po = pkt_sk(sk);
340 
341 	if (!po->running) {
342 		if (po->fanout)
343 			__fanout_link(sk, po);
344 		else
345 			dev_add_pack(&po->prot_hook);
346 
347 		sock_hold(sk);
348 		po->running = 1;
349 	}
350 }
351 
352 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
353  * held.   If the sync parameter is true, we will temporarily drop
354  * the po->bind_lock and do a synchronize_net to make sure no
355  * asynchronous packet processing paths still refer to the elements
356  * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
358  */
static void __unregister_prot_hook(struct sock *sk, bool sync)
360 {
361 	struct packet_sock *po = pkt_sk(sk);
362 
363 	po->running = 0;
364 
365 	if (po->fanout)
366 		__fanout_unlink(sk, po);
367 	else
368 		__dev_remove_pack(&po->prot_hook);
369 
370 	__sock_put(sk);
371 
372 	if (sync) {
373 		spin_unlock(&po->bind_lock);
374 		synchronize_net();
375 		spin_lock(&po->bind_lock);
376 	}
377 }
378 
static void unregister_prot_hook(struct sock *sk, bool sync)
380 {
381 	struct packet_sock *po = pkt_sk(sk);
382 
383 	if (po->running)
384 		__unregister_prot_hook(sk, sync);
385 }
386 
static inline struct page * __pure pgv_to_page(void *addr)
388 {
389 	if (is_vmalloc_addr(addr))
390 		return vmalloc_to_page(addr);
391 	return virt_to_page(addr);
392 }
393 
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
395 {
396 	union tpacket_uhdr h;
397 
398 	h.raw = frame;
399 	switch (po->tp_version) {
400 	case TPACKET_V1:
401 		h.h1->tp_status = status;
402 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
403 		break;
404 	case TPACKET_V2:
405 		h.h2->tp_status = status;
406 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
407 		break;
408 	case TPACKET_V3:
409 	default:
410 		WARN(1, "TPACKET version not supported.\n");
411 		BUG();
412 	}
413 
414 	smp_wmb();
415 }
416 
static int __packet_get_status(struct packet_sock *po, void *frame)
418 {
419 	union tpacket_uhdr h;
420 
421 	smp_rmb();
422 
423 	h.raw = frame;
424 	switch (po->tp_version) {
425 	case TPACKET_V1:
426 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
427 		return h.h1->tp_status;
428 	case TPACKET_V2:
429 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
430 		return h.h2->tp_status;
431 	case TPACKET_V3:
432 	default:
433 		WARN(1, "TPACKET version not supported.\n");
434 		BUG();
435 		return 0;
436 	}
437 }
438 
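/* Pick the timestamp to report in the ring frame: a raw hardware timestamp
 * is preferred when the socket asked for SOF_TIMESTAMPING_RAW_HARDWARE and
 * the driver provided one, otherwise fall back to the skb software
 * timestamp.  The return value is the TP_STATUS_TS_* flag to fold into
 * tp_status; 0 means no timestamp is available.
 */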
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
440 				   unsigned int flags)
441 {
442 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
443 
444 	if (shhwtstamps &&
445 	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
446 	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
447 		return TP_STATUS_TS_RAW_HARDWARE;
448 
449 	if (ktime_to_timespec_cond(skb->tstamp, ts))
450 		return TP_STATUS_TS_SOFTWARE;
451 
452 	return 0;
453 }
454 
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
456 				    struct sk_buff *skb)
457 {
458 	union tpacket_uhdr h;
459 	struct timespec ts;
460 	__u32 ts_status;
461 
462 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
463 		return 0;
464 
465 	h.raw = frame;
466 	switch (po->tp_version) {
467 	case TPACKET_V1:
468 		h.h1->tp_sec = ts.tv_sec;
469 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
470 		break;
471 	case TPACKET_V2:
472 		h.h2->tp_sec = ts.tv_sec;
473 		h.h2->tp_nsec = ts.tv_nsec;
474 		break;
475 	case TPACKET_V3:
476 	default:
477 		WARN(1, "TPACKET version not supported.\n");
478 		BUG();
479 	}
480 
481 	/* one flush is safe, as both fields always lie on the same cacheline */
482 	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
483 	smp_wmb();
484 
485 	return ts_status;
486 }
487 
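/* Map a V1/V2 ring position to its frame: position / frames_per_block picks
 * the pg_vec block, position % frames_per_block the frame inside it.  For
 * example, with frames_per_block == 4, position 6 is frame 2 of block 1.
 * Returns NULL unless the frame currently carries the requested status.
 */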
static void *packet_lookup_frame(struct packet_sock *po,
489 		struct packet_ring_buffer *rb,
490 		unsigned int position,
491 		int status)
492 {
493 	unsigned int pg_vec_pos, frame_offset;
494 	union tpacket_uhdr h;
495 
496 	pg_vec_pos = position / rb->frames_per_block;
497 	frame_offset = position % rb->frames_per_block;
498 
499 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
500 		(frame_offset * rb->frame_size);
501 
502 	if (status != __packet_get_status(po, h.raw))
503 		return NULL;
504 
505 	return h.raw;
506 }
507 
static void *packet_current_frame(struct packet_sock *po,
509 		struct packet_ring_buffer *rb,
510 		int status)
511 {
512 	return packet_lookup_frame(po, rb, rb->head, status);
513 }
514 
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
516 {
517 	del_timer_sync(&pkc->retire_blk_timer);
518 }
519 
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
521 		int tx_ring,
522 		struct sk_buff_head *rb_queue)
523 {
524 	struct tpacket_kbdq_core *pkc;
525 
526 	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
527 			GET_PBDQC_FROM_RB(&po->rx_ring);
528 
529 	spin_lock_bh(&rb_queue->lock);
530 	pkc->delete_blk_timer = 1;
531 	spin_unlock_bh(&rb_queue->lock);
532 
533 	prb_del_retire_blk_timer(pkc);
534 }
535 
static void prb_init_blk_timer(struct packet_sock *po,
537 		struct tpacket_kbdq_core *pkc,
538 		void (*func) (unsigned long))
539 {
540 	init_timer(&pkc->retire_blk_timer);
541 	pkc->retire_blk_timer.data = (long)po;
542 	pkc->retire_blk_timer.function = func;
543 	pkc->retire_blk_timer.expires = jiffies;
544 }
545 
static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
547 {
548 	struct tpacket_kbdq_core *pkc;
549 
550 	if (tx_ring)
551 		BUG();
552 
553 	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
554 			GET_PBDQC_FROM_RB(&po->rx_ring);
555 	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
556 }
557 
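/* Derive a default block-retire timeout from the link speed, roughly the
 * time needed to fill one block.  For example, a 1 MiB block on a 1 Gbit/s
 * link gives mbits = 8 and div = 1, so the timeout is about 8 (+1) msec.
 * When the speed is unknown or below 1 Gbit/s, DEFAULT_PRB_RETIRE_TOV is
 * used instead.
 */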
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
559 				int blk_size_in_bytes)
560 {
561 	struct net_device *dev;
562 	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
563 	struct ethtool_cmd ecmd;
564 	int err;
565 	u32 speed;
566 
567 	rtnl_lock();
568 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
569 	if (unlikely(!dev)) {
570 		rtnl_unlock();
571 		return DEFAULT_PRB_RETIRE_TOV;
572 	}
573 	err = __ethtool_get_settings(dev, &ecmd);
574 	speed = ethtool_cmd_speed(&ecmd);
575 	rtnl_unlock();
576 	if (!err) {
577 		/*
		 * If the link speed is that slow, we don't really
		 * need to worry about performance anyway.
580 		 */
581 		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
582 			return DEFAULT_PRB_RETIRE_TOV;
583 		} else {
584 			msec = 1;
585 			div = speed / 1000;
586 		}
587 	}
588 
589 	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
590 
591 	if (div)
592 		mbits /= div;
593 
594 	tmo = mbits * msec;
595 
596 	if (div)
597 		return tmo+1;
598 	return tmo;
599 }
600 
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
602 			union tpacket_req_u *req_u)
603 {
604 	p1->feature_req_word = req_u->req3.tp_feature_req_word;
605 }
606 
static void init_prb_bdqc(struct packet_sock *po,
608 			struct packet_ring_buffer *rb,
609 			struct pgv *pg_vec,
610 			union tpacket_req_u *req_u, int tx_ring)
611 {
612 	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
613 	struct tpacket_block_desc *pbd;
614 
615 	memset(p1, 0x0, sizeof(*p1));
616 
617 	p1->knxt_seq_num = 1;
618 	p1->pkbdq = pg_vec;
619 	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
620 	p1->pkblk_start	= pg_vec[0].buffer;
621 	p1->kblk_size = req_u->req3.tp_block_size;
622 	p1->knum_blocks	= req_u->req3.tp_block_nr;
623 	p1->hdrlen = po->tp_hdrlen;
624 	p1->version = po->tp_version;
625 	p1->last_kactive_blk_num = 0;
626 	po->stats.stats3.tp_freeze_q_cnt = 0;
627 	if (req_u->req3.tp_retire_blk_tov)
628 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
629 	else
630 		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
631 						req_u->req3.tp_block_size);
632 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
633 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
634 
635 	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
636 	prb_init_ft_ops(p1, req_u);
637 	prb_setup_retire_blk_timer(po, tx_ring);
638 	prb_open_block(p1, pbd);
639 }
640 
641 /*  Do NOT update the last_blk_num first.
642  *  Assumes sk_buff_head lock is held.
643  */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
645 {
646 	mod_timer(&pkc->retire_blk_timer,
647 			jiffies + pkc->tov_in_jiffies);
648 	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
649 }
650 
651 /*
652  * Timer logic:
653  * 1) We refresh the timer only when we open a block.
654  *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
656  *
657  * With a 1MB block-size, on a 1Gbps line, it will take
658  * i) ~8 ms to fill a block + ii) memcpy etc.
659  * In this cut we are not accounting for the memcpy time.
660  *
661  * So, if the user sets the 'tmo' to 10ms then the timer
662  * will never fire while the block is still getting filled
663  * (which is what we want). However, the user could choose
664  * to close a block early and that's fine.
665  *
666  * But when the timer does fire, we check whether or not to refresh it.
667  * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
669  * Either the user can set the 'tmo' or we can derive it based on
670  * a) line-speed and b) block-size.
671  * prb_calc_retire_blk_tmo() calculates the tmo.
672  *
673  */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
675 {
676 	struct packet_sock *po = (struct packet_sock *)data;
677 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
678 	unsigned int frozen;
679 	struct tpacket_block_desc *pbd;
680 
681 	spin_lock(&po->sk.sk_receive_queue.lock);
682 
683 	frozen = prb_queue_frozen(pkc);
684 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
685 
686 	if (unlikely(pkc->delete_blk_timer))
687 		goto out;
688 
689 	/* We only need to plug the race when the block is partially filled.
690 	 * tpacket_rcv:
691 	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
692 	 *		copy_bits() is in progress ...
693 	 *		timer fires on other cpu:
694 	 *		we can't retire the current block because copy_bits
695 	 *		is in progress.
696 	 *
697 	 */
698 	if (BLOCK_NUM_PKTS(pbd)) {
699 		while (atomic_read(&pkc->blk_fill_in_prog)) {
700 			/* Waiting for skb_copy_bits to finish... */
701 			cpu_relax();
702 		}
703 	}
704 
705 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
706 		if (!frozen) {
707 			if (!BLOCK_NUM_PKTS(pbd)) {
708 				/* An empty block. Just refresh the timer. */
709 				goto refresh_timer;
710 			}
711 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
712 			if (!prb_dispatch_next_block(pkc, po))
713 				goto refresh_timer;
714 			else
715 				goto out;
716 		} else {
717 			/* Case 1. Queue was frozen because user-space was
718 			 *	   lagging behind.
719 			 */
720 			if (prb_curr_blk_in_use(pkc, pbd)) {
721 				/*
722 				 * Ok, user-space is still behind.
723 				 * So just refresh the timer.
724 				 */
725 				goto refresh_timer;
726 			} else {
			       /* Case 2. The queue was frozen, user-space caught up,
				* now the link went idle and the timer fired.
				* We don't have a block to close, so we open this
				* block and restart the timer.
				* Opening a block thaws the queue and restarts the
				* timer; thawing/timer-refresh is a side effect.
733 				*/
734 				prb_open_block(pkc, pbd);
735 				goto out;
736 			}
737 		}
738 	}
739 
740 refresh_timer:
741 	_prb_refresh_rx_retire_blk_timer(pkc);
742 
743 out:
744 	spin_unlock(&po->sk.sk_receive_queue.lock);
745 }
746 
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
748 		struct tpacket_block_desc *pbd1, __u32 status)
749 {
750 	/* Flush everything minus the block header */
751 
752 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
753 	u8 *start, *end;
754 
755 	start = (u8 *)pbd1;
756 
757 	/* Skip the block header(we know header WILL fit in 4K) */
758 	start += PAGE_SIZE;
759 
760 	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
761 	for (; start < end; start += PAGE_SIZE)
762 		flush_dcache_page(pgv_to_page(start));
763 
764 	smp_wmb();
765 #endif
766 
767 	/* Now update the block status. */
768 
769 	BLOCK_STATUS(pbd1) = status;
770 
771 	/* Flush the block header */
772 
773 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
774 	start = (u8 *)pbd1;
775 	flush_dcache_page(pgv_to_page(start));
776 
777 	smp_wmb();
778 #endif
779 }
780 
781 /*
782  * Side effect:
783  *
784  * 1) flush the block
785  * 2) Increment active_blk_num
786  *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
789  */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
791 		struct tpacket_block_desc *pbd1,
792 		struct packet_sock *po, unsigned int stat)
793 {
794 	__u32 status = TP_STATUS_USER | stat;
795 
796 	struct tpacket3_hdr *last_pkt;
797 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
798 	struct sock *sk = &po->sk;
799 
800 	if (po->stats.stats3.tp_drops)
801 		status |= TP_STATUS_LOSING;
802 
803 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
804 	last_pkt->tp_next_offset = 0;
805 
806 	/* Get the ts of the last pkt */
807 	if (BLOCK_NUM_PKTS(pbd1)) {
808 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
809 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
810 	} else {
811 		/* Ok, we tmo'd - so get the current time.
812 		 *
813 		 * It shouldn't really happen as we don't close empty
814 		 * blocks. See prb_retire_rx_blk_timer_expired().
815 		 */
816 		struct timespec ts;
817 		getnstimeofday(&ts);
818 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
819 		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
820 	}
821 
822 	smp_wmb();
823 
824 	/* Flush the block */
825 	prb_flush_block(pkc1, pbd1, status);
826 
827 	sk->sk_data_ready(sk);
828 
829 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
830 }
831 
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
833 {
834 	pkc->reset_pending_on_curr_blk = 0;
835 }
836 
837 /*
838  * Side effect of opening a block:
839  *
840  * 1) prb_queue is thawed.
841  * 2) retire_blk_timer is refreshed.
842  *
843  */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
845 	struct tpacket_block_desc *pbd1)
846 {
847 	struct timespec ts;
848 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
849 
850 	smp_rmb();
851 
852 	/* We could have just memset this but we will lose the
853 	 * flexibility of making the priv area sticky
854 	 */
855 
856 	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
857 	BLOCK_NUM_PKTS(pbd1) = 0;
858 	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
859 
860 	getnstimeofday(&ts);
861 
862 	h1->ts_first_pkt.ts_sec = ts.tv_sec;
863 	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
864 
865 	pkc1->pkblk_start = (char *)pbd1;
866 	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
867 
868 	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
869 	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
870 
871 	pbd1->version = pkc1->version;
872 	pkc1->prev = pkc1->nxt_offset;
873 	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
874 
875 	prb_thaw_queue(pkc1);
876 	_prb_refresh_rx_retire_blk_timer(pkc1);
877 
878 	smp_wmb();
879 }
880 
881 /*
882  * Queue freeze logic:
883  * 1) Assume tp_block_nr = 8 blocks.
884  * 2) At time 't0', user opens Rx ring.
885  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
886  * 4) user-space is either sleeping or processing block '0'.
887  * 5) tpacket_rcv is currently filling block '7', since there is no space left,
888  *    it will close block-7,loop around and try to fill block '0'.
889  *    call-flow:
890  *    __packet_lookup_frame_in_block
891  *      prb_retire_current_block()
892  *      prb_dispatch_next_block()
893  *        |->(BLOCK_STATUS == USER) evaluates to true
894  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
895  * 6) Now there are two cases:
896  *    6.1) Link goes idle right after the queue is frozen.
897  *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
900  *    6.2) Link is busy and keeps on receiving packets. This is a simple
901  *         case and __packet_lookup_frame_in_block will check if block-0
902  *         is free and can now be re-used.
903  */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
905 				  struct packet_sock *po)
906 {
907 	pkc->reset_pending_on_curr_blk = 1;
908 	po->stats.stats3.tp_freeze_q_cnt++;
909 }
910 
911 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
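/* Every packet stored in a V3 block is padded to V3_ALIGNMENT, so
 * tp_next_offset always lands on an 8-byte boundary.
 */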
912 
913 /*
914  * If the next block is free then we will dispatch it
915  * and return a good offset.
916  * Else, we will freeze the queue.
917  * So, caller must check the return value.
918  */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
920 		struct packet_sock *po)
921 {
922 	struct tpacket_block_desc *pbd;
923 
924 	smp_rmb();
925 
926 	/* 1. Get current block num */
927 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
928 
929 	/* 2. If this block is currently in_use then freeze the queue */
930 	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
931 		prb_freeze_queue(pkc, po);
932 		return NULL;
933 	}
934 
935 	/*
936 	 * 3.
937 	 * open this block and return the offset where the first packet
938 	 * needs to get stored.
939 	 */
940 	prb_open_block(pkc, pbd);
941 	return (void *)pkc->nxt_offset;
942 }
943 
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
945 		struct packet_sock *po, unsigned int status)
946 {
947 	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
948 
949 	/* retire/close the current block */
950 	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
951 		/*
952 		 * Plug the case where copy_bits() is in progress on
953 		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
954 		 * have space to copy the pkt in the current block and
955 		 * called prb_retire_current_block()
956 		 *
957 		 * We don't need to worry about the TMO case because
958 		 * the timer-handler already handled this case.
959 		 */
960 		if (!(status & TP_STATUS_BLK_TMO)) {
961 			while (atomic_read(&pkc->blk_fill_in_prog)) {
962 				/* Waiting for skb_copy_bits to finish... */
963 				cpu_relax();
964 			}
965 		}
966 		prb_close_block(pkc, pbd, po, status);
967 		return;
968 	}
969 }
970 
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
972 				      struct tpacket_block_desc *pbd)
973 {
974 	return TP_STATUS_USER & BLOCK_STATUS(pbd);
975 }
976 
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
978 {
979 	return pkc->reset_pending_on_curr_blk;
980 }
981 
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
983 {
984 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
985 	atomic_dec(&pkc->blk_fill_in_prog);
986 }
987 
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
989 			struct tpacket3_hdr *ppd)
990 {
991 	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
992 }
993 
static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
995 			struct tpacket3_hdr *ppd)
996 {
997 	ppd->hv1.tp_rxhash = 0;
998 }
999 
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1001 			struct tpacket3_hdr *ppd)
1002 {
1003 	if (skb_vlan_tag_present(pkc->skb)) {
1004 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1005 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1006 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1007 	} else {
1008 		ppd->hv1.tp_vlan_tci = 0;
1009 		ppd->hv1.tp_vlan_tpid = 0;
1010 		ppd->tp_status = TP_STATUS_AVAILABLE;
1011 	}
1012 }
1013 
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1015 			struct tpacket3_hdr *ppd)
1016 {
1017 	ppd->hv1.tp_padding = 0;
1018 	prb_fill_vlan_info(pkc, ppd);
1019 
1020 	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1021 		prb_fill_rxhash(pkc, ppd);
1022 	else
1023 		prb_clear_rxhash(pkc, ppd);
1024 }
1025 
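/* Reserve room for one packet in the currently open block and update the
 * block accounting.  blk_fill_in_prog is raised here and dropped again by
 * prb_clear_blk_fill_status() once the packet data has been copied in;
 * the retire paths spin on it before closing a block.
 */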
static void prb_fill_curr_block(char *curr,
1027 				struct tpacket_kbdq_core *pkc,
1028 				struct tpacket_block_desc *pbd,
1029 				unsigned int len)
1030 {
1031 	struct tpacket3_hdr *ppd;
1032 
1033 	ppd  = (struct tpacket3_hdr *)curr;
1034 	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1035 	pkc->prev = curr;
1036 	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1037 	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1038 	BLOCK_NUM_PKTS(pbd) += 1;
1039 	atomic_inc(&pkc->blk_fill_in_prog);
1040 	prb_run_all_ft_ops(pkc, ppd);
1041 }
1042 
1043 /* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1045 					    struct sk_buff *skb,
1046 						int status,
1047 					    unsigned int len
1048 					    )
1049 {
1050 	struct tpacket_kbdq_core *pkc;
1051 	struct tpacket_block_desc *pbd;
1052 	char *curr, *end;
1053 
1054 	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1055 	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1056 
1057 	/* Queue is frozen when user space is lagging behind */
1058 	if (prb_queue_frozen(pkc)) {
1059 		/*
1060 		 * Check if that last block which caused the queue to freeze,
1061 		 * is still in_use by user-space.
1062 		 */
1063 		if (prb_curr_blk_in_use(pkc, pbd)) {
1064 			/* Can't record this packet */
1065 			return NULL;
1066 		} else {
1067 			/*
1068 			 * Ok, the block was released by user-space.
1069 			 * Now let's open that block.
1070 			 * opening a block also thaws the queue.
1071 			 * Thawing is a side effect.
1072 			 */
1073 			prb_open_block(pkc, pbd);
1074 		}
1075 	}
1076 
1077 	smp_mb();
1078 	curr = pkc->nxt_offset;
1079 	pkc->skb = skb;
1080 	end = (char *)pbd + pkc->kblk_size;
1081 
1082 	/* first try the current block */
1083 	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1084 		prb_fill_curr_block(curr, pkc, pbd, len);
1085 		return (void *)curr;
1086 	}
1087 
1088 	/* Ok, close the current block */
1089 	prb_retire_current_block(pkc, po, 0);
1090 
1091 	/* Now, try to dispatch the next block */
1092 	curr = (char *)prb_dispatch_next_block(pkc, po);
1093 	if (curr) {
1094 		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1095 		prb_fill_curr_block(curr, pkc, pbd, len);
1096 		return (void *)curr;
1097 	}
1098 
1099 	/*
	 * No free blocks are available. User-space hasn't caught up yet.
1101 	 * Queue was just frozen and now this packet will get dropped.
1102 	 */
1103 	return NULL;
1104 }
1105 
static void *packet_current_rx_frame(struct packet_sock *po,
1107 					    struct sk_buff *skb,
1108 					    int status, unsigned int len)
1109 {
1110 	char *curr = NULL;
1111 	switch (po->tp_version) {
1112 	case TPACKET_V1:
1113 	case TPACKET_V2:
1114 		curr = packet_lookup_frame(po, &po->rx_ring,
1115 					po->rx_ring.head, status);
1116 		return curr;
1117 	case TPACKET_V3:
1118 		return __packet_lookup_frame_in_block(po, skb, status, len);
1119 	default:
1120 		WARN(1, "TPACKET version not supported\n");
1121 		BUG();
1122 		return NULL;
1123 	}
1124 }
1125 
static void *prb_lookup_block(struct packet_sock *po,
1127 				     struct packet_ring_buffer *rb,
1128 				     unsigned int idx,
1129 				     int status)
1130 {
1131 	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1132 	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1133 
1134 	if (status != BLOCK_STATUS(pbd))
1135 		return NULL;
1136 	return pbd;
1137 }
1138 
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1140 {
1141 	unsigned int prev;
1142 	if (rb->prb_bdqc.kactive_blk_num)
1143 		prev = rb->prb_bdqc.kactive_blk_num-1;
1144 	else
1145 		prev = rb->prb_bdqc.knum_blocks-1;
1146 	return prev;
1147 }
1148 
1149 /* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
1151 					 struct packet_ring_buffer *rb,
1152 					 int status)
1153 {
1154 	unsigned int previous = prb_previous_blk_num(rb);
1155 	return prb_lookup_block(po, rb, previous, status);
1156 }
1157 
static void *packet_previous_rx_frame(struct packet_sock *po,
1159 					     struct packet_ring_buffer *rb,
1160 					     int status)
1161 {
1162 	if (po->tp_version <= TPACKET_V2)
1163 		return packet_previous_frame(po, rb, status);
1164 
1165 	return __prb_previous_block(po, rb, status);
1166 }
1167 
static void packet_increment_rx_head(struct packet_sock *po,
1169 					    struct packet_ring_buffer *rb)
1170 {
1171 	switch (po->tp_version) {
1172 	case TPACKET_V1:
1173 	case TPACKET_V2:
1174 		return packet_increment_head(rb);
1175 	case TPACKET_V3:
1176 	default:
1177 		WARN(1, "TPACKET version not supported.\n");
1178 		BUG();
1179 		return;
1180 	}
1181 }
1182 
static void *packet_previous_frame(struct packet_sock *po,
1184 		struct packet_ring_buffer *rb,
1185 		int status)
1186 {
1187 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1188 	return packet_lookup_frame(po, rb, previous, status);
1189 }
1190 
static void packet_increment_head(struct packet_ring_buffer *buff)
1192 {
1193 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1194 }
1195 
static void packet_inc_pending(struct packet_ring_buffer *rb)
1197 {
1198 	this_cpu_inc(*rb->pending_refcnt);
1199 }
1200 
static void packet_dec_pending(struct packet_ring_buffer *rb)
1202 {
1203 	this_cpu_dec(*rb->pending_refcnt);
1204 }
1205 
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1207 {
1208 	unsigned int refcnt = 0;
1209 	int cpu;
1210 
1211 	/* We don't use pending refcount in rx_ring. */
1212 	if (rb->pending_refcnt == NULL)
1213 		return 0;
1214 
1215 	for_each_possible_cpu(cpu)
1216 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1217 
1218 	return refcnt;
1219 }
1220 
static int packet_alloc_pending(struct packet_sock *po)
1222 {
1223 	po->rx_ring.pending_refcnt = NULL;
1224 
1225 	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1226 	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1227 		return -ENOBUFS;
1228 
1229 	return 0;
1230 }
1231 
static void packet_free_pending(struct packet_sock *po)
1233 {
1234 	free_percpu(po->tx_ring.pending_refcnt);
1235 }
1236 
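/* Used by the fanout code (rollover demux and the PACKET_FANOUT_FLAG_ROLLOVER
 * check) to decide whether a member socket can still accept this skb: for a
 * plain packet_rcv socket that means receive-buffer space, for a ring socket
 * an available frame (V1/V2) or block (V3).
 */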
static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1238 {
1239 	struct sock *sk = &po->sk;
1240 	bool has_room;
1241 
1242 	if (po->prot_hook.func != tpacket_rcv)
1243 		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1244 			<= sk->sk_rcvbuf;
1245 
1246 	spin_lock(&sk->sk_receive_queue.lock);
1247 	if (po->tp_version == TPACKET_V3)
1248 		has_room = prb_lookup_block(po, &po->rx_ring,
1249 					    po->rx_ring.prb_bdqc.kactive_blk_num,
1250 					    TP_STATUS_KERNEL);
1251 	else
1252 		has_room = packet_lookup_frame(po, &po->rx_ring,
1253 					       po->rx_ring.head,
1254 					       TP_STATUS_KERNEL);
1255 	spin_unlock(&sk->sk_receive_queue.lock);
1256 
1257 	return has_room;
1258 }
1259 
static void packet_sock_destruct(struct sock *sk)
1261 {
1262 	skb_queue_purge(&sk->sk_error_queue);
1263 
1264 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1265 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1266 
1267 	if (!sock_flag(sk, SOCK_DEAD)) {
1268 		pr_err("Attempt to release alive packet socket: %p\n", sk);
1269 		return;
1270 	}
1271 
1272 	sk_refcnt_debug_dec(sk);
1273 }
1274 
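/* PACKET_FANOUT load-balancing policies.  Each demux helper maps an skb to
 * an index in f->arr[]: HASH uses the flow hash, LB round-robins, CPU uses
 * the receiving CPU, RND picks at random, QM uses the recorded queue
 * mapping, and ROLLOVER walks the members looking for one with room.
 */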
static unsigned int fanout_demux_hash(struct packet_fanout *f,
1276 				      struct sk_buff *skb,
1277 				      unsigned int num)
1278 {
1279 	return reciprocal_scale(skb_get_hash(skb), num);
1280 }
1281 
static unsigned int fanout_demux_lb(struct packet_fanout *f,
1283 				    struct sk_buff *skb,
1284 				    unsigned int num)
1285 {
1286 	unsigned int val = atomic_inc_return(&f->rr_cur);
1287 
1288 	return val % num;
1289 }
1290 
static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1292 				     struct sk_buff *skb,
1293 				     unsigned int num)
1294 {
1295 	return smp_processor_id() % num;
1296 }
1297 
static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1299 				     struct sk_buff *skb,
1300 				     unsigned int num)
1301 {
1302 	return prandom_u32_max(num);
1303 }
1304 
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1306 					  struct sk_buff *skb,
1307 					  unsigned int idx, unsigned int skip,
1308 					  unsigned int num)
1309 {
1310 	unsigned int i, j;
1311 
1312 	i = j = min_t(int, f->next[idx], num - 1);
1313 	do {
1314 		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1315 			if (i != j)
1316 				f->next[idx] = i;
1317 			return i;
1318 		}
1319 		if (++i == num)
1320 			i = 0;
1321 	} while (i != j);
1322 
1323 	return idx;
1324 }
1325 
static unsigned int fanout_demux_qm(struct packet_fanout *f,
1327 				    struct sk_buff *skb,
1328 				    unsigned int num)
1329 {
1330 	return skb_get_queue_mapping(skb) % num;
1331 }
1332 
static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1334 {
1335 	return f->flags & (flag >> 8);
1336 }
1337 
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1339 			     struct packet_type *pt, struct net_device *orig_dev)
1340 {
1341 	struct packet_fanout *f = pt->af_packet_priv;
1342 	unsigned int num = READ_ONCE(f->num_members);
1343 	struct packet_sock *po;
1344 	unsigned int idx;
1345 
1346 	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1347 	    !num) {
1348 		kfree_skb(skb);
1349 		return 0;
1350 	}
1351 
1352 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1353 		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1354 		if (!skb)
1355 			return 0;
1356 	}
1357 	switch (f->type) {
1358 	case PACKET_FANOUT_HASH:
1359 	default:
1360 		idx = fanout_demux_hash(f, skb, num);
1361 		break;
1362 	case PACKET_FANOUT_LB:
1363 		idx = fanout_demux_lb(f, skb, num);
1364 		break;
1365 	case PACKET_FANOUT_CPU:
1366 		idx = fanout_demux_cpu(f, skb, num);
1367 		break;
1368 	case PACKET_FANOUT_RND:
1369 		idx = fanout_demux_rnd(f, skb, num);
1370 		break;
1371 	case PACKET_FANOUT_QM:
1372 		idx = fanout_demux_qm(f, skb, num);
1373 		break;
1374 	case PACKET_FANOUT_ROLLOVER:
1375 		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
1376 		break;
1377 	}
1378 
1379 	po = pkt_sk(f->arr[idx]);
1380 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1381 	    unlikely(!packet_rcv_has_room(po, skb))) {
1382 		idx = fanout_demux_rollover(f, skb, idx, idx, num);
1383 		po = pkt_sk(f->arr[idx]);
1384 	}
1385 
1386 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1387 }
1388 
1389 DEFINE_MUTEX(fanout_mutex);
1390 EXPORT_SYMBOL_GPL(fanout_mutex);
1391 static LIST_HEAD(fanout_list);
1392 
static void __fanout_link(struct sock *sk, struct packet_sock *po)
1394 {
1395 	struct packet_fanout *f = po->fanout;
1396 
1397 	spin_lock(&f->lock);
1398 	f->arr[f->num_members] = sk;
1399 	smp_wmb();
1400 	f->num_members++;
1401 	spin_unlock(&f->lock);
1402 }
1403 
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1405 {
1406 	struct packet_fanout *f = po->fanout;
1407 	int i;
1408 
1409 	spin_lock(&f->lock);
1410 	for (i = 0; i < f->num_members; i++) {
1411 		if (f->arr[i] == sk)
1412 			break;
1413 	}
1414 	BUG_ON(i >= f->num_members);
1415 	f->arr[i] = f->arr[f->num_members - 1];
1416 	f->num_members--;
1417 	spin_unlock(&f->lock);
1418 }
1419 
static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1421 {
1422 	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1423 		return true;
1424 
1425 	return false;
1426 }
1427 
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1429 {
1430 	struct packet_sock *po = pkt_sk(sk);
1431 	struct packet_fanout *f, *match;
1432 	u8 type = type_flags & 0xff;
1433 	u8 flags = type_flags >> 8;
1434 	int err;
1435 
1436 	switch (type) {
1437 	case PACKET_FANOUT_ROLLOVER:
1438 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1439 			return -EINVAL;
1440 	case PACKET_FANOUT_HASH:
1441 	case PACKET_FANOUT_LB:
1442 	case PACKET_FANOUT_CPU:
1443 	case PACKET_FANOUT_RND:
1444 	case PACKET_FANOUT_QM:
1445 		break;
1446 	default:
1447 		return -EINVAL;
1448 	}
1449 
1450 	if (!po->running)
1451 		return -EINVAL;
1452 
1453 	if (po->fanout)
1454 		return -EALREADY;
1455 
1456 	mutex_lock(&fanout_mutex);
1457 	match = NULL;
1458 	list_for_each_entry(f, &fanout_list, list) {
1459 		if (f->id == id &&
1460 		    read_pnet(&f->net) == sock_net(sk)) {
1461 			match = f;
1462 			break;
1463 		}
1464 	}
1465 	err = -EINVAL;
1466 	if (match && match->flags != flags)
1467 		goto out;
1468 	if (!match) {
1469 		err = -ENOMEM;
1470 		match = kzalloc(sizeof(*match), GFP_KERNEL);
1471 		if (!match)
1472 			goto out;
1473 		write_pnet(&match->net, sock_net(sk));
1474 		match->id = id;
1475 		match->type = type;
1476 		match->flags = flags;
1477 		atomic_set(&match->rr_cur, 0);
1478 		INIT_LIST_HEAD(&match->list);
1479 		spin_lock_init(&match->lock);
1480 		atomic_set(&match->sk_ref, 0);
1481 		match->prot_hook.type = po->prot_hook.type;
1482 		match->prot_hook.dev = po->prot_hook.dev;
1483 		match->prot_hook.func = packet_rcv_fanout;
1484 		match->prot_hook.af_packet_priv = match;
1485 		match->prot_hook.id_match = match_fanout_group;
1486 		dev_add_pack(&match->prot_hook);
1487 		list_add(&match->list, &fanout_list);
1488 	}
1489 	err = -EINVAL;
1490 	if (match->type == type &&
1491 	    match->prot_hook.type == po->prot_hook.type &&
1492 	    match->prot_hook.dev == po->prot_hook.dev) {
1493 		err = -ENOSPC;
1494 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1495 			__dev_remove_pack(&po->prot_hook);
1496 			po->fanout = match;
1497 			atomic_inc(&match->sk_ref);
1498 			__fanout_link(sk, po);
1499 			err = 0;
1500 		}
1501 	}
1502 out:
1503 	mutex_unlock(&fanout_mutex);
1504 	return err;
1505 }
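
/* Illustration only (user space): a socket joins a fanout group with
 * setsockopt() once it is bound and running; the group id below is just an
 * example.
 *
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */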
1506 
static void fanout_release(struct sock *sk)
1508 {
1509 	struct packet_sock *po = pkt_sk(sk);
1510 	struct packet_fanout *f;
1511 
1512 	f = po->fanout;
1513 	if (!f)
1514 		return;
1515 
1516 	mutex_lock(&fanout_mutex);
1517 	po->fanout = NULL;
1518 
1519 	if (atomic_dec_and_test(&f->sk_ref)) {
1520 		list_del(&f->list);
1521 		dev_remove_pack(&f->prot_hook);
1522 		kfree(f);
1523 	}
1524 	mutex_unlock(&fanout_mutex);
1525 }
1526 
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1528 					  struct sk_buff *skb)
1529 {
1530 	/* Earlier code assumed this would be a VLAN pkt, double-check
1531 	 * this now that we have the actual packet in hand. We can only
1532 	 * do this check on Ethernet devices.
1533 	 */
1534 	if (unlikely(dev->type != ARPHRD_ETHER))
1535 		return false;
1536 
1537 	skb_reset_mac_header(skb);
1538 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1539 }
1540 
1541 static const struct proto_ops packet_ops;
1542 
1543 static const struct proto_ops packet_ops_spkt;
1544 
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1546 			   struct packet_type *pt, struct net_device *orig_dev)
1547 {
1548 	struct sock *sk;
1549 	struct sockaddr_pkt *spkt;
1550 
1551 	/*
1552 	 *	When we registered the protocol we saved the socket in the data
1553 	 *	field for just this event.
1554 	 */
1555 
1556 	sk = pt->af_packet_priv;
1557 
1558 	/*
1559 	 *	Yank back the headers [hope the device set this
1560 	 *	right or kerboom...]
1561 	 *
1562 	 *	Incoming packets have ll header pulled,
1563 	 *	push it back.
1564 	 *
1565 	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
1567 	 */
1568 
1569 	if (skb->pkt_type == PACKET_LOOPBACK)
1570 		goto out;
1571 
1572 	if (!net_eq(dev_net(dev), sock_net(sk)))
1573 		goto out;
1574 
1575 	skb = skb_share_check(skb, GFP_ATOMIC);
1576 	if (skb == NULL)
1577 		goto oom;
1578 
1579 	/* drop any routing info */
1580 	skb_dst_drop(skb);
1581 
1582 	/* drop conntrack reference */
1583 	nf_reset(skb);
1584 
1585 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1586 
1587 	skb_push(skb, skb->data - skb_mac_header(skb));
1588 
1589 	/*
1590 	 *	The SOCK_PACKET socket receives _all_ frames.
1591 	 */
1592 
1593 	spkt->spkt_family = dev->type;
1594 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1595 	spkt->spkt_protocol = skb->protocol;
1596 
1597 	/*
1598 	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets from using up all the memory.
1600 	 */
1601 
1602 	if (sock_queue_rcv_skb(sk, skb) == 0)
1603 		return 0;
1604 
1605 out:
1606 	kfree_skb(skb);
1607 oom:
1608 	return 0;
1609 }
1610 
1611 
1612 /*
1613  *	Output a raw packet to a device layer. This bypasses all the other
1614  *	protocol layers and you must therefore supply it with a complete frame
1615  */
1616 
static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1618 			       size_t len)
1619 {
1620 	struct sock *sk = sock->sk;
1621 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1622 	struct sk_buff *skb = NULL;
1623 	struct net_device *dev;
1624 	__be16 proto = 0;
1625 	int err;
1626 	int extra_len = 0;
1627 
1628 	/*
1629 	 *	Get and verify the address.
1630 	 */
1631 
1632 	if (saddr) {
1633 		if (msg->msg_namelen < sizeof(struct sockaddr))
1634 			return -EINVAL;
1635 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1636 			proto = saddr->spkt_protocol;
1637 	} else
1638 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1639 
1640 	/*
1641 	 *	Find the device first to size check it
1642 	 */
1643 
1644 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1645 retry:
1646 	rcu_read_lock();
1647 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1648 	err = -ENODEV;
1649 	if (dev == NULL)
1650 		goto out_unlock;
1651 
1652 	err = -ENETDOWN;
1653 	if (!(dev->flags & IFF_UP))
1654 		goto out_unlock;
1655 
1656 	/*
1657 	 * You may not queue a frame bigger than the mtu. This is the lowest level
1658 	 * raw protocol and you must do your own fragmentation at this level.
1659 	 */
1660 
1661 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1662 		if (!netif_supports_nofcs(dev)) {
1663 			err = -EPROTONOSUPPORT;
1664 			goto out_unlock;
1665 		}
1666 		extra_len = 4; /* We're doing our own CRC */
1667 	}
1668 
1669 	err = -EMSGSIZE;
1670 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1671 		goto out_unlock;
1672 
1673 	if (!skb) {
1674 		size_t reserved = LL_RESERVED_SPACE(dev);
1675 		int tlen = dev->needed_tailroom;
1676 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1677 
1678 		rcu_read_unlock();
1679 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1680 		if (skb == NULL)
1681 			return -ENOBUFS;
1682 		/* FIXME: Save some space for broken drivers that write a hard
1683 		 * header at transmission time by themselves. PPP is the notable
1684 		 * one here. This should really be fixed at the driver level.
1685 		 */
1686 		skb_reserve(skb, reserved);
1687 		skb_reset_network_header(skb);
1688 
1689 		/* Try to align data part correctly */
1690 		if (hhlen) {
1691 			skb->data -= hhlen;
1692 			skb->tail -= hhlen;
1693 			if (len < hhlen)
1694 				skb_reset_network_header(skb);
1695 		}
1696 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1697 		if (err)
1698 			goto out_free;
1699 		goto retry;
1700 	}
1701 
1702 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1703 	    !packet_extra_vlan_len_allowed(dev, skb)) {
1704 		err = -EMSGSIZE;
1705 		goto out_unlock;
1706 	}
1707 
1708 	skb->protocol = proto;
1709 	skb->dev = dev;
1710 	skb->priority = sk->sk_priority;
1711 	skb->mark = sk->sk_mark;
1712 
1713 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1714 
1715 	if (unlikely(extra_len == 4))
1716 		skb->no_fcs = 1;
1717 
1718 	skb_probe_transport_header(skb, 0);
1719 
1720 	dev_queue_xmit(skb);
1721 	rcu_read_unlock();
1722 	return len;
1723 
1724 out_unlock:
1725 	rcu_read_unlock();
1726 out_free:
1727 	kfree_skb(skb);
1728 	return err;
1729 }
1730 
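/* Run the BPF socket filter attached to the socket (e.g. via
 * SO_ATTACH_FILTER), if any, over the skb.  The result is the number of
 * bytes to keep: callers use it to cap the snap length, and 0 means drop
 * the packet.  With no filter attached, the passed-in length is returned
 * unchanged.
 */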
static unsigned int run_filter(const struct sk_buff *skb,
1732 				      const struct sock *sk,
1733 				      unsigned int res)
1734 {
1735 	struct sk_filter *filter;
1736 
1737 	rcu_read_lock();
1738 	filter = rcu_dereference(sk->sk_filter);
1739 	if (filter != NULL)
1740 		res = SK_RUN_FILTER(filter, skb);
1741 	rcu_read_unlock();
1742 
1743 	return res;
1744 }
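
/* run_filter() above executes whatever classic BPF program userspace attached
 * to the socket. A minimal sketch (not part of this file; the helper name is
 * illustrative) of attaching a trivial "snap the first 256 bytes" filter with
 * SO_ATTACH_FILTER:
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	// Classic BPF: unconditionally return 256, i.e. accept every packet
 *	// and cap the snapshot length at 256 bytes.
 *	static struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 256),
 *	};
 *
 *	static int attach_filter(int fd)
 *	{
 *		struct sock_fprog prog = {
 *			.len	= sizeof(code) / sizeof(code[0]),
 *			.filter	= code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */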
1745 
1746 /*
1747  * This function does lazy skb cloning in the hope that most packets
1748  * are discarded by BPF.
1749  *
1750  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1751  * and skb->cb are mangled. This works because (and as long as) packets
1752  * falling here are owned by the current CPU. Output packets are cloned
1753  * by dev_queue_xmit_nit(), and input packets are processed by net_bh
1754  * sequentially, so that if we return the skb to its original state on exit,
1755  * we will not harm anyone.
1756  */
1757 
1758 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1759 		      struct packet_type *pt, struct net_device *orig_dev)
1760 {
1761 	struct sock *sk;
1762 	struct sockaddr_ll *sll;
1763 	struct packet_sock *po;
1764 	u8 *skb_head = skb->data;
1765 	int skb_len = skb->len;
1766 	unsigned int snaplen, res;
1767 
1768 	if (skb->pkt_type == PACKET_LOOPBACK)
1769 		goto drop;
1770 
1771 	sk = pt->af_packet_priv;
1772 	po = pkt_sk(sk);
1773 
1774 	if (!net_eq(dev_net(dev), sock_net(sk)))
1775 		goto drop;
1776 
1777 	skb->dev = dev;
1778 
1779 	if (dev->header_ops) {
1780 		/* The device has an explicit notion of ll header,
1781 		 * exported to higher levels.
1782 		 *
1783 		 * Otherwise, the device hides the details of its frame
1784 		 * structure, so that the corresponding packet head is
1785 		 * never delivered to the user.
1786 		 */
1787 		if (sk->sk_type != SOCK_DGRAM)
1788 			skb_push(skb, skb->data - skb_mac_header(skb));
1789 		else if (skb->pkt_type == PACKET_OUTGOING) {
1790 			/* Special case: outgoing packets have ll header at head */
1791 			skb_pull(skb, skb_network_offset(skb));
1792 		}
1793 	}
1794 
1795 	snaplen = skb->len;
1796 
1797 	res = run_filter(skb, sk, snaplen);
1798 	if (!res)
1799 		goto drop_n_restore;
1800 	if (snaplen > res)
1801 		snaplen = res;
1802 
1803 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1804 		goto drop_n_acct;
1805 
1806 	if (skb_shared(skb)) {
1807 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1808 		if (nskb == NULL)
1809 			goto drop_n_acct;
1810 
1811 		if (skb_head != skb->data) {
1812 			skb->data = skb_head;
1813 			skb->len = skb_len;
1814 		}
1815 		consume_skb(skb);
1816 		skb = nskb;
1817 	}
1818 
1819 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
1820 
1821 	sll = &PACKET_SKB_CB(skb)->sa.ll;
1822 	sll->sll_hatype = dev->type;
1823 	sll->sll_pkttype = skb->pkt_type;
1824 	if (unlikely(po->origdev))
1825 		sll->sll_ifindex = orig_dev->ifindex;
1826 	else
1827 		sll->sll_ifindex = dev->ifindex;
1828 
1829 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1830 
1831 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1832 	 * Use their space for storing the original skb length.
1833 	 */
1834 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
1835 
1836 	if (pskb_trim(skb, snaplen))
1837 		goto drop_n_acct;
1838 
1839 	skb_set_owner_r(skb, sk);
1840 	skb->dev = NULL;
1841 	skb_dst_drop(skb);
1842 
1843 	/* drop conntrack reference */
1844 	nf_reset(skb);
1845 
1846 	spin_lock(&sk->sk_receive_queue.lock);
1847 	po->stats.stats1.tp_packets++;
1848 	sock_skb_set_dropcount(sk, skb);
1849 	__skb_queue_tail(&sk->sk_receive_queue, skb);
1850 	spin_unlock(&sk->sk_receive_queue.lock);
1851 	sk->sk_data_ready(sk);
1852 	return 0;
1853 
1854 drop_n_acct:
1855 	spin_lock(&sk->sk_receive_queue.lock);
1856 	po->stats.stats1.tp_drops++;
1857 	atomic_inc(&sk->sk_drops);
1858 	spin_unlock(&sk->sk_receive_queue.lock);
1859 
1860 drop_n_restore:
1861 	if (skb_head != skb->data && skb_shared(skb)) {
1862 		skb->data = skb_head;
1863 		skb->len = skb_len;
1864 	}
1865 drop:
1866 	consume_skb(skb);
1867 	return 0;
1868 }
1869 
1870 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1871 		       struct packet_type *pt, struct net_device *orig_dev)
1872 {
1873 	struct sock *sk;
1874 	struct packet_sock *po;
1875 	struct sockaddr_ll *sll;
1876 	union tpacket_uhdr h;
1877 	u8 *skb_head = skb->data;
1878 	int skb_len = skb->len;
1879 	unsigned int snaplen, res;
1880 	unsigned long status = TP_STATUS_USER;
1881 	unsigned short macoff, netoff, hdrlen;
1882 	struct sk_buff *copy_skb = NULL;
1883 	struct timespec ts;
1884 	__u32 ts_status;
1885 
1886 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1887 	 * New members may be added to them up to the current aligned size without
1888 	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1889 	 */
1890 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1891 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1892 
1893 	if (skb->pkt_type == PACKET_LOOPBACK)
1894 		goto drop;
1895 
1896 	sk = pt->af_packet_priv;
1897 	po = pkt_sk(sk);
1898 
1899 	if (!net_eq(dev_net(dev), sock_net(sk)))
1900 		goto drop;
1901 
1902 	if (dev->header_ops) {
1903 		if (sk->sk_type != SOCK_DGRAM)
1904 			skb_push(skb, skb->data - skb_mac_header(skb));
1905 		else if (skb->pkt_type == PACKET_OUTGOING) {
1906 			/* Special case: outgoing packets have ll header at head */
1907 			skb_pull(skb, skb_network_offset(skb));
1908 		}
1909 	}
1910 
1911 	snaplen = skb->len;
1912 
1913 	res = run_filter(skb, sk, snaplen);
1914 	if (!res)
1915 		goto drop_n_restore;
1916 
1917 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1918 		status |= TP_STATUS_CSUMNOTREADY;
1919 	else if (skb->pkt_type != PACKET_OUTGOING &&
1920 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
1921 		  skb_csum_unnecessary(skb)))
1922 		status |= TP_STATUS_CSUM_VALID;
1923 
1924 	if (snaplen > res)
1925 		snaplen = res;
1926 
1927 	if (sk->sk_type == SOCK_DGRAM) {
1928 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1929 				  po->tp_reserve;
1930 	} else {
1931 		unsigned int maclen = skb_network_offset(skb);
1932 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
1933 				       (maclen < 16 ? 16 : maclen)) +
1934 			po->tp_reserve;
1935 		macoff = netoff - maclen;
1936 	}
1937 	if (po->tp_version <= TPACKET_V2) {
1938 		if (macoff + snaplen > po->rx_ring.frame_size) {
1939 			if (po->copy_thresh &&
1940 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1941 				if (skb_shared(skb)) {
1942 					copy_skb = skb_clone(skb, GFP_ATOMIC);
1943 				} else {
1944 					copy_skb = skb_get(skb);
1945 					skb_head = skb->data;
1946 				}
1947 				if (copy_skb)
1948 					skb_set_owner_r(copy_skb, sk);
1949 			}
1950 			snaplen = po->rx_ring.frame_size - macoff;
1951 			if ((int)snaplen < 0)
1952 				snaplen = 0;
1953 		}
1954 	} else if (unlikely(macoff + snaplen >
1955 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1956 		u32 nval;
1957 
1958 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1959 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1960 			    snaplen, nval, macoff);
1961 		snaplen = nval;
1962 		if (unlikely((int)snaplen < 0)) {
1963 			snaplen = 0;
1964 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1965 		}
1966 	}
1967 	spin_lock(&sk->sk_receive_queue.lock);
1968 	h.raw = packet_current_rx_frame(po, skb,
1969 					TP_STATUS_KERNEL, (macoff+snaplen));
1970 	if (!h.raw)
1971 		goto ring_is_full;
1972 	if (po->tp_version <= TPACKET_V2) {
1973 		packet_increment_rx_head(po, &po->rx_ring);
1974 	/*
1975 	 * LOSING will be reported until you read the stats,
1976 	 * because it is COR - Clear On Read.
1977 	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1978 	 * at the packet level.
1979 	 */
1980 		if (po->stats.stats1.tp_drops)
1981 			status |= TP_STATUS_LOSING;
1982 	}
1983 	po->stats.stats1.tp_packets++;
1984 	if (copy_skb) {
1985 		status |= TP_STATUS_COPY;
1986 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1987 	}
1988 	spin_unlock(&sk->sk_receive_queue.lock);
1989 
1990 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1991 
1992 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
1993 		getnstimeofday(&ts);
1994 
1995 	status |= ts_status;
1996 
1997 	switch (po->tp_version) {
1998 	case TPACKET_V1:
1999 		h.h1->tp_len = skb->len;
2000 		h.h1->tp_snaplen = snaplen;
2001 		h.h1->tp_mac = macoff;
2002 		h.h1->tp_net = netoff;
2003 		h.h1->tp_sec = ts.tv_sec;
2004 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2005 		hdrlen = sizeof(*h.h1);
2006 		break;
2007 	case TPACKET_V2:
2008 		h.h2->tp_len = skb->len;
2009 		h.h2->tp_snaplen = snaplen;
2010 		h.h2->tp_mac = macoff;
2011 		h.h2->tp_net = netoff;
2012 		h.h2->tp_sec = ts.tv_sec;
2013 		h.h2->tp_nsec = ts.tv_nsec;
2014 		if (skb_vlan_tag_present(skb)) {
2015 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2016 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2017 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2018 		} else {
2019 			h.h2->tp_vlan_tci = 0;
2020 			h.h2->tp_vlan_tpid = 0;
2021 		}
2022 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2023 		hdrlen = sizeof(*h.h2);
2024 		break;
2025 	case TPACKET_V3:
2026 		/* tp_nxt_offset and the vlan fields are already populated above,
2027 		 * so DON'T clear those fields here.
2028 		 */
2029 		h.h3->tp_status |= status;
2030 		h.h3->tp_len = skb->len;
2031 		h.h3->tp_snaplen = snaplen;
2032 		h.h3->tp_mac = macoff;
2033 		h.h3->tp_net = netoff;
2034 		h.h3->tp_sec  = ts.tv_sec;
2035 		h.h3->tp_nsec = ts.tv_nsec;
2036 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2037 		hdrlen = sizeof(*h.h3);
2038 		break;
2039 	default:
2040 		BUG();
2041 	}
2042 
2043 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2044 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2045 	sll->sll_family = AF_PACKET;
2046 	sll->sll_hatype = dev->type;
2047 	sll->sll_protocol = skb->protocol;
2048 	sll->sll_pkttype = skb->pkt_type;
2049 	if (unlikely(po->origdev))
2050 		sll->sll_ifindex = orig_dev->ifindex;
2051 	else
2052 		sll->sll_ifindex = dev->ifindex;
2053 
2054 	smp_mb();
2055 
2056 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2057 	if (po->tp_version <= TPACKET_V2) {
2058 		u8 *start, *end;
2059 
2060 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2061 					macoff + snaplen);
2062 
2063 		for (start = h.raw; start < end; start += PAGE_SIZE)
2064 			flush_dcache_page(pgv_to_page(start));
2065 	}
2066 	smp_wmb();
2067 #endif
2068 
2069 	if (po->tp_version <= TPACKET_V2) {
2070 		__packet_set_status(po, h.raw, status);
2071 		sk->sk_data_ready(sk);
2072 	} else {
2073 		prb_clear_blk_fill_status(&po->rx_ring);
2074 	}
2075 
2076 drop_n_restore:
2077 	if (skb_head != skb->data && skb_shared(skb)) {
2078 		skb->data = skb_head;
2079 		skb->len = skb_len;
2080 	}
2081 drop:
2082 	kfree_skb(skb);
2083 	return 0;
2084 
2085 ring_is_full:
2086 	po->stats.stats1.tp_drops++;
2087 	spin_unlock(&sk->sk_receive_queue.lock);
2088 
2089 	sk->sk_data_ready(sk);
2090 	kfree_skb(copy_skb);
2091 	goto drop_n_restore;
2092 }
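
/* Userspace consumes frames filled in by tpacket_rcv() through a mapped RX
 * ring. A rough sketch (not part of this file) for TPACKET_V2; helper names
 * and the consume() callback are illustrative assumptions, and 'req' is
 * expected to hold sane, page-aligned block/frame sizes.
 *
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static int setup_rx_ring(int fd, struct tpacket_req *req, void **ring)
 *	{
 *		int ver = TPACKET_V2;
 *
 *		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
 *			return -1;
 *		if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
 *			return -1;
 *		*ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		return *ring == MAP_FAILED ? -1 : 0;
 *	}
 *
 *	// Per frame, once tpacket_rcv() has flipped the slot to TP_STATUS_USER:
 *	//	struct tpacket2_hdr *hdr = frame;
 *	//	if (hdr->tp_status & TP_STATUS_USER) {
 *	//		consume((char *)frame + hdr->tp_mac, hdr->tp_snaplen);
 *	//		hdr->tp_status = TP_STATUS_KERNEL;	// hand slot back
 *	//	}
 */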
2093 
2094 static void tpacket_destruct_skb(struct sk_buff *skb)
2095 {
2096 	struct packet_sock *po = pkt_sk(skb->sk);
2097 
2098 	if (likely(po->tx_ring.pg_vec)) {
2099 		void *ph;
2100 		__u32 ts;
2101 
2102 		ph = skb_shinfo(skb)->destructor_arg;
2103 		packet_dec_pending(&po->tx_ring);
2104 
2105 		ts = __packet_set_timestamp(po, ph, skb);
2106 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2107 	}
2108 
2109 	sock_wfree(skb);
2110 }
2111 
2112 static bool ll_header_truncated(const struct net_device *dev, int len)
2113 {
2114 	/* net device doesn't like empty head */
2115 	if (unlikely(len <= dev->hard_header_len)) {
2116 		net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
2117 				     current->comm, len, dev->hard_header_len);
2118 		return true;
2119 	}
2120 
2121 	return false;
2122 }
2123 
2124 static void tpacket_set_protocol(const struct net_device *dev,
2125 				 struct sk_buff *skb)
2126 {
2127 	if (dev->type == ARPHRD_ETHER) {
2128 		skb_reset_mac_header(skb);
2129 		skb->protocol = eth_hdr(skb)->h_proto;
2130 	}
2131 }
2132 
2133 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2134 		void *frame, struct net_device *dev, int size_max,
2135 		__be16 proto, unsigned char *addr, int hlen)
2136 {
2137 	union tpacket_uhdr ph;
2138 	int to_write, offset, len, tp_len, nr_frags, len_max;
2139 	struct socket *sock = po->sk.sk_socket;
2140 	struct page *page;
2141 	void *data;
2142 	int err;
2143 
2144 	ph.raw = frame;
2145 
2146 	skb->protocol = proto;
2147 	skb->dev = dev;
2148 	skb->priority = po->sk.sk_priority;
2149 	skb->mark = po->sk.sk_mark;
2150 	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2151 	skb_shinfo(skb)->destructor_arg = ph.raw;
2152 
2153 	switch (po->tp_version) {
2154 	case TPACKET_V2:
2155 		tp_len = ph.h2->tp_len;
2156 		break;
2157 	default:
2158 		tp_len = ph.h1->tp_len;
2159 		break;
2160 	}
2161 	if (unlikely(tp_len > size_max)) {
2162 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2163 		return -EMSGSIZE;
2164 	}
2165 
2166 	skb_reserve(skb, hlen);
2167 	skb_reset_network_header(skb);
2168 
2169 	if (unlikely(po->tp_tx_has_off)) {
2170 		int off_min, off_max, off;
2171 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2172 		off_max = po->tx_ring.frame_size - tp_len;
2173 		if (sock->type == SOCK_DGRAM) {
2174 			switch (po->tp_version) {
2175 			case TPACKET_V2:
2176 				off = ph.h2->tp_net;
2177 				break;
2178 			default:
2179 				off = ph.h1->tp_net;
2180 				break;
2181 			}
2182 		} else {
2183 			switch (po->tp_version) {
2184 			case TPACKET_V2:
2185 				off = ph.h2->tp_mac;
2186 				break;
2187 			default:
2188 				off = ph.h1->tp_mac;
2189 				break;
2190 			}
2191 		}
2192 		if (unlikely((off < off_min) || (off_max < off)))
2193 			return -EINVAL;
2194 		data = ph.raw + off;
2195 	} else {
2196 		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2197 	}
2198 	to_write = tp_len;
2199 
2200 	if (sock->type == SOCK_DGRAM) {
2201 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2202 				NULL, tp_len);
2203 		if (unlikely(err < 0))
2204 			return -EINVAL;
2205 	} else if (dev->hard_header_len) {
2206 		if (ll_header_truncated(dev, tp_len))
2207 			return -EINVAL;
2208 
2209 		skb_push(skb, dev->hard_header_len);
2210 		err = skb_store_bits(skb, 0, data,
2211 				dev->hard_header_len);
2212 		if (unlikely(err))
2213 			return err;
2214 		if (!skb->protocol)
2215 			tpacket_set_protocol(dev, skb);
2216 
2217 		data += dev->hard_header_len;
2218 		to_write -= dev->hard_header_len;
2219 	}
2220 
2221 	offset = offset_in_page(data);
2222 	len_max = PAGE_SIZE - offset;
2223 	len = ((to_write > len_max) ? len_max : to_write);
2224 
2225 	skb->data_len = to_write;
2226 	skb->len += to_write;
2227 	skb->truesize += to_write;
2228 	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2229 
2230 	while (likely(to_write)) {
2231 		nr_frags = skb_shinfo(skb)->nr_frags;
2232 
2233 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2234 			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2235 			       MAX_SKB_FRAGS);
2236 			return -EFAULT;
2237 		}
2238 
2239 		page = pgv_to_page(data);
2240 		data += len;
2241 		flush_dcache_page(page);
2242 		get_page(page);
2243 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2244 		to_write -= len;
2245 		offset = 0;
2246 		len_max = PAGE_SIZE;
2247 		len = ((to_write > len_max) ? len_max : to_write);
2248 	}
2249 
2250 	skb_probe_transport_header(skb, 0);
2251 
2252 	return tp_len;
2253 }
2254 
2255 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2256 {
2257 	struct sk_buff *skb;
2258 	struct net_device *dev;
2259 	__be16 proto;
2260 	int err, reserve = 0;
2261 	void *ph;
2262 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2263 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2264 	int tp_len, size_max;
2265 	unsigned char *addr;
2266 	int len_sum = 0;
2267 	int status = TP_STATUS_AVAILABLE;
2268 	int hlen, tlen;
2269 
2270 	mutex_lock(&po->pg_vec_lock);
2271 
2272 	if (likely(saddr == NULL)) {
2273 		dev	= packet_cached_dev_get(po);
2274 		proto	= po->num;
2275 		addr	= NULL;
2276 	} else {
2277 		err = -EINVAL;
2278 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2279 			goto out;
2280 		if (msg->msg_namelen < (saddr->sll_halen
2281 					+ offsetof(struct sockaddr_ll,
2282 						sll_addr)))
2283 			goto out;
2284 		proto	= saddr->sll_protocol;
2285 		addr	= saddr->sll_addr;
2286 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2287 	}
2288 
2289 	err = -ENXIO;
2290 	if (unlikely(dev == NULL))
2291 		goto out;
2292 	err = -ENETDOWN;
2293 	if (unlikely(!(dev->flags & IFF_UP)))
2294 		goto out_put;
2295 
2296 	if (po->sk.sk_socket->type == SOCK_RAW)
2297 		reserve = dev->hard_header_len;
2298 	size_max = po->tx_ring.frame_size
2299 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2300 
2301 	if (size_max > dev->mtu + reserve + VLAN_HLEN)
2302 		size_max = dev->mtu + reserve + VLAN_HLEN;
2303 
2304 	do {
2305 		ph = packet_current_frame(po, &po->tx_ring,
2306 					  TP_STATUS_SEND_REQUEST);
2307 		if (unlikely(ph == NULL)) {
2308 			if (need_wait && need_resched())
2309 				schedule();
2310 			continue;
2311 		}
2312 
2313 		status = TP_STATUS_SEND_REQUEST;
2314 		hlen = LL_RESERVED_SPACE(dev);
2315 		tlen = dev->needed_tailroom;
2316 		skb = sock_alloc_send_skb(&po->sk,
2317 				hlen + tlen + sizeof(struct sockaddr_ll),
2318 				!need_wait, &err);
2319 
2320 		if (unlikely(skb == NULL)) {
2321 			/* we assume the socket was initially writeable ... */
2322 			if (likely(len_sum > 0))
2323 				err = len_sum;
2324 			goto out_status;
2325 		}
2326 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2327 					  addr, hlen);
2328 		if (likely(tp_len >= 0) &&
2329 		    tp_len > dev->mtu + reserve &&
2330 		    !packet_extra_vlan_len_allowed(dev, skb))
2331 			tp_len = -EMSGSIZE;
2332 
2333 		if (unlikely(tp_len < 0)) {
2334 			if (po->tp_loss) {
2335 				__packet_set_status(po, ph,
2336 						TP_STATUS_AVAILABLE);
2337 				packet_increment_head(&po->tx_ring);
2338 				kfree_skb(skb);
2339 				continue;
2340 			} else {
2341 				status = TP_STATUS_WRONG_FORMAT;
2342 				err = tp_len;
2343 				goto out_status;
2344 			}
2345 		}
2346 
2347 		packet_pick_tx_queue(dev, skb);
2348 
2349 		skb->destructor = tpacket_destruct_skb;
2350 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2351 		packet_inc_pending(&po->tx_ring);
2352 
2353 		status = TP_STATUS_SEND_REQUEST;
2354 		err = po->xmit(skb);
2355 		if (unlikely(err > 0)) {
2356 			err = net_xmit_errno(err);
2357 			if (err && __packet_get_status(po, ph) ==
2358 				   TP_STATUS_AVAILABLE) {
2359 				/* skb was destructed already */
2360 				skb = NULL;
2361 				goto out_status;
2362 			}
2363 			/*
2364 			 * skb was dropped but not destructed yet;
2365 			 * let's treat it like congestion or err < 0
2366 			 */
2367 			err = 0;
2368 		}
2369 		packet_increment_head(&po->tx_ring);
2370 		len_sum += tp_len;
2371 	} while (likely((ph != NULL) ||
2372 		/* Note: packet_read_pending() might be slow if we have
2373 		 * to call it, as it is a per-cpu variable, but in the fast path
2374 		 * we already short-circuit the loop with the first
2375 		 * condition, and luckily don't have to take that path
2376 		 * anyway.
2377 		 */
2378 		 (need_wait && packet_read_pending(&po->tx_ring))));
2379 
2380 	err = len_sum;
2381 	goto out_put;
2382 
2383 out_status:
2384 	__packet_set_status(po, ph, status);
2385 	kfree_skb(skb);
2386 out_put:
2387 	dev_put(dev);
2388 out:
2389 	mutex_unlock(&po->pg_vec_lock);
2390 	return err;
2391 }
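
/* The TX side of tpacket_snd() is driven from a mapped TX ring: userspace
 * fills a slot, marks it TP_STATUS_SEND_REQUEST and kicks the socket with a
 * zero-length send(). A rough sketch (not part of this file), assuming a
 * SOCK_RAW socket with a TPACKET_V2 PACKET_TX_RING already mapped; the helper
 * name is illustrative.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	// 'frame' points at one mapped TX ring slot; for TPACKET_V2 the data
 *	// area starts TPACKET2_HDRLEN - sizeof(struct sockaddr_ll) bytes in.
 *	static ssize_t queue_and_send(int fd, void *frame,
 *				      const void *pkt, unsigned int len)
 *	{
 *		struct tpacket2_hdr *hdr = frame;
 *		unsigned int off = TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *		if (hdr->tp_status != TP_STATUS_AVAILABLE)
 *			return -1;		// slot still owned by the kernel
 *		memcpy((char *)frame + off, pkt, len);
 *		hdr->tp_len = len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		return send(fd, NULL, 0, 0);	// kick tpacket_snd()
 *	}
 */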
2392 
2393 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2394 				        size_t reserve, size_t len,
2395 				        size_t linear, int noblock,
2396 				        int *err)
2397 {
2398 	struct sk_buff *skb;
2399 
2400 	/* Under a page?  Don't bother with paged skb. */
2401 	if (prepad + len < PAGE_SIZE || !linear)
2402 		linear = len;
2403 
2404 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2405 				   err, 0);
2406 	if (!skb)
2407 		return NULL;
2408 
2409 	skb_reserve(skb, reserve);
2410 	skb_put(skb, linear);
2411 	skb->data_len = len - linear;
2412 	skb->len += len - linear;
2413 
2414 	return skb;
2415 }
2416 
2417 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2418 {
2419 	struct sock *sk = sock->sk;
2420 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2421 	struct sk_buff *skb;
2422 	struct net_device *dev;
2423 	__be16 proto;
2424 	unsigned char *addr;
2425 	int err, reserve = 0;
2426 	struct virtio_net_hdr vnet_hdr = { 0 };
2427 	int offset = 0;
2428 	int vnet_hdr_len;
2429 	struct packet_sock *po = pkt_sk(sk);
2430 	unsigned short gso_type = 0;
2431 	int hlen, tlen;
2432 	int extra_len = 0;
2433 	ssize_t n;
2434 
2435 	/*
2436 	 *	Get and verify the address.
2437 	 */
2438 
2439 	if (likely(saddr == NULL)) {
2440 		dev	= packet_cached_dev_get(po);
2441 		proto	= po->num;
2442 		addr	= NULL;
2443 	} else {
2444 		err = -EINVAL;
2445 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2446 			goto out;
2447 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2448 			goto out;
2449 		proto	= saddr->sll_protocol;
2450 		addr	= saddr->sll_addr;
2451 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2452 	}
2453 
2454 	err = -ENXIO;
2455 	if (unlikely(dev == NULL))
2456 		goto out_unlock;
2457 	err = -ENETDOWN;
2458 	if (unlikely(!(dev->flags & IFF_UP)))
2459 		goto out_unlock;
2460 
2461 	if (sock->type == SOCK_RAW)
2462 		reserve = dev->hard_header_len;
2463 	if (po->has_vnet_hdr) {
2464 		vnet_hdr_len = sizeof(vnet_hdr);
2465 
2466 		err = -EINVAL;
2467 		if (len < vnet_hdr_len)
2468 			goto out_unlock;
2469 
2470 		len -= vnet_hdr_len;
2471 
2472 		err = -EFAULT;
2473 		n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2474 		if (n != vnet_hdr_len)
2475 			goto out_unlock;
2476 
2477 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2478 		    (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2479 		     __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2480 		      __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2481 			vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2482 				 __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2483 				__virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
2484 
2485 		err = -EINVAL;
2486 		if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
2487 			goto out_unlock;
2488 
2489 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2490 			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2491 			case VIRTIO_NET_HDR_GSO_TCPV4:
2492 				gso_type = SKB_GSO_TCPV4;
2493 				break;
2494 			case VIRTIO_NET_HDR_GSO_TCPV6:
2495 				gso_type = SKB_GSO_TCPV6;
2496 				break;
2497 			case VIRTIO_NET_HDR_GSO_UDP:
2498 				gso_type = SKB_GSO_UDP;
2499 				break;
2500 			default:
2501 				goto out_unlock;
2502 			}
2503 
2504 			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2505 				gso_type |= SKB_GSO_TCP_ECN;
2506 
2507 			if (vnet_hdr.gso_size == 0)
2508 				goto out_unlock;
2509 
2510 		}
2511 	}
2512 
2513 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2514 		if (!netif_supports_nofcs(dev)) {
2515 			err = -EPROTONOSUPPORT;
2516 			goto out_unlock;
2517 		}
2518 		extra_len = 4; /* We're doing our own CRC */
2519 	}
2520 
2521 	err = -EMSGSIZE;
2522 	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2523 		goto out_unlock;
2524 
2525 	err = -ENOBUFS;
2526 	hlen = LL_RESERVED_SPACE(dev);
2527 	tlen = dev->needed_tailroom;
2528 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2529 			       __virtio16_to_cpu(false, vnet_hdr.hdr_len),
2530 			       msg->msg_flags & MSG_DONTWAIT, &err);
2531 	if (skb == NULL)
2532 		goto out_unlock;
2533 
2534 	skb_set_network_header(skb, reserve);
2535 
2536 	err = -EINVAL;
2537 	if (sock->type == SOCK_DGRAM) {
2538 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2539 		if (unlikely(offset < 0))
2540 			goto out_free;
2541 	} else {
2542 		if (ll_header_truncated(dev, len))
2543 			goto out_free;
2544 	}
2545 
2546 	/* Returns -EFAULT on error */
2547 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2548 	if (err)
2549 		goto out_free;
2550 
2551 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2552 
2553 	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2554 	    !packet_extra_vlan_len_allowed(dev, skb)) {
2555 		err = -EMSGSIZE;
2556 		goto out_free;
2557 	}
2558 
2559 	skb->protocol = proto;
2560 	skb->dev = dev;
2561 	skb->priority = sk->sk_priority;
2562 	skb->mark = sk->sk_mark;
2563 
2564 	packet_pick_tx_queue(dev, skb);
2565 
2566 	if (po->has_vnet_hdr) {
2567 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2568 			u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2569 			u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2570 			if (!skb_partial_csum_set(skb, s, o)) {
2571 				err = -EINVAL;
2572 				goto out_free;
2573 			}
2574 		}
2575 
2576 		skb_shinfo(skb)->gso_size =
2577 			__virtio16_to_cpu(false, vnet_hdr.gso_size);
2578 		skb_shinfo(skb)->gso_type = gso_type;
2579 
2580 		/* Header must be checked, and gso_segs computed. */
2581 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2582 		skb_shinfo(skb)->gso_segs = 0;
2583 
2584 		len += vnet_hdr_len;
2585 	}
2586 
2587 	skb_probe_transport_header(skb, reserve);
2588 
2589 	if (unlikely(extra_len == 4))
2590 		skb->no_fcs = 1;
2591 
2592 	err = po->xmit(skb);
2593 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2594 		goto out_unlock;
2595 
2596 	dev_put(dev);
2597 
2598 	return len;
2599 
2600 out_free:
2601 	kfree_skb(skb);
2602 out_unlock:
2603 	if (dev)
2604 		dev_put(dev);
2605 out:
2606 	return err;
2607 }
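
/* When PACKET_VNET_HDR is enabled, packet_snd() expects the send buffer to
 * begin with a struct virtio_net_hdr describing checksum/GSO state. A rough
 * sketch (not part of this file) for a plain, non-GSO frame; the helper name
 * and buffer size are illustrative assumptions.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/virtio_net.h>
 *
 *	static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t len)
 *	{
 *		struct virtio_net_hdr vh;
 *		char buf[sizeof(vh) + 2048];
 *
 *		if (len > sizeof(buf) - sizeof(vh))
 *			return -1;
 *		memset(&vh, 0, sizeof(vh));
 *		vh.gso_type = VIRTIO_NET_HDR_GSO_NONE;	// no offloads requested
 *		memcpy(buf, &vh, sizeof(vh));
 *		memcpy(buf + sizeof(vh), frame, len);
 *		return send(fd, buf, sizeof(vh) + len, 0);
 *	}
 */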
2608 
2609 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2610 {
2611 	struct sock *sk = sock->sk;
2612 	struct packet_sock *po = pkt_sk(sk);
2613 
2614 	if (po->tx_ring.pg_vec)
2615 		return tpacket_snd(po, msg);
2616 	else
2617 		return packet_snd(sock, msg, len);
2618 }
2619 
2620 /*
2621  *	Close a PACKET socket. This is fairly simple. We immediately go
2622  *	to 'closed' state and remove our protocol entry in the device list.
2623  */
2624 
2625 static int packet_release(struct socket *sock)
2626 {
2627 	struct sock *sk = sock->sk;
2628 	struct packet_sock *po;
2629 	struct net *net;
2630 	union tpacket_req_u req_u;
2631 
2632 	if (!sk)
2633 		return 0;
2634 
2635 	net = sock_net(sk);
2636 	po = pkt_sk(sk);
2637 
2638 	mutex_lock(&net->packet.sklist_lock);
2639 	sk_del_node_init_rcu(sk);
2640 	mutex_unlock(&net->packet.sklist_lock);
2641 
2642 	preempt_disable();
2643 	sock_prot_inuse_add(net, sk->sk_prot, -1);
2644 	preempt_enable();
2645 
2646 	spin_lock(&po->bind_lock);
2647 	unregister_prot_hook(sk, false);
2648 	packet_cached_dev_reset(po);
2649 
2650 	if (po->prot_hook.dev) {
2651 		dev_put(po->prot_hook.dev);
2652 		po->prot_hook.dev = NULL;
2653 	}
2654 	spin_unlock(&po->bind_lock);
2655 
2656 	packet_flush_mclist(sk);
2657 
2658 	if (po->rx_ring.pg_vec) {
2659 		memset(&req_u, 0, sizeof(req_u));
2660 		packet_set_ring(sk, &req_u, 1, 0);
2661 	}
2662 
2663 	if (po->tx_ring.pg_vec) {
2664 		memset(&req_u, 0, sizeof(req_u));
2665 		packet_set_ring(sk, &req_u, 1, 1);
2666 	}
2667 
2668 	fanout_release(sk);
2669 
2670 	synchronize_net();
2671 	/*
2672 	 *	Now the socket is dead. No more input will appear.
2673 	 */
2674 	sock_orphan(sk);
2675 	sock->sk = NULL;
2676 
2677 	/* Purge queues */
2678 
2679 	skb_queue_purge(&sk->sk_receive_queue);
2680 	packet_free_pending(po);
2681 	sk_refcnt_debug_release(sk);
2682 
2683 	sock_put(sk);
2684 	return 0;
2685 }
2686 
2687 /*
2688  *	Attach a packet hook.
2689  */
2690 
2691 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2692 			  __be16 proto)
2693 {
2694 	struct packet_sock *po = pkt_sk(sk);
2695 	struct net_device *dev_curr;
2696 	__be16 proto_curr;
2697 	bool need_rehook;
2698 	struct net_device *dev = NULL;
2699 	int ret = 0;
2700 	bool unlisted = false;
2701 
2702 	if (po->fanout)
2703 		return -EINVAL;
2704 
2705 	lock_sock(sk);
2706 	spin_lock(&po->bind_lock);
2707 	rcu_read_lock();
2708 
2709 	if (name) {
2710 		dev = dev_get_by_name_rcu(sock_net(sk), name);
2711 		if (!dev) {
2712 			ret = -ENODEV;
2713 			goto out_unlock;
2714 		}
2715 	} else if (ifindex) {
2716 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2717 		if (!dev) {
2718 			ret = -ENODEV;
2719 			goto out_unlock;
2720 		}
2721 	}
2722 
2723 	if (dev)
2724 		dev_hold(dev);
2725 
2726 	proto_curr = po->prot_hook.type;
2727 	dev_curr = po->prot_hook.dev;
2728 
2729 	need_rehook = proto_curr != proto || dev_curr != dev;
2730 
2731 	if (need_rehook) {
2732 		if (po->running) {
2733 			rcu_read_unlock();
2734 			__unregister_prot_hook(sk, true);
2735 			rcu_read_lock();
2736 			dev_curr = po->prot_hook.dev;
2737 			if (dev)
2738 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
2739 								 dev->ifindex);
2740 		}
2741 
2742 		po->num = proto;
2743 		po->prot_hook.type = proto;
2744 
2745 		if (unlikely(unlisted)) {
2746 			dev_put(dev);
2747 			po->prot_hook.dev = NULL;
2748 			po->ifindex = -1;
2749 			packet_cached_dev_reset(po);
2750 		} else {
2751 			po->prot_hook.dev = dev;
2752 			po->ifindex = dev ? dev->ifindex : 0;
2753 			packet_cached_dev_assign(po, dev);
2754 		}
2755 	}
2756 	if (dev_curr)
2757 		dev_put(dev_curr);
2758 
2759 	if (proto == 0 || !need_rehook)
2760 		goto out_unlock;
2761 
2762 	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
2763 		register_prot_hook(sk);
2764 	} else {
2765 		sk->sk_err = ENETDOWN;
2766 		if (!sock_flag(sk, SOCK_DEAD))
2767 			sk->sk_error_report(sk);
2768 	}
2769 
2770 out_unlock:
2771 	rcu_read_unlock();
2772 	spin_unlock(&po->bind_lock);
2773 	release_sock(sk);
2774 	return ret;
2775 }
2776 
2777 /*
2778  *	Bind a packet socket to a device
2779  */
2780 
2781 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2782 			    int addr_len)
2783 {
2784 	struct sock *sk = sock->sk;
2785 	char name[15];
2786 
2787 	/*
2788 	 *	Check legality
2789 	 */
2790 
2791 	if (addr_len != sizeof(struct sockaddr))
2792 		return -EINVAL;
2793 	strlcpy(name, uaddr->sa_data, sizeof(name));
2794 
2795 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
2796 }
2797 
2798 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2799 {
2800 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2801 	struct sock *sk = sock->sk;
2802 
2803 	/*
2804 	 *	Check legality
2805 	 */
2806 
2807 	if (addr_len < sizeof(struct sockaddr_ll))
2808 		return -EINVAL;
2809 	if (sll->sll_family != AF_PACKET)
2810 		return -EINVAL;
2811 
2812 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
2813 			      sll->sll_protocol ? : pkt_sk(sk)->num);
2814 }
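
/* The usual userspace counterpart of packet_bind(): open an AF_PACKET socket
 * and bind it to one interface via sockaddr_ll. A minimal sketch (not part of
 * this file); the helper name is illustrative, and CAP_NET_RAW is required.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	static int open_bound(const char *ifname)
 *	{
 *		struct sockaddr_ll sll;
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family = AF_PACKET;
 *		sll.sll_protocol = htons(ETH_P_ALL);
 *		sll.sll_ifindex = if_nametoindex(ifname);
 *		if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */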
2815 
2816 static struct proto packet_proto = {
2817 	.name	  = "PACKET",
2818 	.owner	  = THIS_MODULE,
2819 	.obj_size = sizeof(struct packet_sock),
2820 };
2821 
2822 /*
2823  *	Create a packet of type SOCK_PACKET.
2824  */
2825 
2826 static int packet_create(struct net *net, struct socket *sock, int protocol,
2827 			 int kern)
2828 {
2829 	struct sock *sk;
2830 	struct packet_sock *po;
2831 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
2832 	int err;
2833 
2834 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
2835 		return -EPERM;
2836 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2837 	    sock->type != SOCK_PACKET)
2838 		return -ESOCKTNOSUPPORT;
2839 
2840 	sock->state = SS_UNCONNECTED;
2841 
2842 	err = -ENOBUFS;
2843 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2844 	if (sk == NULL)
2845 		goto out;
2846 
2847 	sock->ops = &packet_ops;
2848 	if (sock->type == SOCK_PACKET)
2849 		sock->ops = &packet_ops_spkt;
2850 
2851 	sock_init_data(sock, sk);
2852 
2853 	po = pkt_sk(sk);
2854 	sk->sk_family = PF_PACKET;
2855 	po->num = proto;
2856 	po->xmit = dev_queue_xmit;
2857 
2858 	err = packet_alloc_pending(po);
2859 	if (err)
2860 		goto out2;
2861 
2862 	packet_cached_dev_reset(po);
2863 
2864 	sk->sk_destruct = packet_sock_destruct;
2865 	sk_refcnt_debug_inc(sk);
2866 
2867 	/*
2868 	 *	Attach a protocol block
2869 	 */
2870 
2871 	spin_lock_init(&po->bind_lock);
2872 	mutex_init(&po->pg_vec_lock);
2873 	po->prot_hook.func = packet_rcv;
2874 
2875 	if (sock->type == SOCK_PACKET)
2876 		po->prot_hook.func = packet_rcv_spkt;
2877 
2878 	po->prot_hook.af_packet_priv = sk;
2879 
2880 	if (proto) {
2881 		po->prot_hook.type = proto;
2882 		register_prot_hook(sk);
2883 	}
2884 
2885 	mutex_lock(&net->packet.sklist_lock);
2886 	sk_add_node_rcu(sk, &net->packet.sklist);
2887 	mutex_unlock(&net->packet.sklist_lock);
2888 
2889 	preempt_disable();
2890 	sock_prot_inuse_add(net, &packet_proto, 1);
2891 	preempt_enable();
2892 
2893 	return 0;
2894 out2:
2895 	sk_free(sk);
2896 out:
2897 	return err;
2898 }
2899 
2900 /*
2901  *	Pull a packet from our receive queue and hand it to the user.
2902  *	If necessary we block.
2903  */
2904 
2905 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2906 			  int flags)
2907 {
2908 	struct sock *sk = sock->sk;
2909 	struct sk_buff *skb;
2910 	int copied, err;
2911 	int vnet_hdr_len = 0;
2912 	unsigned int origlen = 0;
2913 
2914 	err = -EINVAL;
2915 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2916 		goto out;
2917 
2918 #if 0
2919 	/* What error should we return now? EUNATTACH? */
2920 	if (pkt_sk(sk)->ifindex < 0)
2921 		return -ENODEV;
2922 #endif
2923 
2924 	if (flags & MSG_ERRQUEUE) {
2925 		err = sock_recv_errqueue(sk, msg, len,
2926 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
2927 		goto out;
2928 	}
2929 
2930 	/*
2931 	 *	Call the generic datagram receiver. This handles all sorts
2932 	 *	of horrible races and re-entrancy so we can forget about it
2933 	 *	in the protocol layers.
2934 	 *
2935 	 *	Now it will return ENETDOWN if the device has just gone down,
2936 	 *	but then it will block.
2937 	 */
2938 
2939 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2940 
2941 	/*
2942 	 *	An error occurred so return it. Because skb_recv_datagram()
2943 	 *	handles the blocking, we don't have to see or worry about
2944 	 *	blocking retries.
2945 	 */
2946 
2947 	if (skb == NULL)
2948 		goto out;
2949 
2950 	if (pkt_sk(sk)->has_vnet_hdr) {
2951 		struct virtio_net_hdr vnet_hdr = { 0 };
2952 
2953 		err = -EINVAL;
2954 		vnet_hdr_len = sizeof(vnet_hdr);
2955 		if (len < vnet_hdr_len)
2956 			goto out_free;
2957 
2958 		len -= vnet_hdr_len;
2959 
2960 		if (skb_is_gso(skb)) {
2961 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2962 
2963 			/* This is a hint as to how much should be linear. */
2964 			vnet_hdr.hdr_len =
2965 				__cpu_to_virtio16(false, skb_headlen(skb));
2966 			vnet_hdr.gso_size =
2967 				__cpu_to_virtio16(false, sinfo->gso_size);
2968 			if (sinfo->gso_type & SKB_GSO_TCPV4)
2969 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2970 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
2971 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2972 			else if (sinfo->gso_type & SKB_GSO_UDP)
2973 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2974 			else if (sinfo->gso_type & SKB_GSO_FCOE)
2975 				goto out_free;
2976 			else
2977 				BUG();
2978 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2979 				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2980 		} else
2981 			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2982 
2983 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2984 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2985 			vnet_hdr.csum_start = __cpu_to_virtio16(false,
2986 					  skb_checksum_start_offset(skb));
2987 			vnet_hdr.csum_offset = __cpu_to_virtio16(false,
2988 							 skb->csum_offset);
2989 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2990 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2991 		} /* else everything is zero */
2992 
2993 		err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
2994 		if (err < 0)
2995 			goto out_free;
2996 	}
2997 
2998 	/* You lose any data beyond the buffer you gave. If this worries
2999 	 * a user program, it can ask the device for its MTU
3000 	 * anyway.
3001 	 */
3002 	copied = skb->len;
3003 	if (copied > len) {
3004 		copied = len;
3005 		msg->msg_flags |= MSG_TRUNC;
3006 	}
3007 
3008 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3009 	if (err)
3010 		goto out_free;
3011 
3012 	if (sock->type != SOCK_PACKET) {
3013 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3014 
3015 		/* Original length was stored in sockaddr_ll fields */
3016 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3017 		sll->sll_family = AF_PACKET;
3018 		sll->sll_protocol = skb->protocol;
3019 	}
3020 
3021 	sock_recv_ts_and_drops(msg, sk, skb);
3022 
3023 	if (msg->msg_name) {
3024 		/* If the address length field is there to be filled
3025 		 * in, we fill it in now.
3026 		 */
3027 		if (sock->type == SOCK_PACKET) {
3028 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3029 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3030 		} else {
3031 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3032 
3033 			msg->msg_namelen = sll->sll_halen +
3034 				offsetof(struct sockaddr_ll, sll_addr);
3035 		}
3036 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3037 		       msg->msg_namelen);
3038 	}
3039 
3040 	if (pkt_sk(sk)->auxdata) {
3041 		struct tpacket_auxdata aux;
3042 
3043 		aux.tp_status = TP_STATUS_USER;
3044 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3045 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3046 		else if (skb->pkt_type != PACKET_OUTGOING &&
3047 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3048 			  skb_csum_unnecessary(skb)))
3049 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3050 
3051 		aux.tp_len = origlen;
3052 		aux.tp_snaplen = skb->len;
3053 		aux.tp_mac = 0;
3054 		aux.tp_net = skb_network_offset(skb);
3055 		if (skb_vlan_tag_present(skb)) {
3056 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3057 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3058 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3059 		} else {
3060 			aux.tp_vlan_tci = 0;
3061 			aux.tp_vlan_tpid = 0;
3062 		}
3063 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3064 	}
3065 
3066 	/*
3067 	 *	Free or return the buffer as appropriate. Again this
3068 	 *	hides all the races and re-entrancy issues from us.
3069 	 */
3070 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3071 
3072 out_free:
3073 	skb_free_datagram(sk, skb);
3074 out:
3075 	return err;
3076 }
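
/* The PACKET_AUXDATA metadata built above is delivered as a SOL_PACKET
 * control message. A rough sketch (not part of this file) of receiving one
 * packet and its auxdata; the helper name is illustrative, and PACKET_AUXDATA
 * is assumed to have been enabled with setsockopt() beforehand.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <linux/if_packet.h>
 *
 *	static ssize_t recv_with_aux(int fd, void *buf, size_t len,
 *				     struct tpacket_auxdata *aux)
 *	{
 *		char ctrl[CMSG_SPACE(sizeof(*aux))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *		};
 *		struct cmsghdr *cmsg;
 *		ssize_t n = recvmsg(fd, &msg, 0);
 *
 *		if (n < 0)
 *			return n;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_PACKET &&
 *			    cmsg->cmsg_type == PACKET_AUXDATA)
 *				memcpy(aux, CMSG_DATA(cmsg), sizeof(*aux));
 *		return n;
 *	}
 */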
3077 
3078 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3079 			       int *uaddr_len, int peer)
3080 {
3081 	struct net_device *dev;
3082 	struct sock *sk	= sock->sk;
3083 
3084 	if (peer)
3085 		return -EOPNOTSUPP;
3086 
3087 	uaddr->sa_family = AF_PACKET;
3088 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3089 	rcu_read_lock();
3090 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3091 	if (dev)
3092 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3093 	rcu_read_unlock();
3094 	*uaddr_len = sizeof(*uaddr);
3095 
3096 	return 0;
3097 }
3098 
3099 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3100 			  int *uaddr_len, int peer)
3101 {
3102 	struct net_device *dev;
3103 	struct sock *sk = sock->sk;
3104 	struct packet_sock *po = pkt_sk(sk);
3105 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3106 
3107 	if (peer)
3108 		return -EOPNOTSUPP;
3109 
3110 	sll->sll_family = AF_PACKET;
3111 	sll->sll_ifindex = po->ifindex;
3112 	sll->sll_protocol = po->num;
3113 	sll->sll_pkttype = 0;
3114 	rcu_read_lock();
3115 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3116 	if (dev) {
3117 		sll->sll_hatype = dev->type;
3118 		sll->sll_halen = dev->addr_len;
3119 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3120 	} else {
3121 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3122 		sll->sll_halen = 0;
3123 	}
3124 	rcu_read_unlock();
3125 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3126 
3127 	return 0;
3128 }
3129 
3130 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3131 			 int what)
3132 {
3133 	switch (i->type) {
3134 	case PACKET_MR_MULTICAST:
3135 		if (i->alen != dev->addr_len)
3136 			return -EINVAL;
3137 		if (what > 0)
3138 			return dev_mc_add(dev, i->addr);
3139 		else
3140 			return dev_mc_del(dev, i->addr);
3141 		break;
3142 	case PACKET_MR_PROMISC:
3143 		return dev_set_promiscuity(dev, what);
3144 	case PACKET_MR_ALLMULTI:
3145 		return dev_set_allmulti(dev, what);
3146 	case PACKET_MR_UNICAST:
3147 		if (i->alen != dev->addr_len)
3148 			return -EINVAL;
3149 		if (what > 0)
3150 			return dev_uc_add(dev, i->addr);
3151 		else
3152 			return dev_uc_del(dev, i->addr);
3153 		break;
3154 	default:
3155 		break;
3156 	}
3157 	return 0;
3158 }
3159 
3160 static void packet_dev_mclist_delete(struct net_device *dev,
3161 				     struct packet_mclist **mlp)
3162 {
3163 	struct packet_mclist *ml;
3164 
3165 	while ((ml = *mlp) != NULL) {
3166 		if (ml->ifindex == dev->ifindex) {
3167 			packet_dev_mc(dev, ml, -1);
3168 			*mlp = ml->next;
3169 			kfree(ml);
3170 		} else
3171 			mlp = &ml->next;
3172 	}
3173 }
3174 
3175 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3176 {
3177 	struct packet_sock *po = pkt_sk(sk);
3178 	struct packet_mclist *ml, *i;
3179 	struct net_device *dev;
3180 	int err;
3181 
3182 	rtnl_lock();
3183 
3184 	err = -ENODEV;
3185 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3186 	if (!dev)
3187 		goto done;
3188 
3189 	err = -EINVAL;
3190 	if (mreq->mr_alen > dev->addr_len)
3191 		goto done;
3192 
3193 	err = -ENOBUFS;
3194 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3195 	if (i == NULL)
3196 		goto done;
3197 
3198 	err = 0;
3199 	for (ml = po->mclist; ml; ml = ml->next) {
3200 		if (ml->ifindex == mreq->mr_ifindex &&
3201 		    ml->type == mreq->mr_type &&
3202 		    ml->alen == mreq->mr_alen &&
3203 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3204 			ml->count++;
3205 			/* Free the new element ... */
3206 			kfree(i);
3207 			goto done;
3208 		}
3209 	}
3210 
3211 	i->type = mreq->mr_type;
3212 	i->ifindex = mreq->mr_ifindex;
3213 	i->alen = mreq->mr_alen;
3214 	memcpy(i->addr, mreq->mr_address, i->alen);
3215 	i->count = 1;
3216 	i->next = po->mclist;
3217 	po->mclist = i;
3218 	err = packet_dev_mc(dev, i, 1);
3219 	if (err) {
3220 		po->mclist = i->next;
3221 		kfree(i);
3222 	}
3223 
3224 done:
3225 	rtnl_unlock();
3226 	return err;
3227 }
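
/* packet_mc_add() is reached through the PACKET_ADD_MEMBERSHIP socket option.
 * A minimal sketch (not part of this file) of putting an interface into
 * promiscuous mode that way; the helper name is illustrative.
 *
 *	#include <string.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static int enable_promisc(int fd, const char *ifname)
 *	{
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = if_nametoindex(ifname);
 *		mreq.mr_type = PACKET_MR_PROMISC;
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */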
3228 
3229 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3230 {
3231 	struct packet_mclist *ml, **mlp;
3232 
3233 	rtnl_lock();
3234 
3235 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3236 		if (ml->ifindex == mreq->mr_ifindex &&
3237 		    ml->type == mreq->mr_type &&
3238 		    ml->alen == mreq->mr_alen &&
3239 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3240 			if (--ml->count == 0) {
3241 				struct net_device *dev;
3242 				*mlp = ml->next;
3243 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3244 				if (dev)
3245 					packet_dev_mc(dev, ml, -1);
3246 				kfree(ml);
3247 			}
3248 			break;
3249 		}
3250 	}
3251 	rtnl_unlock();
3252 	return 0;
3253 }
3254 
3255 static void packet_flush_mclist(struct sock *sk)
3256 {
3257 	struct packet_sock *po = pkt_sk(sk);
3258 	struct packet_mclist *ml;
3259 
3260 	if (!po->mclist)
3261 		return;
3262 
3263 	rtnl_lock();
3264 	while ((ml = po->mclist) != NULL) {
3265 		struct net_device *dev;
3266 
3267 		po->mclist = ml->next;
3268 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3269 		if (dev != NULL)
3270 			packet_dev_mc(dev, ml, -1);
3271 		kfree(ml);
3272 	}
3273 	rtnl_unlock();
3274 }
3275 
3276 static int
3277 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3278 {
3279 	struct sock *sk = sock->sk;
3280 	struct packet_sock *po = pkt_sk(sk);
3281 	int ret;
3282 
3283 	if (level != SOL_PACKET)
3284 		return -ENOPROTOOPT;
3285 
3286 	switch (optname) {
3287 	case PACKET_ADD_MEMBERSHIP:
3288 	case PACKET_DROP_MEMBERSHIP:
3289 	{
3290 		struct packet_mreq_max mreq;
3291 		int len = optlen;
3292 		memset(&mreq, 0, sizeof(mreq));
3293 		if (len < sizeof(struct packet_mreq))
3294 			return -EINVAL;
3295 		if (len > sizeof(mreq))
3296 			len = sizeof(mreq);
3297 		if (copy_from_user(&mreq, optval, len))
3298 			return -EFAULT;
3299 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3300 			return -EINVAL;
3301 		if (optname == PACKET_ADD_MEMBERSHIP)
3302 			ret = packet_mc_add(sk, &mreq);
3303 		else
3304 			ret = packet_mc_drop(sk, &mreq);
3305 		return ret;
3306 	}
3307 
3308 	case PACKET_RX_RING:
3309 	case PACKET_TX_RING:
3310 	{
3311 		union tpacket_req_u req_u;
3312 		int len;
3313 
3314 		switch (po->tp_version) {
3315 		case TPACKET_V1:
3316 		case TPACKET_V2:
3317 			len = sizeof(req_u.req);
3318 			break;
3319 		case TPACKET_V3:
3320 		default:
3321 			len = sizeof(req_u.req3);
3322 			break;
3323 		}
3324 		if (optlen < len)
3325 			return -EINVAL;
3326 		if (pkt_sk(sk)->has_vnet_hdr)
3327 			return -EINVAL;
3328 		if (copy_from_user(&req_u.req, optval, len))
3329 			return -EFAULT;
3330 		return packet_set_ring(sk, &req_u, 0,
3331 			optname == PACKET_TX_RING);
3332 	}
3333 	case PACKET_COPY_THRESH:
3334 	{
3335 		int val;
3336 
3337 		if (optlen != sizeof(val))
3338 			return -EINVAL;
3339 		if (copy_from_user(&val, optval, sizeof(val)))
3340 			return -EFAULT;
3341 
3342 		pkt_sk(sk)->copy_thresh = val;
3343 		return 0;
3344 	}
3345 	case PACKET_VERSION:
3346 	{
3347 		int val;
3348 
3349 		if (optlen != sizeof(val))
3350 			return -EINVAL;
3351 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3352 			return -EBUSY;
3353 		if (copy_from_user(&val, optval, sizeof(val)))
3354 			return -EFAULT;
3355 		switch (val) {
3356 		case TPACKET_V1:
3357 		case TPACKET_V2:
3358 		case TPACKET_V3:
3359 			po->tp_version = val;
3360 			return 0;
3361 		default:
3362 			return -EINVAL;
3363 		}
3364 	}
3365 	case PACKET_RESERVE:
3366 	{
3367 		unsigned int val;
3368 
3369 		if (optlen != sizeof(val))
3370 			return -EINVAL;
3371 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3372 			return -EBUSY;
3373 		if (copy_from_user(&val, optval, sizeof(val)))
3374 			return -EFAULT;
3375 		po->tp_reserve = val;
3376 		return 0;
3377 	}
3378 	case PACKET_LOSS:
3379 	{
3380 		unsigned int val;
3381 
3382 		if (optlen != sizeof(val))
3383 			return -EINVAL;
3384 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3385 			return -EBUSY;
3386 		if (copy_from_user(&val, optval, sizeof(val)))
3387 			return -EFAULT;
3388 		po->tp_loss = !!val;
3389 		return 0;
3390 	}
3391 	case PACKET_AUXDATA:
3392 	{
3393 		int val;
3394 
3395 		if (optlen < sizeof(val))
3396 			return -EINVAL;
3397 		if (copy_from_user(&val, optval, sizeof(val)))
3398 			return -EFAULT;
3399 
3400 		po->auxdata = !!val;
3401 		return 0;
3402 	}
3403 	case PACKET_ORIGDEV:
3404 	{
3405 		int val;
3406 
3407 		if (optlen < sizeof(val))
3408 			return -EINVAL;
3409 		if (copy_from_user(&val, optval, sizeof(val)))
3410 			return -EFAULT;
3411 
3412 		po->origdev = !!val;
3413 		return 0;
3414 	}
3415 	case PACKET_VNET_HDR:
3416 	{
3417 		int val;
3418 
3419 		if (sock->type != SOCK_RAW)
3420 			return -EINVAL;
3421 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3422 			return -EBUSY;
3423 		if (optlen < sizeof(val))
3424 			return -EINVAL;
3425 		if (copy_from_user(&val, optval, sizeof(val)))
3426 			return -EFAULT;
3427 
3428 		po->has_vnet_hdr = !!val;
3429 		return 0;
3430 	}
3431 	case PACKET_TIMESTAMP:
3432 	{
3433 		int val;
3434 
3435 		if (optlen != sizeof(val))
3436 			return -EINVAL;
3437 		if (copy_from_user(&val, optval, sizeof(val)))
3438 			return -EFAULT;
3439 
3440 		po->tp_tstamp = val;
3441 		return 0;
3442 	}
3443 	case PACKET_FANOUT:
3444 	{
3445 		int val;
3446 
3447 		if (optlen != sizeof(val))
3448 			return -EINVAL;
3449 		if (copy_from_user(&val, optval, sizeof(val)))
3450 			return -EFAULT;
3451 
3452 		return fanout_add(sk, val & 0xffff, val >> 16);
3453 	}
3454 	case PACKET_TX_HAS_OFF:
3455 	{
3456 		unsigned int val;
3457 
3458 		if (optlen != sizeof(val))
3459 			return -EINVAL;
3460 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3461 			return -EBUSY;
3462 		if (copy_from_user(&val, optval, sizeof(val)))
3463 			return -EFAULT;
3464 		po->tp_tx_has_off = !!val;
3465 		return 0;
3466 	}
3467 	case PACKET_QDISC_BYPASS:
3468 	{
3469 		int val;
3470 
3471 		if (optlen != sizeof(val))
3472 			return -EINVAL;
3473 		if (copy_from_user(&val, optval, sizeof(val)))
3474 			return -EFAULT;
3475 
3476 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3477 		return 0;
3478 	}
3479 	default:
3480 		return -ENOPROTOOPT;
3481 	}
3482 }
3483 
3484 static int packet_getsockopt(struct socket *sock, int level, int optname,
3485 			     char __user *optval, int __user *optlen)
3486 {
3487 	int len;
3488 	int val, lv = sizeof(val);
3489 	struct sock *sk = sock->sk;
3490 	struct packet_sock *po = pkt_sk(sk);
3491 	void *data = &val;
3492 	union tpacket_stats_u st;
3493 
3494 	if (level != SOL_PACKET)
3495 		return -ENOPROTOOPT;
3496 
3497 	if (get_user(len, optlen))
3498 		return -EFAULT;
3499 
3500 	if (len < 0)
3501 		return -EINVAL;
3502 
3503 	switch (optname) {
3504 	case PACKET_STATISTICS:
3505 		spin_lock_bh(&sk->sk_receive_queue.lock);
3506 		memcpy(&st, &po->stats, sizeof(st));
3507 		memset(&po->stats, 0, sizeof(po->stats));
3508 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3509 
3510 		if (po->tp_version == TPACKET_V3) {
3511 			lv = sizeof(struct tpacket_stats_v3);
3512 			st.stats3.tp_packets += st.stats3.tp_drops;
3513 			data = &st.stats3;
3514 		} else {
3515 			lv = sizeof(struct tpacket_stats);
3516 			st.stats1.tp_packets += st.stats1.tp_drops;
3517 			data = &st.stats1;
3518 		}
3519 
3520 		break;
3521 	case PACKET_AUXDATA:
3522 		val = po->auxdata;
3523 		break;
3524 	case PACKET_ORIGDEV:
3525 		val = po->origdev;
3526 		break;
3527 	case PACKET_VNET_HDR:
3528 		val = po->has_vnet_hdr;
3529 		break;
3530 	case PACKET_VERSION:
3531 		val = po->tp_version;
3532 		break;
3533 	case PACKET_HDRLEN:
3534 		if (len > sizeof(int))
3535 			len = sizeof(int);
3536 		if (copy_from_user(&val, optval, len))
3537 			return -EFAULT;
3538 		switch (val) {
3539 		case TPACKET_V1:
3540 			val = sizeof(struct tpacket_hdr);
3541 			break;
3542 		case TPACKET_V2:
3543 			val = sizeof(struct tpacket2_hdr);
3544 			break;
3545 		case TPACKET_V3:
3546 			val = sizeof(struct tpacket3_hdr);
3547 			break;
3548 		default:
3549 			return -EINVAL;
3550 		}
3551 		break;
3552 	case PACKET_RESERVE:
3553 		val = po->tp_reserve;
3554 		break;
3555 	case PACKET_LOSS:
3556 		val = po->tp_loss;
3557 		break;
3558 	case PACKET_TIMESTAMP:
3559 		val = po->tp_tstamp;
3560 		break;
3561 	case PACKET_FANOUT:
3562 		val = (po->fanout ?
3563 		       ((u32)po->fanout->id |
3564 			((u32)po->fanout->type << 16) |
3565 			((u32)po->fanout->flags << 24)) :
3566 		       0);
3567 		break;
3568 	case PACKET_TX_HAS_OFF:
3569 		val = po->tp_tx_has_off;
3570 		break;
3571 	case PACKET_QDISC_BYPASS:
3572 		val = packet_use_direct_xmit(po);
3573 		break;
3574 	default:
3575 		return -ENOPROTOOPT;
3576 	}
3577 
3578 	if (len > lv)
3579 		len = lv;
3580 	if (put_user(len, optlen))
3581 		return -EFAULT;
3582 	if (copy_to_user(optval, data, len))
3583 		return -EFAULT;
3584 	return 0;
3585 }
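
/* Reading PACKET_STATISTICS from userspace: the counters are Clear-On-Read,
 * and for V1/V2 the tp_packets value returned above already includes
 * tp_drops. A minimal sketch (not part of this file); the helper name is
 * illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static int read_stats(int fd, struct tpacket_stats *st)
 *	{
 *		socklen_t len = sizeof(*st);
 *
 *		return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, st, &len);
 *	}
 */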
3586 
3587 
3588 static int packet_notifier(struct notifier_block *this,
3589 			   unsigned long msg, void *ptr)
3590 {
3591 	struct sock *sk;
3592 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3593 	struct net *net = dev_net(dev);
3594 
3595 	rcu_read_lock();
3596 	sk_for_each_rcu(sk, &net->packet.sklist) {
3597 		struct packet_sock *po = pkt_sk(sk);
3598 
3599 		switch (msg) {
3600 		case NETDEV_UNREGISTER:
3601 			if (po->mclist)
3602 				packet_dev_mclist_delete(dev, &po->mclist);
3603 			/* fallthrough */
3604 
3605 		case NETDEV_DOWN:
3606 			if (dev->ifindex == po->ifindex) {
3607 				spin_lock(&po->bind_lock);
3608 				if (po->running) {
3609 					__unregister_prot_hook(sk, false);
3610 					sk->sk_err = ENETDOWN;
3611 					if (!sock_flag(sk, SOCK_DEAD))
3612 						sk->sk_error_report(sk);
3613 				}
3614 				if (msg == NETDEV_UNREGISTER) {
3615 					packet_cached_dev_reset(po);
3616 					po->ifindex = -1;
3617 					if (po->prot_hook.dev)
3618 						dev_put(po->prot_hook.dev);
3619 					po->prot_hook.dev = NULL;
3620 				}
3621 				spin_unlock(&po->bind_lock);
3622 			}
3623 			break;
3624 		case NETDEV_UP:
3625 			if (dev->ifindex == po->ifindex) {
3626 				spin_lock(&po->bind_lock);
3627 				if (po->num)
3628 					register_prot_hook(sk);
3629 				spin_unlock(&po->bind_lock);
3630 			}
3631 			break;
3632 		}
3633 	}
3634 	rcu_read_unlock();
3635 	return NOTIFY_DONE;
3636 }
3637 
3638 
3639 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3640 			unsigned long arg)
3641 {
3642 	struct sock *sk = sock->sk;
3643 
3644 	switch (cmd) {
3645 	case SIOCOUTQ:
3646 	{
3647 		int amount = sk_wmem_alloc_get(sk);
3648 
3649 		return put_user(amount, (int __user *)arg);
3650 	}
3651 	case SIOCINQ:
3652 	{
3653 		struct sk_buff *skb;
3654 		int amount = 0;
3655 
3656 		spin_lock_bh(&sk->sk_receive_queue.lock);
3657 		skb = skb_peek(&sk->sk_receive_queue);
3658 		if (skb)
3659 			amount = skb->len;
3660 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3661 		return put_user(amount, (int __user *)arg);
3662 	}
3663 	case SIOCGSTAMP:
3664 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3665 	case SIOCGSTAMPNS:
3666 		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3667 
3668 #ifdef CONFIG_INET
3669 	case SIOCADDRT:
3670 	case SIOCDELRT:
3671 	case SIOCDARP:
3672 	case SIOCGARP:
3673 	case SIOCSARP:
3674 	case SIOCGIFADDR:
3675 	case SIOCSIFADDR:
3676 	case SIOCGIFBRDADDR:
3677 	case SIOCSIFBRDADDR:
3678 	case SIOCGIFNETMASK:
3679 	case SIOCSIFNETMASK:
3680 	case SIOCGIFDSTADDR:
3681 	case SIOCSIFDSTADDR:
3682 	case SIOCSIFFLAGS:
3683 		return inet_dgram_ops.ioctl(sock, cmd, arg);
3684 #endif
3685 
3686 	default:
3687 		return -ENOIOCTLCMD;
3688 	}
3689 	return 0;
3690 }
3691 
3692 static unsigned int packet_poll(struct file *file, struct socket *sock,
3693 				poll_table *wait)
3694 {
3695 	struct sock *sk = sock->sk;
3696 	struct packet_sock *po = pkt_sk(sk);
3697 	unsigned int mask = datagram_poll(file, sock, wait);
3698 
3699 	spin_lock_bh(&sk->sk_receive_queue.lock);
3700 	if (po->rx_ring.pg_vec) {
3701 		if (!packet_previous_rx_frame(po, &po->rx_ring,
3702 			TP_STATUS_KERNEL))
3703 			mask |= POLLIN | POLLRDNORM;
3704 	}
3705 	spin_unlock_bh(&sk->sk_receive_queue.lock);
3706 	spin_lock_bh(&sk->sk_write_queue.lock);
3707 	if (po->tx_ring.pg_vec) {
3708 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3709 			mask |= POLLOUT | POLLWRNORM;
3710 	}
3711 	spin_unlock_bh(&sk->sk_write_queue.lock);
3712 	return mask;
3713 }
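/* User-space sketch (illustrative, TPACKET_V2 rx ring assumed): the mapped
 * ring is normally consumed by polling for POLLIN and then walking frames
 * that have been handed to user space, returning each one to the kernel
 * when done:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	struct tpacket2_hdr *hdr = frame;	// current frame in the ring
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// packet data starts at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */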
3714 
3715 
3716 /* Dirty? Well, I still have not found a better way to account
3717  * for user mmaps: ->mapped below counts live user mappings.
3718  */
3719 
3720 static void packet_mm_open(struct vm_area_struct *vma)
3721 {
3722 	struct file *file = vma->vm_file;
3723 	struct socket *sock = file->private_data;
3724 	struct sock *sk = sock->sk;
3725 
3726 	if (sk)
3727 		atomic_inc(&pkt_sk(sk)->mapped);
3728 }
3729 
3730 static void packet_mm_close(struct vm_area_struct *vma)
3731 {
3732 	struct file *file = vma->vm_file;
3733 	struct socket *sock = file->private_data;
3734 	struct sock *sk = sock->sk;
3735 
3736 	if (sk)
3737 		atomic_dec(&pkt_sk(sk)->mapped);
3738 }
3739 
3740 static const struct vm_operations_struct packet_mmap_ops = {
3741 	.open	=	packet_mm_open,
3742 	.close	=	packet_mm_close,
3743 };
3744 
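/* Free a previously allocated block vector: each buffer is released with
 * vfree() or free_pages() depending on how it was obtained, then the vector
 * itself is freed.
 */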
3745 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3746 			unsigned int len)
3747 {
3748 	int i;
3749 
3750 	for (i = 0; i < len; i++) {
3751 		if (likely(pg_vec[i].buffer)) {
3752 			if (is_vmalloc_addr(pg_vec[i].buffer))
3753 				vfree(pg_vec[i].buffer);
3754 			else
3755 				free_pages((unsigned long)pg_vec[i].buffer,
3756 					   order);
3757 			pg_vec[i].buffer = NULL;
3758 		}
3759 	}
3760 	kfree(pg_vec);
3761 }
3762 
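/* Allocate one ring block of 2^order pages. Three attempts are made, in
 * decreasing order of preference: physically contiguous pages without
 * retrying, then vmalloc, and finally contiguous pages again with the
 * allocator allowed to retry and reclaim.
 */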
3763 static char *alloc_one_pg_vec_page(unsigned long order)
3764 {
3765 	char *buffer;
3766 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3767 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3768 
3769 	buffer = (char *) __get_free_pages(gfp_flags, order);
3770 	if (buffer)
3771 		return buffer;
3772 
3773 	/* __get_free_pages failed, fall back to vmalloc */
3774 	buffer = vzalloc((1 << order) * PAGE_SIZE);
3775 	if (buffer)
3776 		return buffer;
3777 
3778 	/* vmalloc failed, let's dig into swap here */
3779 	gfp_flags &= ~__GFP_NORETRY;
3780 	buffer = (char *) __get_free_pages(gfp_flags, order);
3781 	if (buffer)
3782 		return buffer;
3783 
3784 	/* complete and utter failure */
3785 	return NULL;
3786 }
3787 
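/* Allocate the array of ring blocks described by the tpacket_req; if any
 * block allocation fails, the whole vector is torn down and NULL returned.
 */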
3788 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3789 {
3790 	unsigned int block_nr = req->tp_block_nr;
3791 	struct pgv *pg_vec;
3792 	int i;
3793 
3794 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3795 	if (unlikely(!pg_vec))
3796 		goto out;
3797 
3798 	for (i = 0; i < block_nr; i++) {
3799 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3800 		if (unlikely(!pg_vec[i].buffer))
3801 			goto out_free_pgvec;
3802 	}
3803 
3804 out:
3805 	return pg_vec;
3806 
3807 out_free_pgvec:
3808 	free_pg_vec(pg_vec, order, block_nr);
3809 	pg_vec = NULL;
3810 	goto out;
3811 }
3812 
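/* Set up or tear down an rx/tx memory-mapped ring. The request is validated
 * (block/frame sizes, counts, alignment), the block vector is allocated, the
 * socket is temporarily unhooked from its protocol handler, and the old ring
 * is swapped out; a ring cannot be replaced while user mappings exist.
 */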
3813 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3814 		int closing, int tx_ring)
3815 {
3816 	struct pgv *pg_vec = NULL;
3817 	struct packet_sock *po = pkt_sk(sk);
3818 	int was_running, order = 0;
3819 	struct packet_ring_buffer *rb;
3820 	struct sk_buff_head *rb_queue;
3821 	__be16 num;
3822 	int err = -EINVAL;
3823 	/* Alias req to keep code churn minimal */
3824 	struct tpacket_req *req = &req_u->req;
3825 
3826 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3827 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3828 		WARN(1, "Tx-ring is not supported.\n");
3829 		goto out;
3830 	}
3831 
3832 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3833 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3834 
3835 	err = -EBUSY;
3836 	if (!closing) {
3837 		if (atomic_read(&po->mapped))
3838 			goto out;
3839 		if (packet_read_pending(rb))
3840 			goto out;
3841 	}
3842 
3843 	if (req->tp_block_nr) {
3844 		/* Sanity tests and some calculations */
3845 		err = -EBUSY;
3846 		if (unlikely(rb->pg_vec))
3847 			goto out;
3848 
3849 		switch (po->tp_version) {
3850 		case TPACKET_V1:
3851 			po->tp_hdrlen = TPACKET_HDRLEN;
3852 			break;
3853 		case TPACKET_V2:
3854 			po->tp_hdrlen = TPACKET2_HDRLEN;
3855 			break;
3856 		case TPACKET_V3:
3857 			po->tp_hdrlen = TPACKET3_HDRLEN;
3858 			break;
3859 		}
3860 
3861 		err = -EINVAL;
3862 		if (unlikely((int)req->tp_block_size <= 0))
3863 			goto out;
3864 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3865 			goto out;
3866 		if (po->tp_version >= TPACKET_V3 &&
3867 		    (int)(req->tp_block_size -
3868 			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
3869 			goto out;
3870 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3871 					po->tp_reserve))
3872 			goto out;
3873 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3874 			goto out;
3875 
3876 		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3877 		if (unlikely(rb->frames_per_block <= 0))
3878 			goto out;
3879 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3880 					req->tp_frame_nr))
3881 			goto out;
3882 
3883 		err = -ENOMEM;
3884 		order = get_order(req->tp_block_size);
3885 		pg_vec = alloc_pg_vec(req, order);
3886 		if (unlikely(!pg_vec))
3887 			goto out;
3888 		switch (po->tp_version) {
3889 		case TPACKET_V3:
3890 		/* Transmit path is not supported. We checked
3891 		 * it above, but check again to be safe.
3892 		 */
3893 			if (!tx_ring)
3894 				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3895 			break;
3896 		default:
3897 			break;
3898 		}
3899 	}
3900 	/* Done */
3901 	else {
3902 		err = -EINVAL;
3903 		if (unlikely(req->tp_frame_nr))
3904 			goto out;
3905 	}
3906 
3907 	lock_sock(sk);
3908 
3909 	/* Detach socket from network */
3910 	spin_lock(&po->bind_lock);
3911 	was_running = po->running;
3912 	num = po->num;
3913 	if (was_running) {
3914 		po->num = 0;
3915 		__unregister_prot_hook(sk, false);
3916 	}
3917 	spin_unlock(&po->bind_lock);
3918 
3919 	synchronize_net();
3920 
3921 	err = -EBUSY;
3922 	mutex_lock(&po->pg_vec_lock);
3923 	if (closing || atomic_read(&po->mapped) == 0) {
3924 		err = 0;
3925 		spin_lock_bh(&rb_queue->lock);
3926 		swap(rb->pg_vec, pg_vec);
3927 		rb->frame_max = (req->tp_frame_nr - 1);
3928 		rb->head = 0;
3929 		rb->frame_size = req->tp_frame_size;
3930 		spin_unlock_bh(&rb_queue->lock);
3931 
3932 		swap(rb->pg_vec_order, order);
3933 		swap(rb->pg_vec_len, req->tp_block_nr);
3934 
3935 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3936 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
3937 						tpacket_rcv : packet_rcv;
3938 		skb_queue_purge(rb_queue);
3939 		if (atomic_read(&po->mapped))
3940 			pr_err("packet_mmap: vma is busy: %d\n",
3941 			       atomic_read(&po->mapped));
3942 	}
3943 	mutex_unlock(&po->pg_vec_lock);
3944 
3945 	spin_lock(&po->bind_lock);
3946 	if (was_running) {
3947 		po->num = num;
3948 		register_prot_hook(sk);
3949 	}
3950 	spin_unlock(&po->bind_lock);
3951 	if (closing && (po->tp_version > TPACKET_V2)) {
3952 		/* Only the rx ring has a retire-block timer; block-based V3 is not supported on the tx ring */
3953 		if (!tx_ring)
3954 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3955 	}
3956 	release_sock(sk);
3957 
3958 	if (pg_vec)
3959 		free_pg_vec(pg_vec, order, req->tp_block_nr);
3960 out:
3961 	return err;
3962 }
3963 
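/* mmap handler: maps the configured rx ring followed by the tx ring as one
 * contiguous VMA. The requested size must exactly match the combined ring
 * size and the page offset must be zero; ->mapped is bumped so the rings
 * cannot be resized underneath the mapping.
 */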
3964 static int packet_mmap(struct file *file, struct socket *sock,
3965 		struct vm_area_struct *vma)
3966 {
3967 	struct sock *sk = sock->sk;
3968 	struct packet_sock *po = pkt_sk(sk);
3969 	unsigned long size, expected_size;
3970 	struct packet_ring_buffer *rb;
3971 	unsigned long start;
3972 	int err = -EINVAL;
3973 	int i;
3974 
3975 	if (vma->vm_pgoff)
3976 		return -EINVAL;
3977 
3978 	mutex_lock(&po->pg_vec_lock);
3979 
3980 	expected_size = 0;
3981 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3982 		if (rb->pg_vec) {
3983 			expected_size += rb->pg_vec_len
3984 						* rb->pg_vec_pages
3985 						* PAGE_SIZE;
3986 		}
3987 	}
3988 
3989 	if (expected_size == 0)
3990 		goto out;
3991 
3992 	size = vma->vm_end - vma->vm_start;
3993 	if (size != expected_size)
3994 		goto out;
3995 
3996 	start = vma->vm_start;
3997 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3998 		if (rb->pg_vec == NULL)
3999 			continue;
4000 
4001 		for (i = 0; i < rb->pg_vec_len; i++) {
4002 			struct page *page;
4003 			void *kaddr = rb->pg_vec[i].buffer;
4004 			int pg_num;
4005 
4006 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4007 				page = pgv_to_page(kaddr);
4008 				err = vm_insert_page(vma, start, page);
4009 				if (unlikely(err))
4010 					goto out;
4011 				start += PAGE_SIZE;
4012 				kaddr += PAGE_SIZE;
4013 			}
4014 		}
4015 	}
4016 
4017 	atomic_inc(&po->mapped);
4018 	vma->vm_ops = &packet_mmap_ops;
4019 	err = 0;
4020 
4021 out:
4022 	mutex_unlock(&po->pg_vec_lock);
4023 	return err;
4024 }
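/* User-space sketch (illustrative only, assuming 4 KiB pages): the mapping
 * handled above is usually established right after configuring a ring, e.g.
 * a TPACKET_V2 rx ring; the numbers below are arbitrary but satisfy the
 * constraints checked in packet_set_ring() (page-multiple blocks, aligned
 * frames, frames_per_block * block_nr == frame_nr):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */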
4025 
4026 static const struct proto_ops packet_ops_spkt = {
4027 	.family =	PF_PACKET,
4028 	.owner =	THIS_MODULE,
4029 	.release =	packet_release,
4030 	.bind =		packet_bind_spkt,
4031 	.connect =	sock_no_connect,
4032 	.socketpair =	sock_no_socketpair,
4033 	.accept =	sock_no_accept,
4034 	.getname =	packet_getname_spkt,
4035 	.poll =		datagram_poll,
4036 	.ioctl =	packet_ioctl,
4037 	.listen =	sock_no_listen,
4038 	.shutdown =	sock_no_shutdown,
4039 	.setsockopt =	sock_no_setsockopt,
4040 	.getsockopt =	sock_no_getsockopt,
4041 	.sendmsg =	packet_sendmsg_spkt,
4042 	.recvmsg =	packet_recvmsg,
4043 	.mmap =		sock_no_mmap,
4044 	.sendpage =	sock_no_sendpage,
4045 };
4046 
4047 static const struct proto_ops packet_ops = {
4048 	.family =	PF_PACKET,
4049 	.owner =	THIS_MODULE,
4050 	.release =	packet_release,
4051 	.bind =		packet_bind,
4052 	.connect =	sock_no_connect,
4053 	.socketpair =	sock_no_socketpair,
4054 	.accept =	sock_no_accept,
4055 	.getname =	packet_getname,
4056 	.poll =		packet_poll,
4057 	.ioctl =	packet_ioctl,
4058 	.listen =	sock_no_listen,
4059 	.shutdown =	sock_no_shutdown,
4060 	.setsockopt =	packet_setsockopt,
4061 	.getsockopt =	packet_getsockopt,
4062 	.sendmsg =	packet_sendmsg,
4063 	.recvmsg =	packet_recvmsg,
4064 	.mmap =		packet_mmap,
4065 	.sendpage =	sock_no_sendpage,
4066 };
4067 
4068 static const struct net_proto_family packet_family_ops = {
4069 	.family =	PF_PACKET,
4070 	.create =	packet_create,
4071 	.owner	=	THIS_MODULE,
4072 };
4073 
4074 static struct notifier_block packet_netdev_notifier = {
4075 	.notifier_call =	packet_notifier,
4076 };
4077 
4078 #ifdef CONFIG_PROC_FS
4079 
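/* /proc/net/packet: a seq_file that walks the per-namespace packet socket
 * list under RCU and prints one line of state per socket.
 */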
4080 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4081 	__acquires(RCU)
4082 {
4083 	struct net *net = seq_file_net(seq);
4084 
4085 	rcu_read_lock();
4086 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4087 }
4088 
4089 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4090 {
4091 	struct net *net = seq_file_net(seq);
4092 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4093 }
4094 
4095 static void packet_seq_stop(struct seq_file *seq, void *v)
4096 	__releases(RCU)
4097 {
4098 	rcu_read_unlock();
4099 }
4100 
4101 static int packet_seq_show(struct seq_file *seq, void *v)
4102 {
4103 	if (v == SEQ_START_TOKEN)
4104 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4105 	else {
4106 		struct sock *s = sk_entry(v);
4107 		const struct packet_sock *po = pkt_sk(s);
4108 
4109 		seq_printf(seq,
4110 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4111 			   s,
4112 			   atomic_read(&s->sk_refcnt),
4113 			   s->sk_type,
4114 			   ntohs(po->num),
4115 			   po->ifindex,
4116 			   po->running,
4117 			   atomic_read(&s->sk_rmem_alloc),
4118 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4119 			   sock_i_ino(s));
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 static const struct seq_operations packet_seq_ops = {
4126 	.start	= packet_seq_start,
4127 	.next	= packet_seq_next,
4128 	.stop	= packet_seq_stop,
4129 	.show	= packet_seq_show,
4130 };
4131 
4132 static int packet_seq_open(struct inode *inode, struct file *file)
4133 {
4134 	return seq_open_net(inode, file, &packet_seq_ops,
4135 			    sizeof(struct seq_net_private));
4136 }
4137 
4138 static const struct file_operations packet_seq_fops = {
4139 	.owner		= THIS_MODULE,
4140 	.open		= packet_seq_open,
4141 	.read		= seq_read,
4142 	.llseek		= seq_lseek,
4143 	.release	= seq_release_net,
4144 };
4145 
4146 #endif
4147 
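/* Per-network-namespace init/exit: initialize the socket list and create or
 * remove the namespace's /proc/net/packet entry.
 */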
4148 static int __net_init packet_net_init(struct net *net)
4149 {
4150 	mutex_init(&net->packet.sklist_lock);
4151 	INIT_HLIST_HEAD(&net->packet.sklist);
4152 
4153 	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4154 		return -ENOMEM;
4155 
4156 	return 0;
4157 }
4158 
4159 static void __net_exit packet_net_exit(struct net *net)
4160 {
4161 	remove_proc_entry("packet", net->proc_net);
4162 }
4163 
4164 static struct pernet_operations packet_net_ops = {
4165 	.init = packet_net_init,
4166 	.exit = packet_net_exit,
4167 };
4168 
4169 
4170 static void __exit packet_exit(void)
4171 {
4172 	unregister_netdevice_notifier(&packet_netdev_notifier);
4173 	unregister_pernet_subsys(&packet_net_ops);
4174 	sock_unregister(PF_PACKET);
4175 	proto_unregister(&packet_proto);
4176 }
4177 
4178 static int __init packet_init(void)
4179 {
4180 	int rc = proto_register(&packet_proto, 0);
4181 
4182 	if (rc != 0)
4183 		goto out;
4184 
4185 	sock_register(&packet_family_ops);
4186 	register_pernet_subsys(&packet_net_ops);
4187 	register_netdevice_notifier(&packet_netdev_notifier);
4188 out:
4189 	return rc;
4190 }
4191 
4192 module_init(packet_init);
4193 module_exit(packet_exit);
4194 MODULE_LICENSE("GPL");
4195 MODULE_ALIAS_NETPROTO(PF_PACKET);
4196