#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
#include <scsi/fc/fc_fcoe.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
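	/* Statically assert that dst immediately follows src in
	 * struct flow_keys, so a single 8-byte memcpy covers both
	 * addresses (and may be lowered to one 64-bit load/store).
	 */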
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff, where
 * poff is the protocol port offset returned from proto_ports_offset().
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

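	/* proto_ports_offset() returns a negative value for protocols
	 * that carry no port numbers; report 0 in that case.
	 */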
	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
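
/* Usage sketch (hypothetical caller): with thoff at the TCP header,
 *	ports = __skb_flow_get_ports(skb, thoff, IPPROTO_TCP, NULL, 0);
 * yields sport and dport packed into one network-order 32-bit word.
 */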

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the remaining
 *	arguments are specified
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the struct flow_keys from either the skbuff
 * or a raw buffer specified by the remaining parameters.
 */
bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
			void *data, __be16 proto, int nhoff, int hlen)
{
	u8 ip_proto;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

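		/* For fragments the transport header may be absent or
		 * present only in the first fragment, so clear ip_proto
		 * below: ports are then never dissected and every
		 * fragment of a flow hashes identically.
		 */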
		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		/* skip the address processing if skb is NULL.  The assumption
		 * here is that if there is no skb we are not looking for flow
		 * info but lengths and protocols.
		 */
		if (!skb)
			break;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		/* see comment above in IPv4 section */
		if (!skb)
			break;

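		/* flow_keys holds only 32-bit addresses, so fold each
		 * 128-bit IPv6 address down with ipv6_addr_hash().
		 */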
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);

		if (ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			/* Awesome, IPv6 packet has a flow label so we can
			 * use that to represent the ports without any
			 * further dissection.
			 */
			flow->n_proto = proto;
			flow->ip_proto = ip_proto;
			flow->ports = flow_label;
			flow->thoff = (u16)nhoff;

			return true;
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		flow->src = hdr->srcnode;
		flow->dst = 0;
		flow->n_proto = proto;
		flow->thoff = (u16)nhoff;
		return true;
	}
	case htons(ETH_P_FCOE):
		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		return false;
	}

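	/* Second pass: look inside L3 encapsulations (GRE, IP-in-IP)
	 * and re-dissect the inner packet where possible.
	 */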
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
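		/* The mandatory GRE header is 4 bytes; the checksum,
		 * key and sequence fields each add another 4 bytes
		 * when their flag bit is set.
		 */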
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = __skb_header_pointer(skb, nhoff,
							   sizeof(_eth),
							   data, hlen, &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);

				/* Cap headers that we access via pointers at the
				 * end of the Ethernet header as our maximum alignment
				 * at that point is only 2 bytes.
				 */
				if (NET_IP_ALIGN)
					hlen = nhoff;
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->thoff = (u16)nhoff;

	/* unless skb is set we don't need to record port info */
	if (skb)
		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						   data, hlen);

	return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);

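/* Random key mixed into every flow hash; lazily initialized on first
 * use so hash values are unpredictable across boots.
 */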
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
	u32 hash;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys->dst < (__force u32)keys->src) ||
	    (((__force u32)keys->dst == (__force u32)keys->src) &&
	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
		swap(keys->dst, keys->src);
		swap(keys->port16[0], keys->port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys->dst,
				  (__force u32)keys->src,
				  (__force u32)keys->ports);
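	/* 0 is reserved to mean "no hash computed"; remap it so a
	 * valid hash is always non-zero.
	 */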
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	skb->sw_hash = 1;

	skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
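		/* The recorded Rx queue may exceed num_tx_queues; fold
		 * it into range by repeated subtraction rather than a
		 * full modulo.
		 */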
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

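	/* reciprocal_scale() maps the 32-bit hash into [0, qcount)
	 * using ((u64)hash * qcount) >> 32, avoiding a division.
	 */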
	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->thoff;

	switch (keys->ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

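		/* doff is the top nibble of byte 12, counted in 32-bit
		 * words: (*doff & 0xF0) >> 2 == (*doff >> 4) * 4, i.e.
		 * the TCP header length in bytes.
		 */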
		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/* skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push the actual
 * payload to user space, and can instead analyze headers only.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
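		/* sender_cpu is stored off by one so that zero can mean
		 * "not recorded"; see netdev_pick_tx() below.
		 */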
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

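		/* Cache the chosen queue on the socket only while it
		 * holds a dst entry; sockets without a cached route
		 * re-select a queue on every transmit.
		 */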
		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
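	/* Record the transmitting CPU, offset by one so that zero
	 * means "unset", for the XPS lookup in get_xps_queue().
	 */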
	if (skb->sender_cpu == 0)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}