/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

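/* Translate the jiffies value recorded in a flow's 'used' field into
 * milliseconds on the monotonic clock, for reporting to userspace.  The
 * idle time (now - flow_jiffies) is subtracted from the current time, so
 * the result is the time, in ms, at which the flow was last used.  As a
 * rough illustration only: with a current time of 5,000,000 ms and a flow
 * idle for 250 ms worth of jiffies, this returns 4,999,750 ms.
 */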
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

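/* Extract the TCP flag bits, in network byte order, from the 16-bit word
 * that also carries the data offset: the htons(0x0FFF) mask drops the
 * 4-bit data offset and keeps the reserved and flag bits.  This is what
 * key_extract() below stores in key->tp.flags, e.g.
 *
 *	key->tp.flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 */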
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

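/* Update @flow's statistics with one packet: @skb's length (plus the VLAN
 * header when the tag is carried out of band) and the packet's TCP flags.
 * Statistics are kept per NUMA node: stats[0] is pre-allocated and always
 * present, while other nodes lazily get their own flow_stats the first
 * time they show up as a steady writer, presumably to avoid bouncing one
 * shared cache line between nodes.  Callers are expected to hold
 * rcu_read_lock.
 */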
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[node]);

	/* Check if we already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA node is the only writer on the
		 * pre-allocated stats, keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int node;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	for_each_node(node) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int node;

	for_each_node(node) {
		struct flow_stats *stats = ovsl_dereference(flow->stats[node]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

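/* Make sure at least @len bytes from the start of the packet are available
 * in the linear data area.  Returns -EINVAL if the packet itself is too
 * short and -ENOMEM if the data could not be pulled.
 */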
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

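/* Fill in the IPv6 portion of @key from the packet's IPv6 header, walking
 * any extension headers to find the upper-layer protocol and the fragment
 * offset, and point skb->transport_header just past them.  Returns the
 * total network header length (base header plus extensions) on success or
 * a negative errno; -EPROTO indicates the extension header chain could not
 * be skipped.
 */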
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_skip_exthdr() as it
	 * always sets frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(payload_ofs < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

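/* Parse an in-band 802.1Q tag that immediately follows the Ethernet
 * addresses, used when the tag has not already been moved into
 * skb->vlan_tci.  The TCI is stored in key->eth.tci with VLAN_TAG_PRESENT
 * set and the 4-byte tag is pulled from the packet.  A frame too short to
 * hold the tag is not treated as an error; -ENOMEM means the header could
 * not be pulled.
 */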
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}

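/* Consume the two-byte type/length field and return the frame's EtherType.
 * Ethernet II frames return the type directly; 802.3 frames (length field)
 * are probed for an LLC/SNAP header whose encapsulated EtherType is
 * returned, falling back to ETH_P_802_2 for plain 802.2 LLC.  A return
 * value of htons(0) means the SNAP header could not be pulled and is
 * treated as an error by the caller.
 */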
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}

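/* Record the ICMPv6 type and code in the transport-port fields of @key and,
 * for neighbour solicitation/advertisement messages, extract the ND target
 * address and any source/target link-layer address options.  A duplicate
 * link-layer option is treated as invalid and clears the ND fields rather
 * than failing the whole extraction.
 */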
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (skb_vlan_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
			skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				/* fall-through */
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}
	return 0;
}

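/* Re-run packet header extraction on @skb into @key; the metadata part of
 * the key is left as it was.  This is typically used when the packet has
 * been modified and the header-derived fields need to be refreshed, though
 * that is up to the caller.
 */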
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}

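/* Build a complete flow key for a received packet: the metadata portion
 * (tunnel key and options, priority, skb mark, input port and conntrack
 * state) is filled in from @tun_info and the skb itself, then key_extract()
 * parses the packet headers.
 */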
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else  {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	ovs_ct_fill_key(skb, key);
	key->ovs_flow_hash = 0;
	key->recirc_id = 0;

	return key_extract(skb, key);
}

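/* Build a flow key for a packet injected from userspace: the metadata
 * portion of @key comes from the netlink attributes in @attr rather than
 * from the skb, after which the packet headers are parsed as usual.
 */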
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	int err;

	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, attr, key, log);
	if (err)
		return err;

	return key_extract(skb, key);
}
