Lines matching refs: skb (identifier cross-references in net/openvswitch/actions.c)
47 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
52 struct sk_buff *skb; member
112 static struct deferred_action *add_deferred_actions(struct sk_buff *skb, in add_deferred_actions() argument
122 da->skb = skb; in add_deferred_actions()
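
The fragments above belong to the deferred-action machinery in net/openvswitch/actions.c: actions that would otherwise recurse (sampling, recirculation) are queued on a small per-CPU FIFO and drained at the outermost call, which bounds kernel stack depth. Below is a minimal userspace model of that pattern; the names mirror the kernel's, but the 10-entry size and the struct layout are assumptions for illustration, not the exact kernel definitions.

    /* Userspace model of the deferred-action FIFO; not the kernel's
     * exact layout. A full queue makes add_deferred_actions() fail,
     * and the caller then drops the packet. */
    #include <stdio.h>

    #define DEFERRED_ACTION_FIFO_SIZE 10    /* assumed size */

    struct deferred_action {
        void *skb;                  /* stands in for struct sk_buff * */
        const void *actions;
    };

    struct action_fifo {
        int head;                   /* the kernel also tracks a tail for draining */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
    };

    static struct action_fifo fifo; /* per-CPU in the kernel */

    static struct deferred_action *add_deferred_actions(void *skb,
                                                        const void *actions)
    {
        if (fifo.head >= DEFERRED_ACTION_FIFO_SIZE)
            return NULL;

        struct deferred_action *da = &fifo.fifo[fifo.head++];

        da->skb = skb;              /* mirrors 'da->skb = skb' at line 122 */
        da->actions = actions;
        return da;
    }

    int main(void)
    {
        char pkt;

        printf("queued: %s\n",
               add_deferred_actions(&pkt, NULL) ? "yes" : "no");
        return 0;
    }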
140 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, in push_mpls() argument
147 if (skb->encapsulation) in push_mpls()
150 if (skb_cow_head(skb, MPLS_HLEN) < 0) in push_mpls()
153 skb_push(skb, MPLS_HLEN); in push_mpls()
154 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in push_mpls()
155 skb->mac_len); in push_mpls()
156 skb_reset_mac_header(skb); in push_mpls()
158 new_mpls_lse = (__be32 *)skb_mpls_header(skb); in push_mpls()
161 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); in push_mpls()
163 hdr = eth_hdr(skb); in push_mpls()
166 if (!skb->inner_protocol) in push_mpls()
167 skb_set_inner_protocol(skb, skb->protocol); in push_mpls()
168 skb->protocol = mpls->mpls_ethertype; in push_mpls()
174 static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, in pop_mpls() argument
180 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in pop_mpls()
184 skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN); in pop_mpls()
186 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in pop_mpls()
187 skb->mac_len); in pop_mpls()
189 __skb_pull(skb, MPLS_HLEN); in pop_mpls()
190 skb_reset_mac_header(skb); in pop_mpls()
195 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); in pop_mpls()
197 if (eth_p_mpls(skb->protocol)) in pop_mpls()
198 skb->protocol = ethertype; in pop_mpls()
204 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_mpls() argument
211 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in set_mpls()
215 stack = (__be32 *)skb_mpls_header(skb); in set_mpls()
217 if (skb->ip_summed == CHECKSUM_COMPLETE) { in set_mpls()
220 skb->csum = ~csum_partial((char *)diff, sizeof(diff), in set_mpls()
221 ~skb->csum); in set_mpls()
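
set_mpls() rewrites the label stack entry in place; when the skb carries a CHECKSUM_COMPLETE sum, the code folds the difference between the old and new 32-bit words into skb->csum instead of re-summing the packet. That is the incremental update of RFC 1624 (eq. 3: HC' = ~(~HC + ~m + m')). The sketch below reduces it to a single 16-bit word in userspace, mirroring the arithmetic but not the kernel's csum_partial() plumbing:

    /* Incremental Internet checksum update, RFC 1624 eq. 3. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csum_update(uint16_t hc, uint16_t old_word,
                                uint16_t new_word)
    {
        uint32_t sum = (uint16_t)~hc;

        sum += (uint16_t)~old_word;     /* subtract old data, */
        sum += new_word;                /* add new data */
        while (sum >> 16)               /* fold carries (one's complement) */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        printf("updated checksum: 0x%04x\n",
               csum_update(0x1234, 0xdead, 0xbeef));
        return 0;
    }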
229 static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) in pop_vlan() argument
233 err = skb_vlan_pop(skb); in pop_vlan()
234 if (skb_vlan_tag_present(skb)) in pop_vlan()
241 static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, in push_vlan() argument
244 if (skb_vlan_tag_present(skb)) in push_vlan()
248 return skb_vlan_push(skb, vlan->vlan_tpid, in push_vlan()
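
pop_vlan() and push_vlan() delegate to skb_vlan_pop()/skb_vlan_push(), which move the 802.1Q tag between packet bytes and skb metadata (hence the skb_vlan_tag_present() checks). On the wire, a tag is the TPID followed by 16 bits of tag control information; a sketch of the TCI packing follows (the tci argument to the push, not shown above, also masks out OVS's internal VLAN_TAG_PRESENT flag, to the best of our reading):

    /* Pack 802.1Q tag control information:
     * PCP (3 bits) | DEI (1) | VID (12). */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t vlan_tci(uint8_t pcp, uint8_t dei, uint16_t vid)
    {
        return (uint16_t)(((pcp & 0x7) << 13) |
                          ((dei & 0x1) << 12) |
                          (vid & 0xfff));
    }

    int main(void)
    {
        /* priority 5, no drop-eligible bit, VLAN 100 */
        printf("TCI: 0x%04x\n", vlan_tci(5, 0, 100));
        return 0;
    }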
264 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_eth_addr() argument
270 err = skb_ensure_writable(skb, ETH_HLEN); in set_eth_addr()
274 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); in set_eth_addr()
276 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src, in set_eth_addr()
278 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst, in set_eth_addr()
281 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); in set_eth_addr()
283 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source); in set_eth_addr()
284 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest); in set_eth_addr()
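
set_eth_addr() pulls the old MAC bytes out of the checksum, performs a masked rewrite, pushes the new bytes back in, and copies the result into the flow key so later lookups see the rewritten addresses. The masked copy keeps unmasked bits from the packet and takes masked bits from the action; this is our reading of ether_addr_copy_masked(), reproduced as a userspace sketch:

    /* Masked MAC rewrite: masked bits come from src_, the rest are
     * kept from dst_. Models ether_addr_copy_masked() as we
     * understand it. */
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static void ether_addr_copy_masked(uint8_t *dst_, const uint8_t *src_,
                                       const uint8_t *mask_)
    {
        for (int i = 0; i < ETH_ALEN; i++)
            dst_[i] = (dst_[i] & ~mask_[i]) | (src_[i] & mask_[i]);
    }

    int main(void)
    {
        uint8_t pkt[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t key[ETH_ALEN]  = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };

        ether_addr_copy_masked(pkt, key, mask);  /* aa:bb:cc:33:44:55 */
        for (int i = 0; i < ETH_ALEN; i++)
            printf("%02x%c", pkt[i], i < ETH_ALEN - 1 ? ':' : '\n');
        return 0;
    }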
288 static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, in update_ip_l4_checksum() argument
291 int transport_len = skb->len - skb_transport_offset(skb); in update_ip_l4_checksum()
298 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, in update_ip_l4_checksum()
302 struct udphdr *uh = udp_hdr(skb); in update_ip_l4_checksum()
304 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { in update_ip_l4_checksum()
305 inet_proto_csum_replace4(&uh->check, skb, in update_ip_l4_checksum()
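
update_ip_l4_checksum() exists because TCP and UDP checksums cover an IP pseudo-header: rewriting an IPv4 address without patching the L4 checksum would corrupt it. UDP gets the extra condition because a zero checksum means "not computed", except on CHECKSUM_PARTIAL skbs, where the field holds the pseudo-header sum for the hardware and must be patched regardless. The arithmetic behind inet_proto_csum_replace4(), minus the kernel's skb bookkeeping and under our own helper name:

    /* Patch an L4 checksum after a 32-bit address rewrite: subtract
     * the old address words, add the new ones, one's complement. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~(from & 0xffff);
        sum += (uint16_t)(to >> 16);
        sum += (uint16_t)(to & 0xffff);
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint32_t from = ntohl(inet_addr("192.0.2.1"));
        uint32_t to = ntohl(inet_addr("198.51.100.7"));

        printf("patched check: 0x%04x\n", csum_replace4(0x1c46, from, to));
        return 0;
    }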
314 static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, in set_ip_addr() argument
317 update_ip_l4_checksum(skb, nh, *addr, new_addr); in set_ip_addr()
319 skb_clear_hash(skb); in set_ip_addr()
323 static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, in update_ipv6_checksum() argument
326 int transport_len = skb->len - skb_transport_offset(skb); in update_ipv6_checksum()
330 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, in update_ipv6_checksum()
334 struct udphdr *uh = udp_hdr(skb); in update_ipv6_checksum()
336 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { in update_ipv6_checksum()
337 inet_proto_csum_replace16(&uh->check, skb, in update_ipv6_checksum()
345 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, in update_ipv6_checksum()
346 skb, addr, new_addr, true); in update_ipv6_checksum()
359 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, in set_ipv6_addr() argument
364 update_ipv6_checksum(skb, l4_proto, addr, new_addr); in set_ipv6_addr()
366 skb_clear_hash(skb); in set_ipv6_addr()
378 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, in set_ip_ttl() argument
387 static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_ipv4() argument
395 err = skb_ensure_writable(skb, skb_network_offset(skb) + in set_ipv4()
400 nh = ip_hdr(skb); in set_ipv4()
410 set_ip_addr(skb, nh, &nh->saddr, new_addr); in set_ipv4()
418 set_ip_addr(skb, nh, &nh->daddr, new_addr); in set_ipv4()
427 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl); in set_ipv4()
439 static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_ipv6() argument
446 err = skb_ensure_writable(skb, skb_network_offset(skb) + in set_ipv6()
451 nh = ipv6_hdr(skb); in set_ipv6()
464 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, in set_ipv6()
481 recalc_csum = (ipv6_find_hdr(skb, &offset, in set_ipv6()
486 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked, in set_ipv6()
511 static void set_tp_port(struct sk_buff *skb, __be16 *port, in set_tp_port() argument
514 inet_proto_csum_replace2(check, skb, *port, new_port, false); in set_tp_port()
518 static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_udp() argument
526 err = skb_ensure_writable(skb, skb_transport_offset(skb) + in set_udp()
531 uh = udp_hdr(skb); in set_udp()
536 if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { in set_udp()
538 set_tp_port(skb, &uh->source, src, &uh->check); in set_udp()
542 set_tp_port(skb, &uh->dest, dst, &uh->check); in set_udp()
555 skb_clear_hash(skb); in set_udp()
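
The 'uh->check && skb->ip_summed != CHECKSUM_PARTIAL' test above reflects UDP's special rule: a transmitted checksum of zero means "none", so a computed sum that happens to be zero must go out as 0xffff (CSUM_MANGLED_0), which denotes the same value in one's-complement arithmetic. A one-liner capturing the rule:

    /* UDP zero-checksum rule honored by set_udp(): never transmit a
     * computed checksum of 0; 0xffff is its one's-complement twin. */
    #include <stdint.h>
    #include <stdio.h>

    #define CSUM_MANGLED_0 0xffff

    static uint16_t udp_wire_check(uint16_t computed)
    {
        return computed ? computed : CSUM_MANGLED_0;
    }

    int main(void)
    {
        printf("0x%04x 0x%04x\n", udp_wire_check(0x1c46), udp_wire_check(0));
        return 0;
    }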
560 static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_tcp() argument
568 err = skb_ensure_writable(skb, skb_transport_offset(skb) + in set_tcp()
573 th = tcp_hdr(skb); in set_tcp()
576 set_tp_port(skb, &th->source, src, &th->check); in set_tcp()
581 set_tp_port(skb, &th->dest, dst, &th->check); in set_tcp()
584 skb_clear_hash(skb); in set_tcp()
589 static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_sctp() argument
593 unsigned int sctphoff = skb_transport_offset(skb); in set_sctp()
598 err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr)); in set_sctp()
602 sh = sctp_hdr(skb); in set_sctp()
604 old_correct_csum = sctp_compute_cksum(skb, sctphoff); in set_sctp()
609 new_csum = sctp_compute_cksum(skb, sctphoff); in set_sctp()
614 skb_clear_hash(skb); in set_sctp()
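
set_sctp() cannot update incrementally because SCTP uses CRC32c, so it computes the correct checksum before (line 604) and after (line 609) the port rewrite and XORs the difference into the header value, deliberately preserving any pre-existing checksum error. A userspace model follows (bitwise CRC32c; the real sctp_compute_cksum() also zeroes the checksum field while summing, which this glosses over):

    /* Error-preserving SCTP checksum update, as in set_sctp(). */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t crc32c(const uint8_t *buf, size_t len)
    {
        uint32_t crc = ~0u;   /* reflected CRC32c, poly 0x1EDC6F41 */

        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    int main(void)
    {
        uint8_t pkt[32] = "an sctp packet with ports";
        uint32_t wire = 0x0badc0de;  /* value found in the header, maybe wrong */
        uint32_t old_correct = crc32c(pkt, sizeof(pkt));

        pkt[1] = 0x42;               /* the port rewrite */
        uint32_t new_correct = crc32c(pkt, sizeof(pkt));

        /* carry the existing error term through unchanged */
        printf("0x%08x -> 0x%08x\n", wire, wire ^ old_correct ^ new_correct);
        return 0;
    }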
621 static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ovs_vport_output() argument
626 if (skb_cow_head(skb, data->l2_len) < 0) { in ovs_vport_output()
627 kfree_skb(skb); in ovs_vport_output()
631 __skb_dst_copy(skb, data->dst); in ovs_vport_output()
632 *OVS_CB(skb) = data->cb; in ovs_vport_output()
633 skb->inner_protocol = data->inner_protocol; in ovs_vport_output()
634 skb->vlan_tci = data->vlan_tci; in ovs_vport_output()
635 skb->vlan_proto = data->vlan_proto; in ovs_vport_output()
638 skb_push(skb, data->l2_len); in ovs_vport_output()
639 memcpy(skb->data, &data->l2_data, data->l2_len); in ovs_vport_output()
640 skb_postpush_rcsum(skb, skb->data, data->l2_len); in ovs_vport_output()
641 skb_reset_mac_header(skb); in ovs_vport_output()
643 ovs_vport_send(vport, skb); in ovs_vport_output()
661 static void prepare_frag(struct vport *vport, struct sk_buff *skb) in prepare_frag() argument
663 unsigned int hlen = skb_network_offset(skb); in prepare_frag()
667 data->dst = skb->_skb_refdst; in prepare_frag()
669 data->cb = *OVS_CB(skb); in prepare_frag()
670 data->inner_protocol = skb->inner_protocol; in prepare_frag()
671 data->vlan_tci = skb->vlan_tci; in prepare_frag()
672 data->vlan_proto = skb->vlan_proto; in prepare_frag()
674 memcpy(&data->l2_data, skb->data, hlen); in prepare_frag()
676 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); in prepare_frag()
677 skb_pull(skb, hlen); in prepare_frag()
681 struct sk_buff *skb, u16 mru, __be16 ethertype) in ovs_fragment() argument
683 if (skb_network_offset(skb) > MAX_L2_LEN) { in ovs_fragment()
692 prepare_frag(vport, skb); in ovs_fragment()
697 orig_dst = skb->_skb_refdst; in ovs_fragment()
698 skb_dst_set_noref(skb, &ovs_dst); in ovs_fragment()
699 IPCB(skb)->frag_max_size = mru; in ovs_fragment()
701 ip_do_fragment(net, skb->sk, skb, ovs_vport_output); in ovs_fragment()
712 prepare_frag(vport, skb); in ovs_fragment()
718 orig_dst = skb->_skb_refdst; in ovs_fragment()
719 skb_dst_set_noref(skb, &ovs_rt.dst); in ovs_fragment()
720 IP6CB(skb)->frag_max_size = mru; in ovs_fragment()
722 v6ops->fragment(net, skb->sk, skb, ovs_vport_output); in ovs_fragment()
733 kfree_skb(skb); in ovs_fragment()
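
prepare_frag(), ovs_vport_output() and ovs_fragment() together re-fragment packets that were defragmented for connection tracking: the L2 header and skb metadata are stashed (per-CPU in the kernel), the bare L3 packet is handed to ip_do_fragment() or the IPv6 equivalent, and the per-fragment output callback pushes the saved L2 bytes back onto every fragment. Below is a compact model of just that save/strip/restore dance; real fragmentation also rewrites IP headers per fragment, which this skips:

    #include <stdio.h>
    #include <string.h>

    #define MAX_L2_LEN 18

    struct frag_ctx {                /* models struct ovs_frag_data */
        unsigned char l2_data[MAX_L2_LEN];
        int l2_len;
    };

    /* ovs_vport_output() analogue: re-push L2 onto each fragment */
    static void output_fragment(const struct frag_ctx *ctx,
                                const unsigned char *l3, int len)
    {
        unsigned char frame[1600];

        memcpy(frame, ctx->l2_data, ctx->l2_len);
        memcpy(frame + ctx->l2_len, l3, len);
        printf("fragment out: %d byte frame\n", ctx->l2_len + len);
    }

    /* ip_do_fragment() analogue: naive split at mru boundaries */
    static void do_fragment(const struct frag_ctx *ctx,
                            const unsigned char *l3, int len, int mru)
    {
        for (int off = 0; off < len; off += mru)
            output_fragment(ctx, l3 + off,
                            len - off < mru ? len - off : mru);
    }

    int main(void)
    {
        static unsigned char pkt[14 + 3000];
        struct frag_ctx ctx = { .l2_len = 14 };

        memcpy(ctx.l2_data, pkt, ctx.l2_len);   /* prepare_frag() */
        do_fragment(&ctx, pkt + ctx.l2_len, 3000, 1480);
        return 0;
    }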
736 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, in do_output() argument
742 u16 mru = OVS_CB(skb)->mru; in do_output()
744 if (likely(!mru || (skb->len <= mru + ETH_HLEN))) { in do_output()
745 ovs_vport_send(vport, skb); in do_output()
751 if (eth_p_mpls(skb->protocol)) in do_output()
752 ethertype = skb->inner_protocol; in do_output()
754 ethertype = vlan_get_protocol(skb); in do_output()
757 ovs_fragment(net, vport, skb, mru, ethertype); in do_output()
759 kfree_skb(skb); in do_output()
762 kfree_skb(skb); in do_output()
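
do_output() only takes the fragmentation path when connection tracking recorded a maximum receive unit and the packet outgrew it; ETH_HLEN is excluded because the MRU was measured on the L3 payload. The gate, isolated:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN 14

    /* Mirrors 'likely(!mru || (skb->len <= mru + ETH_HLEN))' above. */
    static bool send_whole(uint32_t skb_len, uint16_t mru)
    {
        return !mru || skb_len <= (uint32_t)mru + ETH_HLEN;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               send_whole(1514, 0),      /* no MRU recorded: send */
               send_whole(1400, 1500),   /* fits: send */
               send_whole(2000, 1500));  /* too big: fragment */
        return 0;
    }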
766 static int output_userspace(struct datapath *dp, struct sk_buff *skb, in output_userspace() argument
776 upcall.mru = OVS_CB(skb)->mru; in output_userspace()
797 err = dev_fill_metadata_dst(vport->dev, skb); in output_userspace()
799 upcall.egress_tun_info = skb_tunnel_info(skb); in output_userspace()
815 return ovs_dp_upcall(dp, skb, key, &upcall); in output_userspace()
818 static int sample(struct datapath *dp, struct sk_buff *skb, in sample() argument
857 return output_userspace(dp, skb, key, a, actions, actions_len); in sample()
859 skb = skb_clone(skb, GFP_ATOMIC); in sample()
860 if (!skb) in sample()
864 if (!add_deferred_actions(skb, key, a)) { in sample()
869 kfree_skb(skb); in sample()
874 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, in execute_hash() argument
881 hash = skb_get_hash(skb); in execute_hash()
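
execute_hash() stores a flow hash in the key so a later action (typically multipath selection) can consume it; the kernel mixes skb_get_hash() with a caller-supplied basis and never stores zero, which would read as "no hash". A userspace model follows; mix32() is a stand-in for the kernel's jhash_1word(), not a reimplementation of it:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mix32(uint32_t h, uint32_t basis)
    {
        h ^= basis;
        h *= 0x9e3779b1u;        /* any decent avalanche step */
        h ^= h >> 16;
        return h;
    }

    static uint32_t ovs_flow_hash(uint32_t skb_hash, uint32_t basis)
    {
        uint32_t hash = mix32(skb_hash, basis);

        return hash ? hash : 0x1;   /* zero means "no hash present" */
    }

    int main(void)
    {
        printf("0x%08x\n", ovs_flow_hash(0xdeadbeef, 42));
        return 0;
    }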
889 static int execute_set_action(struct sk_buff *skb, in execute_set_action() argument
897 skb_dst_drop(skb); in execute_set_action()
899 skb_dst_set(skb, (struct dst_entry *)tun->tun_dst); in execute_set_action()
909 static int execute_masked_set_action(struct sk_buff *skb, in execute_masked_set_action() argument
917 OVS_SET_MASKED(skb->priority, nla_get_u32(a), in execute_masked_set_action()
919 flow_key->phy.priority = skb->priority; in execute_masked_set_action()
923 OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *)); in execute_masked_set_action()
924 flow_key->phy.skb_mark = skb->mark; in execute_masked_set_action()
933 err = set_eth_addr(skb, flow_key, nla_data(a), in execute_masked_set_action()
938 err = set_ipv4(skb, flow_key, nla_data(a), in execute_masked_set_action()
943 err = set_ipv6(skb, flow_key, nla_data(a), in execute_masked_set_action()
948 err = set_tcp(skb, flow_key, nla_data(a), in execute_masked_set_action()
953 err = set_udp(skb, flow_key, nla_data(a), in execute_masked_set_action()
958 err = set_sctp(skb, flow_key, nla_data(a), in execute_masked_set_action()
963 err = set_mpls(skb, flow_key, nla_data(a), get_mask(a, in execute_masked_set_action()
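
The priority and mark cases above use OVS_SET_MASKED(), the scalar cousin of the masked MAC copy: unmasked bits survive, masked bits are replaced, and the flow key is refreshed from the skb afterwards so it stays authoritative. The macro as we read it from the OVS sources:

    #include <stdint.h>
    #include <stdio.h>

    #define OVS_SET_MASKED(OLD, NEW, MASK) \
        ((OLD) = ((OLD) & ~(MASK)) | ((NEW) & (MASK)))

    int main(void)
    {
        uint32_t mark = 0x00001234;

        OVS_SET_MASKED(mark, 0xabcd0000, 0xffff0000);
        printf("mark: 0x%08x\n", mark);   /* 0xabcd1234 */
        return 0;
    }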
978 static int execute_recirc(struct datapath *dp, struct sk_buff *skb, in execute_recirc() argument
987 err = ovs_flow_key_update(skb, key); in execute_recirc()
997 skb = skb_clone(skb, GFP_ATOMIC); in execute_recirc()
1002 if (!skb) in execute_recirc()
1006 da = add_deferred_actions(skb, key, NULL); in execute_recirc()
1010 kfree_skb(skb); in execute_recirc()
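
execute_recirc() sends the packet around for another flow-table pass via the deferred FIFO; the clone at line 997 happens only when recirculation is not the last action, because the remaining actions still need the original skb. The ownership rule, sketched with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pkt { char data[64]; };

    static struct pkt *pkt_clone(const struct pkt *p)
    {
        struct pkt *c = malloc(sizeof(*c));

        if (c)
            memcpy(c, p, sizeof(*c));
        return c;
    }

    /* Steal the original only when recirc is the final action. */
    static struct pkt *pkt_for_recirc(struct pkt *skb, bool last_action)
    {
        return last_action ? skb : pkt_clone(skb); /* NULL means "drop" */
    }

    int main(void)
    {
        struct pkt p = { "payload" };
        struct pkt *q = pkt_for_recirc(&p, false);

        printf("cloned: %s\n", q && q != &p ? "yes" : "no");
        if (q != &p)
            free(q);
        return 0;
    }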
1021 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, in do_execute_actions() argument
1039 struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); in do_execute_actions()
1053 output_userspace(dp, skb, key, a, attr, len); in do_execute_actions()
1057 execute_hash(skb, key, a); in do_execute_actions()
1061 err = push_mpls(skb, key, nla_data(a)); in do_execute_actions()
1065 err = pop_mpls(skb, key, nla_get_be16(a)); in do_execute_actions()
1069 err = push_vlan(skb, key, nla_data(a)); in do_execute_actions()
1073 err = pop_vlan(skb, key); in do_execute_actions()
1077 err = execute_recirc(dp, skb, key, a, rem); in do_execute_actions()
1088 err = execute_set_action(skb, key, nla_data(a)); in do_execute_actions()
1093 err = execute_masked_set_action(skb, key, nla_data(a)); in do_execute_actions()
1097 err = sample(dp, skb, key, a, attr, len); in do_execute_actions()
1102 err = ovs_flow_key_update(skb, key); in do_execute_actions()
1107 err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, in do_execute_actions()
1117 kfree_skb(skb); in do_execute_actions()
1123 do_output(dp, skb, prev_port, key); in do_execute_actions()
1125 consume_skb(skb); in do_execute_actions()
1141 struct sk_buff *skb = da->skb; in process_deferred_actions() local
1146 do_execute_actions(dp, skb, key, actions, in process_deferred_actions()
1149 ovs_dp_process_packet(skb, key); in process_deferred_actions()
1157 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, in ovs_execute_actions() argument
1165 err = do_execute_actions(dp, skb, key, in ovs_execute_actions()
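
ovs_execute_actions() closes the loop begun by add_deferred_actions(): it raises a per-CPU nesting level, runs do_execute_actions(), and only the outermost level drains the deferred FIFO via process_deferred_actions(), so re-entry (for example, output to an internal port feeding the packet back into the datapath) never deepens the stack. A single-threaded model of that guard; the counter is per-CPU in the kernel:

    #include <stdio.h>

    static int exec_actions_level;

    static void execute_actions(int depth);

    static void process_deferred_actions(void)
    {
        printf("draining deferred actions at the outermost level\n");
    }

    static void do_execute_actions(int depth)
    {
        if (depth)                  /* e.g. an output loops back in */
            execute_actions(depth - 1);
    }

    static void execute_actions(int depth)
    {
        exec_actions_level++;
        do_execute_actions(depth);
        if (exec_actions_level == 1)
            process_deferred_actions();
        exec_actions_level--;
    }

    int main(void)
    {
        execute_actions(2);         /* nested twice; drains exactly once */
        return 0;
    }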