/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>

/* Add the VLAN header into the user buffer if the tag was stripped by
 * hardware offload.
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Rebuild the 802.1Q header that the NIC removed. */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	/* Zero the last destination word so that loads which are not a
	 * multiple of the register size do not leave stale data behind.
	 */
	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_DREG]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_payload_type;
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

/* Chosen for small, aligned loads: the nf_tables core evaluation loop
 * recognizes these ops and evaluates them with an inlined fast path,
 * falling back to nft_payload_eval(), which is why the two ops
 * structures are otherwise identical.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_DREG] == NULL ||
	    tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	/* Use the fast ops only for small, naturally aligned loads outside
	 * the link-layer header.
	 */
	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

static struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};

int __init nft_payload_module_init(void)
{
	return nft_register_expr(&nft_payload_type);
}

void nft_payload_module_exit(void)
{
	nft_unregister_expr(&nft_payload_type);
}