This source file includes the following definitions:
- nft_tunnel_get_eval
- nft_tunnel_get_init
- nft_tunnel_get_dump
- nft_tunnel_obj_ip_init
- nft_tunnel_obj_ip6_init
- nft_tunnel_obj_vxlan_init
- nft_tunnel_obj_erspan_init
- nft_tunnel_obj_opts_init
- nft_tunnel_obj_init
- nft_tunnel_obj_eval
- nft_tunnel_ip_dump
- nft_tunnel_opts_dump
- nft_tunnel_ports_dump
- nft_tunnel_flags_dump
- nft_tunnel_obj_dump
- nft_tunnel_obj_destroy
- nft_tunnel_module_init
- nft_tunnel_module_exit

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>

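/* Private data for the "tunnel" expression: which tunnel key to fetch,
 * the destination register, and whether to match RX, TX or any direction.
 */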
struct nft_tunnel {
	enum nft_tunnel_keys key:8;
	enum nft_registers dreg:8;
	enum nft_tunnel_mode mode:8;
};

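/* Fetch tunnel metadata from the packet and load the requested key
 * (NFT_TUNNEL_PATH or NFT_TUNNEL_ID) into the destination register,
 * honouring the configured RX/TX mode.
 */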
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
};

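/* Parse the NFTA_TUNNEL_* attributes for the expression: tunnel key,
 * match mode and destination register, and validate the register store
 * size for the chosen key.
 */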
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, len);
}

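/* Dump the expression configuration (key, register, mode) to netlink. */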
static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type = &nft_tunnel_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval = nft_tunnel_get_eval,
	.init = nft_tunnel_get_init,
	.dump = nft_tunnel_get_dump,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name = "tunnel",
	.ops = &nft_tunnel_get_ops,
	.policy = nft_tunnel_policy,
	.maxattr = NFTA_TUNNEL_MAX,
	.owner = THIS_MODULE,
};

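/* State for the tunnel object: the parsed tunnel options (VXLAN GBP or
 * ERSPAN metadata) and the metadata dst that gets attached to packets.
 */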
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata vxlan;
		struct erspan_metadata erspan;
	} u;
	u32 len;
	__be16 flags;
};

struct nft_tunnel_obj {
	struct metadata_dst *md;
	struct nft_tunnel_opts opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
};

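/* Parse the IPv4 source/destination endpoints for the tunnel object. */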
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
};

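/* Parse the IPv6 endpoints and flow label, and mark the tunnel as IPv6. */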
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
};

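/* Parse the VXLAN group policy (GBP) option for the tunnel object. */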
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len = sizeof(struct vxlan_metadata);
	opts->flags = TUNNEL_VXLAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
};

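/* Parse ERSPAN options: version 1 carries an index, version 2 a
 * direction and hardware ID.
 */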
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len = sizeof(struct erspan_metadata);
	opts->flags = TUNNEL_ERSPAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
};

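/* Dispatch nested NFTA_TUNNEL_KEY_OPTS parsing to the VXLAN or ERSPAN
 * helper, depending on which nested attribute is present.
 */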
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
					  nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
		err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
						opts);
	} else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
		err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
						 opts);
	} else {
		return -EOPNOTSUPP;
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};

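/* Build the tunnel object from netlink attributes: fill an
 * ip_tunnel_info, allocate the metadata dst and attach the parsed
 * options to it. NFTA_TUNNEL_KEY_ID and an IPv4 or IPv6 destination
 * are mandatory.
 */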
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode = IP_TUNNEL_INFO_TX;
	info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

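/* Attach the precomputed tunnel metadata dst to the packet so that a
 * tunnel device transmitting it uses these encapsulation parameters.
 */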
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
			return -1;

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
			return -1;

		nla_nest_end(skb, nest);
	}

	return 0;
}

static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			return -1;
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				return -1;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				return -1;
			break;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

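/* Release the metadata dst when the tunnel object is destroyed. */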
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type = &nft_tunnel_obj_type,
	.size = sizeof(struct nft_tunnel_obj),
	.eval = nft_tunnel_obj_eval,
	.init = nft_tunnel_obj_init,
	.destroy = nft_tunnel_obj_destroy,
	.dump = nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type = NFT_OBJECT_TUNNEL,
	.ops = &nft_tunnel_obj_ops,
	.maxattr = NFTA_TUNNEL_KEY_MAX,
	.policy = nft_tunnel_key_policy,
	.owner = THIS_MODULE,
};

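/* Register the "tunnel" expression and the tunnel object type; undo the
 * expression registration if registering the object type fails.
 */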
static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);