/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 *                         Martin Josefsson <gandalf@wlug.westbo.se>
 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _IP_SET_H
#define _IP_SET_H

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/ipset/ip_set.h>

#define _IP_SET_MODULE_DESC(a, b, c)		\
	MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
#define IP_SET_MODULE_DESC(a, b, c)		\
	_IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
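
/* Usage sketch (hedged; IPSET_TYPE_REV_MIN/MAX stand in for a module's
 * own revision macros): a set type module supporting revisions 0..4 of
 * a hypothetical "hash:foo" type would typically declare
 *
 *	IP_SET_MODULE_DESC("hash:foo", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 *
 * __stringify() expands the numeric macros so the description reads
 * "hash:foo type of IP sets, revisions 0-4".
 */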

/* Set features */
enum ip_set_feature {
	IPSET_TYPE_IP_FLAG = 0,
	IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
	IPSET_TYPE_PORT_FLAG = 1,
	IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
	IPSET_TYPE_MAC_FLAG = 2,
	IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
	IPSET_TYPE_IP2_FLAG = 3,
	IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
	IPSET_TYPE_NAME_FLAG = 4,
	IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
	IPSET_TYPE_IFACE_FLAG = 5,
	IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
	IPSET_TYPE_MARK_FLAG = 6,
	IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG),
	IPSET_TYPE_NOMATCH_FLAG = 7,
	IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
	/* Strictly speaking not a feature, but a flag for dumping:
	 * this settype must be dumped last */
	IPSET_DUMP_LAST_FLAG = 8,
	IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
};

/* Set extensions */
enum ip_set_extension {
	IPSET_EXT_BIT_TIMEOUT = 0,
	IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
	IPSET_EXT_BIT_COUNTER = 1,
	IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
	IPSET_EXT_BIT_COMMENT = 2,
	IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
	IPSET_EXT_BIT_SKBINFO = 3,
	IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
	/* Mark set with an extension which needs to call destroy */
	IPSET_EXT_BIT_DESTROY = 7,
	IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
};

#define SET_WITH_TIMEOUT(s)	((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s)	((s)->extensions & IPSET_EXT_COUNTER)
#define SET_WITH_COMMENT(s)	((s)->extensions & IPSET_EXT_COMMENT)
#define SET_WITH_SKBINFO(s)	((s)->extensions & IPSET_EXT_SKBINFO)
#define SET_WITH_FORCEADD(s)	((s)->flags & IPSET_CREATE_FLAG_FORCEADD)

/* Extension id, in size order */
enum ip_set_ext_id {
	IPSET_EXT_ID_COUNTER = 0,
	IPSET_EXT_ID_TIMEOUT,
	IPSET_EXT_ID_SKBINFO,
	IPSET_EXT_ID_COMMENT,
	IPSET_EXT_ID_MAX,
};

/* Extension type */
struct ip_set_ext_type {
	/* Destroy extension private data (can be NULL) */
	void (*destroy)(void *ext);
	enum ip_set_extension type;
	enum ipset_cadt_flags flag;
	/* Size and minimal alignment */
	u8 len;
	u8 align;
};

extern const struct ip_set_ext_type ip_set_extensions[];

struct ip_set_ext {
	u64 packets;
	u64 bytes;
	u32 timeout;
	u32 skbmark;
	u32 skbmarkmask;
	u32 skbprio;
	u16 skbqueue;
	char *comment;
};

struct ip_set_counter {
	atomic64_t bytes;
	atomic64_t packets;
};

struct ip_set_comment_rcu {
	struct rcu_head rcu;
	char str[0];
};

struct ip_set_comment {
	struct ip_set_comment_rcu __rcu *c;
};

struct ip_set_skbinfo {
	u32 skbmark;
	u32 skbmarkmask;
	u32 skbprio;
	u16 skbqueue;
};

struct ip_set;

#define ext_timeout(e, s)	\
((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s)	\
((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s)	\
((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s)	\
((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
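
/* Access sketch (hedged; "e" and "ext" are assumed locals of a type
 * implementation): the per-extension offsets are filled in at create
 * time, so an add routine might store the requested timeout with
 *
 *	if (SET_WITH_TIMEOUT(set))
 *		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
 *
 * where ip_set_timeout_set() comes from ip_set_timeout.h, included
 * near the end of this header.
 */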

typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
			   const struct ip_set_ext *ext,
			   struct ip_set_ext *mext, u32 cmdflags);

/* Kernel API function options */
struct ip_set_adt_opt {
	u8 family;		/* Actual protocol family */
	u8 dim;			/* Dimension of match/target */
	u8 flags;		/* Direction and negation flags */
	u32 cmdflags;		/* Command-like flags */
	struct ip_set_ext ext;	/* Extensions */
};
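
/* Fill-in sketch (hedged; "par" and "info" mimic what an xt match or
 * target might have at hand, they are not defined by this header):
 *
 *	struct ip_set_adt_opt opt = {
 *		.family	  = par->family,
 *		.dim	  = info->match_set.dim,
 *		.flags	  = info->match_set.flags,
 *		.cmdflags = 0,
 *		.ext.timeout = UINT_MAX,
 *	};
 */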

/* Set type, variant-specific part */
struct ip_set_type_variant {
	/* Kernelspace: test/add/del entries
	 *		returns negative error code,
	 *			zero for no match/success to add/delete
	 *			positive for matching element */
	int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
		    enum ipset_adt adt, struct ip_set_adt_opt *opt);

	/* Userspace: test/add/del entries
	 *		returns negative error code,
	 *			zero for no match/success to add/delete
	 *			positive for matching element */
	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);

	/* Low level add/del/test functions */
	ipset_adtfn adt[IPSET_ADT_MAX];

	/* When adding entries and the set is full, try to resize the set */
	int (*resize)(struct ip_set *set, bool retried);
	/* Destroy the set */
	void (*destroy)(struct ip_set *set);
	/* Flush the elements */
	void (*flush)(struct ip_set *set);
	/* Expire entries before listing */
	void (*expire)(struct ip_set *set);
	/* List set header data */
	int (*head)(struct ip_set *set, struct sk_buff *skb);
	/* List elements */
	int (*list)(const struct ip_set *set, struct sk_buff *skb,
		    struct netlink_callback *cb);
	/* Keep listing private when a resize runs in parallel */
	void (*uref)(struct ip_set *set, struct netlink_callback *cb,
		     bool start);

	/* Return true if "b" set is the same as "a"
	 * according to the create set parameters */
	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
};

/* The core set type structure */
struct ip_set_type {
	struct list_head list;

	/* Typename */
	char name[IPSET_MAXNAMELEN];
	/* Protocol version */
	u8 protocol;
	/* Set type dimension */
	u8 dimension;
	/*
	 * Supported family: may be NFPROTO_UNSPEC for both
	 * NFPROTO_IPV4/NFPROTO_IPV6.
	 */
	u8 family;
	/* Type revisions */
	u8 revision_min, revision_max;
	/* Set features to control swapping */
	u16 features;

	/* Create set */
	int (*create)(struct net *net, struct ip_set *set,
		      struct nlattr *tb[], u32 flags);

	/* Attribute policies */
	const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
	const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;
};

/* register and unregister set type */
extern int ip_set_type_register(struct ip_set_type *set_type);
extern void ip_set_type_unregister(struct ip_set_type *set_type);
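
/* Registration sketch (hedged, with made-up "foo" identifiers): a type
 * module fills a static ip_set_type and registers it from module init:
 *
 *	static struct ip_set_type foo_type __read_mostly = {
 *		.name		= "hash:foo",
 *		.protocol	= IPSET_PROTOCOL,
 *		.features	= IPSET_TYPE_IP,
 *		.dimension	= IPSET_DIM_ONE,
 *		.family		= NFPROTO_UNSPEC,
 *		.revision_min	= 0,
 *		.revision_max	= 0,
 *		.create		= foo_create,
 *		.create_policy	= { ... },
 *		.adt_policy	= { ... },
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return ip_set_type_register(&foo_type);
 *	}
 *
 *	static void __exit foo_fini(void)
 *	{
 *		ip_set_type_unregister(&foo_type);
 *	}
 */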

/* A generic IP set */
struct ip_set {
	/* The name of the set */
	char name[IPSET_MAXNAMELEN];
	/* Lock protecting the set data */
	spinlock_t lock;
	/* References to the set */
	u32 ref;
	/* The core set type */
	struct ip_set_type *type;
	/* The type variant doing the real job */
	const struct ip_set_type_variant *variant;
	/* The actual INET family of the set */
	u8 family;
	/* The type revision */
	u8 revision;
	/* Extensions */
	u8 extensions;
	/* Create flags */
	u8 flags;
	/* Default timeout value, if enabled */
	u32 timeout;
	/* Element data size */
	size_t dsize;
	/* Offsets to extensions in elements */
	size_t offset[IPSET_EXT_ID_MAX];
	/* The type specific data */
	void *data;
};

static inline void
ip_set_ext_destroy(struct ip_set *set, void *data)
{
	/* Check that the extension is enabled for the set and
	 * call its destroy function for its extension part in data.
	 */
	if (SET_WITH_COMMENT(set))
		ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
			ext_comment(data, set));
}

static inline int
ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
{
	u32 cadt_flags = 0;

	if (SET_WITH_TIMEOUT(set))
		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
					   htonl(set->timeout))))
			return -EMSGSIZE;
	if (SET_WITH_COUNTER(set))
		cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
	if (SET_WITH_COMMENT(set))
		cadt_flags |= IPSET_FLAG_WITH_COMMENT;
	if (SET_WITH_SKBINFO(set))
		cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
	if (SET_WITH_FORCEADD(set))
		cadt_flags |= IPSET_FLAG_WITH_FORCEADD;

	if (!cadt_flags)
		return 0;
	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}

static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
{
	atomic64_add((long long)bytes, &(counter)->bytes);
}

static inline void
ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
{
	atomic64_add((long long)packets, &(counter)->packets);
}

static inline u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->bytes);
}

static inline u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->packets);
}

static inline void
ip_set_update_counter(struct ip_set_counter *counter,
		      const struct ip_set_ext *ext,
		      struct ip_set_ext *mext, u32 flags)
{
	if (ext->packets != ULLONG_MAX &&
	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
		ip_set_add_bytes(ext->bytes, counter);
		ip_set_add_packets(ext->packets, counter);
	}
	if (flags & IPSET_FLAG_MATCH_COUNTERS) {
		mext->packets = ip_set_get_packets(counter);
		mext->bytes = ip_set_get_bytes(counter);
	}
}

static inline void
ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
		   const struct ip_set_ext *ext,
		   struct ip_set_ext *mext, u32 flags)
{
	mext->skbmark = skbinfo->skbmark;
	mext->skbmarkmask = skbinfo->skbmarkmask;
	mext->skbprio = skbinfo->skbprio;
	mext->skbqueue = skbinfo->skbqueue;
}

static inline bool
ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
{
	/* Send nonzero parameters only */
	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
					  skbinfo->skbmarkmask))) ||
	       (skbinfo->skbprio &&
		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
			      cpu_to_be32(skbinfo->skbprio))) ||
	       (skbinfo->skbqueue &&
		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
			      cpu_to_be16(skbinfo->skbqueue)));
}

static inline void
ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
		    const struct ip_set_ext *ext)
{
	skbinfo->skbmark = ext->skbmark;
	skbinfo->skbmarkmask = ext->skbmarkmask;
	skbinfo->skbprio = ext->skbprio;
	skbinfo->skbqueue = ext->skbqueue;
}

static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
	return nla_put_net64(skb, IPSET_ATTR_BYTES,
			     cpu_to_be64(ip_set_get_bytes(counter))) ||
	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
			     cpu_to_be64(ip_set_get_packets(counter)));
}

static inline void
ip_set_init_counter(struct ip_set_counter *counter,
		    const struct ip_set_ext *ext)
{
	if (ext->bytes != ULLONG_MAX)
		atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
	if (ext->packets != ULLONG_MAX)
		atomic64_set(&(counter)->packets, (long long)(ext->packets));
}

/* Netlink CB args */
enum {
	IPSET_CB_NET = 0,	/* net namespace */
	IPSET_CB_DUMP,		/* dump single set/all sets */
	IPSET_CB_INDEX,		/* set index */
	IPSET_CB_PRIVATE,	/* set private data */
	IPSET_CB_ARG0,		/* type specific */
	IPSET_CB_ARG1,
};

/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
				     const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);

/* API for iptables set match, and SET target */

extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
		       const struct xt_action_param *par,
		       struct ip_set_adt_opt *opt);
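
/* Call sketch (hedged; only the three functions above are real, the
 * rest is illustrative): with a set index obtained earlier via
 * ip_set_get_byname() or ip_set_nfnl_get_byindex(), a match could do
 *
 *	ret = ip_set_test(index, skb, par, &opt);
 *
 * and treat ret < 0 as an error (e.g. protocol family mismatch),
 * ret == 0 as "no match" and ret > 0 as a matching element, mirroring
 * the kadt return convention documented above.
 */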

/* Utility functions */
extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
			      size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
				 struct ip_set_ext *ext);

static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
{
	__be32 ip;
	int ret = ip_set_get_ipaddr4(nla, &ip);

	if (ret)
		return ret;
	*ipaddr = ntohl(ip);
	return 0;
}

/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
static inline bool
ip_set_eexist(int ret, u32 flags)
{
	return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
}

/* Match elements marked with nomatch */
static inline bool
ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
{
	return adt == IPSET_TEST &&
	       (set->type->features & IPSET_TYPE_NOMATCH) &&
	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
	       (ret > 0 || ret == -ENOTEMPTY);
}

/* Check the NLA_F_NET_BYTEORDER flag */
static inline bool
ip_set_attr_netorder(struct nlattr *tb[], int type)
{
	return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

static inline bool
ip_set_optattr_netorder(struct nlattr *tb[], int type)
{
	return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

/* Useful converters */
static inline u32
ip_set_get_h32(const struct nlattr *attr)
{
	return ntohl(nla_get_be32(attr));
}

static inline u16
ip_set_get_h16(const struct nlattr *attr)
{
	return ntohs(nla_get_be16(attr));
}

#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
#define ipset_nest_end(skb, start)  nla_nest_end(skb, start)

static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
{
	struct nlattr *__nested = ipset_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
	if (!ret)
		ipset_nest_end(skb, __nested);
	return ret;
}

static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
				  const struct in6_addr *ipaddrptr)
{
	struct nlattr *__nested = ipset_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
	if (!ret)
		ipset_nest_end(skb, __nested);
	return ret;
}

/* Get address from skbuff */
static inline __be32
ip4addr(const struct sk_buff *skb, bool src)
{
	return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
{
	*addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
{
	memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
	       sizeof(*addr));
}

/* Calculate the bytes required to store the inclusive range of a-b */
static inline int
bitmap_bytes(u32 a, u32 b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
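
/* The result is rounded up to a multiple of 4 bytes (u32 granularity):
 * for example bitmap_bytes(0, 31) is 4 (32 bits fit one word), while
 * bitmap_bytes(0, 32) is 8 because the 33rd bit spills into a second
 * 32-bit word.
 */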

#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>

int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
		      const void *e, bool active);

#define IP_SET_INIT_KEXT(skb, opt, set)			\
	{ .bytes = (skb)->len, .packets = 1,		\
	  .timeout = ip_set_adt_opt_timeout(opt, set) }

#define IP_SET_INIT_UEXT(set)				\
	{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
	  .timeout = (set)->timeout }
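
/* Initialization sketch (hedged; "skb", "opt" and "set" are assumed to
 * be in scope): the kernel-side path seeds the extension values from
 * the packet, while the userspace path starts from "unset" markers
 * that ip_set_get_extensions() may overwrite from netlink attributes:
 *
 *	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 *	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 */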

#define IPSET_CONCAT(a, b)		a##b
#define IPSET_TOKEN(a, b)		IPSET_CONCAT(a, b)

#endif /*_IP_SET_H */