This source file includes the following definitions:
- list_set_ktest
- list_set_kadd
- list_set_kdel
- list_set_kadt
- __list_set_del_rcu
- list_set_del
- list_set_replace
- set_cleanup_entries
- list_set_utest
- list_set_init_extensions
- list_set_uadd
- list_set_udel
- list_set_uadt
- list_set_flush
- list_set_destroy
- list_set_memsize
- list_set_head
- list_set_list
- list_set_same_set
- list_set_gc
- list_set_gc_init
- init_list_set
- list_set_create
- list_set_init
- list_set_fini
/* Kernel module implementing an IP set type: the list:set type */

#include <linux/module.h>
#include <linux/ip.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_list.h>

#define IPSET_TYPE_REV_MIN	0
/*				1    Counters support added */
/*				2    Comments support added */
#define IPSET_TYPE_REV_MAX	3 /* skbinfo support added */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_list:set");

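/*
 * Illustrative userspace usage of this set type (a sketch only, not part of
 * the original file; set names foo, bar and baz are hypothetical): a list:set
 * holds references to other sets by name, and a packet matches the list when
 * it matches any member set.
 *
 *   ipset create foo hash:ip
 *   ipset create bar hash:ip
 *   ipset create baz list:set size 8
 *   ipset add baz foo
 *   ipset add baz bar before foo
 *   iptables -A INPUT -m set --match-set baz src -j DROP
 */
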
/* Member elements */
struct set_elem {
	struct rcu_head rcu;
	struct list_head list;
	struct ip_set *set;	/* Sparing a lookup */
	ip_set_id_t id;
} __aligned(__alignof__(u64));

struct set_adt_elem {
	ip_set_id_t id;
	ip_set_id_t refid;
	int before;
};

/* Type specific data */
struct list_set {
	u32 size;		/* size of the member list */
	struct timer_list gc;	/* garbage collection */
	struct ip_set *set;	/* attached to this ip_set */
	struct net *net;	/* namespace */
	struct list_head members; /* the set members */
};

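/* Kernel side test: walk the member sets under RCU and report a match as
 * soon as one member set matches the packet and the match extensions of
 * this set agree as well.
 */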
static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
	       const struct xt_action_param *par,
	       struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct ip_set_ext *mext = &opt->ext;
	struct set_elem *e;
	u32 flags = opt->cmdflags;
	int ret;

	/* Don't lookup sub-counters at all */
	opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
		opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
	list_for_each_entry_rcu(e, &map->members, list) {
		ret = ip_set_test(e->id, skb, par, opt);
		if (ret <= 0)
			continue;
		if (ip_set_match_extensions(set, ext, mext, flags, e))
			return 1;
	}
	return 0;
}

static int
list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
	      const struct xt_action_param *par,
	      struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	int ret;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		ret = ip_set_add(e->id, skb, par, opt);
		if (ret == 0)
			return ret;
	}
	return 0;
}

static int
list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
	      const struct xt_action_param *par,
	      struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	int ret;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		ret = ip_set_del(e->id, skb, par, opt);
		if (ret == 0)
			return ret;
	}
	return 0;
}

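/* Kernel side add/del/test entry point: dispatch to the helpers above while
 * holding the RCU read lock, so the member list can be walked safely.
 */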
static int
list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
	      const struct xt_action_param *par,
	      enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
	int ret = -EINVAL;

	rcu_read_lock();
	switch (adt) {
	case IPSET_TEST:
		ret = list_set_ktest(set, skb, par, opt, &ext);
		break;
	case IPSET_ADD:
		ret = list_set_kadd(set, skb, par, opt, &ext);
		break;
	case IPSET_DEL:
		ret = list_set_kdel(set, skb, par, opt, &ext);
		break;
	default:
		break;
	}
	rcu_read_unlock();

	return ret;
}

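/* Element removal and replacement: entries are unlinked with the RCU list
 * primitives and freed only after a grace period, so kernel side readers
 * never see a dangling element.
 */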
static void
__list_set_del_rcu(struct rcu_head *rcu)
{
	struct set_elem *e = container_of(rcu, struct set_elem, rcu);
	struct ip_set *set = e->set;

	ip_set_ext_destroy(set, e);
	kfree(e);
}

static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
	struct list_set *map = set->data;

	set->elements--;
	list_del_rcu(&e->list);
	ip_set_put_byindex(map->net, e->id);
	call_rcu(&e->rcu, __list_set_del_rcu);
}

static inline void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
	struct list_set *map = set->data;

	list_replace_rcu(&old->list, &e->list);
	ip_set_put_byindex(map->net, old->id);
	call_rcu(&old->rcu, __list_set_del_rcu);
}

static void
set_cleanup_entries(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e, *n;

	list_for_each_entry_safe(e, n, &map->members, list)
		if (ip_set_timeout_expired(ext_timeout(e, set)))
			list_set_del(set, e);
}

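/* Userspace test: report whether the named set is a member, optionally
 * checking that it sits directly before or after the reference set.
 */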
static int
list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	       struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *next, *prev = NULL;
	int ret;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			prev = e;
			continue;
		}

		if (d->before == 0) {
			ret = 1;
		} else if (d->before > 0) {
			next = list_next_entry(e, list);
			ret = !list_is_last(&e->list, &map->members) &&
			      next->id == d->refid;
		} else {
			ret = prev && prev->id == d->refid;
		}
		return ret;
	}
	return 0;
}

static void
list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
			 struct set_elem *e)
{
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(e, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(e, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
	/* Update timeout last */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
}

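/* Userspace add: insert the named set at the end of the list, or directly
 * before/after the reference set; re-adding an existing member requires
 * IPSET_FLAG_EXIST and only refreshes its extensions.
 */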
static int
list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *n, *prev, *next;
	bool flag_exist = flags & IPSET_FLAG_EXIST;

	/* Find where to add the new entry */
	n = prev = next = NULL;
	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (d->id == e->id)
			n = e;
		else if (d->before == 0 || e->id != d->refid)
			continue;
		else if (d->before > 0)
			next = e;
		else
			prev = e;
	}

	/* If before/after is used on an empty set */
	if ((d->before > 0 && !next) ||
	    (d->before < 0 && !prev))
		return -IPSET_ERR_REF_EXIST;

	/* Re-add already existing element */
	if (n) {
		if (!flag_exist)
			return -IPSET_ERR_EXIST;
		/* Update extensions */
		ip_set_ext_destroy(set, n);
		list_set_init_extensions(set, ext, n);

		/* Set is already added to the list */
		ip_set_put_byindex(map->net, d->id);
		return 0;
	}

	/* Add new entry */
	if (d->before == 0) {
		/* Append */
		n = list_empty(&map->members) ? NULL :
			list_last_entry(&map->members, struct set_elem, list);
	} else if (d->before > 0) {
		/* Insert after next element */
		if (!list_is_last(&next->list, &map->members))
			n = list_next_entry(next, list);
	} else {
		/* Insert before prev element */
		if (prev->list.prev != &map->members)
			n = list_prev_entry(prev, list);
	}
	/* Can we replace a timed out entry? */
	if (n &&
	    !(SET_WITH_TIMEOUT(set) &&
	      ip_set_timeout_expired(ext_timeout(n, set))))
		n = NULL;

	e = kzalloc(set->dsize, GFP_ATOMIC);
	if (!e)
		return -ENOMEM;
	e->id = d->id;
	e->set = set;
	INIT_LIST_HEAD(&e->list);
	list_set_init_extensions(set, ext, e);
	if (n)
		list_set_replace(set, e, n);
	else if (next)
		list_add_tail_rcu(&e->list, &next->list);
	else if (prev)
		list_add_rcu(&e->list, &prev->list);
	else
		list_add_tail_rcu(&e->list, &map->members);
	set->elements++;

	return 0;
}

static int
list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *next, *prev = NULL;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			prev = e;
			continue;
		}

		if (d->before > 0) {
			next = list_next_entry(e, list);
			if (list_is_last(&e->list, &map->members) ||
			    next->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		} else if (d->before < 0) {
			if (!prev || prev->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		}
		list_set_del(set, e);
		return 0;
	}
	return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
}

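/* Userspace add/del/test front end: parse the netlink attributes (member set
 * name, optional before/after reference, extensions), resolve the set names
 * and call the appropriate backend above.
 */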
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct list_set *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	struct ip_set *s;
	int ret = 0;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_NAME] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;
	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
	if (e.id == IPSET_INVALID_ID)
		return -IPSET_ERR_NAME;
	/* Loop prevention: a set which can hold other sets cannot be added */
	if (s->type->features & IPSET_TYPE_NAME) {
		ret = -IPSET_ERR_LOOP;
		goto finish;
	}

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

		e.before = f & IPSET_FLAG_BEFORE;
	}

	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
		ret = -IPSET_ERR_BEFORE;
		goto finish;
	}

	if (tb[IPSET_ATTR_NAMEREF]) {
		e.refid = ip_set_get_byname(map->net,
					    nla_data(tb[IPSET_ATTR_NAMEREF]),
					    &s);
		if (e.refid == IPSET_INVALID_ID) {
			ret = -IPSET_ERR_NAMEREF;
			goto finish;
		}
		if (!e.before)
			e.before = -1;
	}
	if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
		set_cleanup_entries(set);

	ret = adtfn(set, &e, &ext, &ext, flags);

finish:
	if (e.refid != IPSET_INVALID_ID)
		ip_set_put_byindex(map->net, e.refid);
	if (adt != IPSET_ADD || ret)
		ip_set_put_byindex(map->net, e.id);

	return ip_set_eexist(ret, flags) ? 0 : ret;
}

static void
list_set_flush(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e, *n;

	list_for_each_entry_safe(e, n, &map->members, list)
		list_set_del(set, e);
	set->elements = 0;
	set->ext_size = 0;
}

static void
list_set_destroy(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e, *n;

	if (SET_WITH_TIMEOUT(set))
		del_timer_sync(&map->gc);

	list_for_each_entry_safe(e, n, &map->members, list) {
		list_del(&e->list);
		ip_set_put_byindex(map->net, e->id);
		ip_set_ext_destroy(set, e);
		kfree(e);
	}
	kfree(map);

	set->data = NULL;
}

/* Calculate the actual memory size of the set data */
static size_t
list_set_memsize(const struct list_set *map, size_t dsize)
{
	struct set_elem *e;
	u32 n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &map->members, list)
		n++;
	rcu_read_unlock();

	return (sizeof(*map) + n * dsize);
}

static int
list_set_head(struct ip_set *set, struct sk_buff *skb)
{
	const struct list_set *map = set->data;
	struct nlattr *nested;
	size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size;

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

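/* Dump the members to userspace; a partial dump stores the index of the next
 * element in the netlink callback so the following call can resume there.
 */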
static int
list_set_list(const struct ip_set *set,
	      struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct list_set *map = set->data;
	struct nlattr *atd, *nested;
	u32 i = 0, first = cb->args[IPSET_CB_ARG0];
	char name[IPSET_MAXNAMELEN];
	struct set_elem *e;
	int ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &map->members, list) {
		if (i < first ||
		    (SET_WITH_TIMEOUT(set) &&
		     ip_set_timeout_expired(ext_timeout(e, set)))) {
			i++;
			continue;
		}
		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested)
			goto nla_put_failure;
		ip_set_name_byindex(map->net, e->id, name);
		if (nla_put_string(skb, IPSET_ATTR_NAME, name))
			goto nla_put_failure;
		if (ip_set_put_extensions(skb, set, e, true))
			goto nla_put_failure;
		nla_nest_end(skb, nested);
		i++;
	}

	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;
	goto out;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	if (unlikely(i == first)) {
		nla_nest_cancel(skb, atd);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		cb->args[IPSET_CB_ARG0] = i;
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}

static bool
list_set_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct list_set *x = a->data;
	const struct list_set *y = b->data;

	return x->size == y->size &&
	       a->timeout == b->timeout &&
	       a->extensions == b->extensions;
}

static const struct ip_set_type_variant set_variant = {
	.kadt	= list_set_kadt,
	.uadt	= list_set_uadt,
	.adt	= {
		[IPSET_ADD] = list_set_uadd,
		[IPSET_DEL] = list_set_udel,
		[IPSET_TEST] = list_set_utest,
	},
	.destroy = list_set_destroy,
	.flush	= list_set_flush,
	.head	= list_set_head,
	.list	= list_set_list,
	.same_set = list_set_same_set,
};

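/* Garbage collection: periodically drop timed out members under the set
 * lock and re-arm the timer for the next period.
 */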
static void
list_set_gc(struct timer_list *t)
{
	struct list_set *map = from_timer(map, t, gc);
	struct ip_set *set = map->set;

	spin_lock_bh(&set->lock);
	set_cleanup_entries(set);
	spin_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
	add_timer(&map->gc);
}

static void
list_set_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
{
	struct list_set *map = set->data;

	timer_setup(&map->gc, gc, 0);
	mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
}

static bool
init_list_set(struct net *net, struct ip_set *set, u32 size)
{
	struct list_set *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return false;

	map->size = size;
	map->net = net;
	map->set = set;
	INIT_LIST_HEAD(&map->members);
	set->data = map;

	return true;
}

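/* Create a new list:set: validate the creation attributes, allocate the
 * member list and start garbage collection when a timeout is configured.
 */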
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		u32 flags)
{
	u32 size = IP_SET_LIST_DEFAULT_SIZE;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_SIZE])
		size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
	if (size < IP_SET_LIST_MIN_SIZE)
		size = IP_SET_LIST_MIN_SIZE;

	set->variant = &set_variant;
	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
				     __alignof__(struct set_elem));
	if (!init_list_set(net, set, size))
		return -ENOMEM;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		list_set_gc_init(set, list_set_gc);
	}
	return 0;
}

static struct ip_set_type list_set_type __read_mostly = {
	.name		= "list:set",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_NAME | IPSET_DUMP_LAST,
	.dimension	= IPSET_DIM_ONE,
	.family		= NFPROTO_UNSPEC,
	.revision_min	= IPSET_TYPE_REV_MIN,
	.revision_max	= IPSET_TYPE_REV_MAX,
	.create		= list_set_create,
	.create_policy	= {
		[IPSET_ATTR_SIZE]	= { .type = NLA_U32 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_NAME]	= { .type = NLA_STRING,
					    .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_NAMEREF]	= { .type = NLA_STRING,
					    .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
					    .len = IPSET_MAX_COMMENT_SIZE },
		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
	},
	.me		= THIS_MODULE,
};

static int __init
list_set_init(void)
{
	return ip_set_type_register(&list_set_type);
}

static void __exit
list_set_fini(void)
{
	/* Wait for pending call_rcu() callbacks before unloading */
	rcu_barrier();
	ip_set_type_unregister(&list_set_type);
}

module_init(list_set_init);
module_exit(list_set_fini);