This source file includes the following definitions:
- ct_write_lock_bh
- ct_write_unlock_bh
- ip_vs_conn_hashkey
- ip_vs_conn_hashkey_param
- ip_vs_conn_hashkey_conn
- ip_vs_conn_hash
- ip_vs_conn_unhash
- ip_vs_conn_unlink
- __ip_vs_conn_in_get
- ip_vs_conn_in_get
- ip_vs_conn_fill_param_proto
- ip_vs_conn_in_get_proto
- ip_vs_ct_in_get
- ip_vs_conn_out_get
- ip_vs_conn_out_get_proto
- __ip_vs_conn_put_timer
- ip_vs_conn_put
- ip_vs_conn_fill_cport
- ip_vs_bind_xmit
- ip_vs_bind_xmit_v6
- ip_vs_dest_totalconns
- ip_vs_bind_dest
- ip_vs_try_bind_dest
- ip_vs_unbind_dest
- expire_quiescent_template
- ip_vs_check_template
- ip_vs_conn_rcu_free
- ip_vs_conn_expire
- ip_vs_conn_expire_now
- ip_vs_conn_new
- ip_vs_conn_array
- ip_vs_conn_seq_start
- ip_vs_conn_seq_next
- ip_vs_conn_seq_stop
- ip_vs_conn_seq_show
- ip_vs_origin_name
- ip_vs_conn_sync_seq_show
- todrop_entry
- ip_vs_conn_ops_mode
- ip_vs_random_dropentry
- ip_vs_conn_flush
- ip_vs_conn_net_init
- ip_vs_conn_net_cleanup
- ip_vs_conn_init
- ip_vs_conn_cleanup
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22
23 #include <linux/interrupt.h>
24 #include <linux/in.h>
25 #include <linux/inet.h>
26 #include <linux/net.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/vmalloc.h>
30 #include <linux/proc_fs.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/jhash.h>
34 #include <linux/random.h>
35
36 #include <net/net_namespace.h>
37 #include <net/ip_vs.h>
38
39
40 #ifndef CONFIG_IP_VS_TAB_BITS
41 #define CONFIG_IP_VS_TAB_BITS 12
42 #endif
43
44
45
46
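/*
 * The connection table holds 2^conn_tab_bits buckets (default 12 bits,
 * i.e. 4096 buckets) and is shared by all network namespaces.  The
 * parameter is read-only at runtime; as a hypothetical example, when
 * IPVS is built as the ip_vs module a larger table could be requested
 * at load time with:
 *
 *	modprobe ip_vs conn_tab_bits=16
 */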
47 static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
48 module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
49 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
50
51
52 int ip_vs_conn_tab_size __read_mostly;
53 static int ip_vs_conn_tab_mask __read_mostly;
54
55
56
57
58 static struct hlist_head *ip_vs_conn_tab __read_mostly;
59
60
61 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
62
63
64 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
65
66
67 static unsigned int ip_vs_conn_rnd __read_mostly;
68
69
70
71
72 #define CT_LOCKARRAY_BITS 5
73 #define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
74 #define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
75
76
77 #ifdef CONFIG_IP_VS_IPV6
78 #define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
79 #else
80 #define IP_VS_ADDRSTRLEN (8+1)
81 #endif
82
83 struct ip_vs_aligned_lock
84 {
85 spinlock_t l;
86 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
87
88
89 static struct ip_vs_aligned_lock
90 __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
91
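/*
 * Lock array for the connection table: writers (hash/unhash/unlink)
 * take one of CT_LOCKARRAY_SIZE spinlocks, selected by the low bits of
 * the bucket hash, while lookups walk the buckets under RCU only.
 */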
92 static inline void ct_write_lock_bh(unsigned int key)
93 {
94 spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
95 }
96
97 static inline void ct_write_unlock_bh(unsigned int key)
98 {
99 spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
100 }
101
102 static void ip_vs_conn_expire(struct timer_list *t);
103
104
105
106
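/*
 * Compute the bucket index for <netns, af, protocol, address, port>.
 * IPv6 addresses are folded with jhash() first, and the netns pointer
 * is mixed in so that identical flows in different namespaces are
 * spread across the table.
 */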
107 static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
108 const union nf_inet_addr *addr,
109 __be16 port)
110 {
111 #ifdef CONFIG_IP_VS_IPV6
112 if (af == AF_INET6)
113 return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
114 (__force u32)port, proto, ip_vs_conn_rnd) ^
115 ((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
116 #endif
117 return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
118 ip_vs_conn_rnd) ^
119 ((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
120 }
121
122 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
123 bool inverse)
124 {
125 const union nf_inet_addr *addr;
126 __be16 port;
127
128 if (p->pe_data && p->pe->hashkey_raw)
129 return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
130 ip_vs_conn_tab_mask;
131
132 if (likely(!inverse)) {
133 addr = p->caddr;
134 port = p->cport;
135 } else {
136 addr = p->vaddr;
137 port = p->vport;
138 }
139
140 return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
141 }
142
143 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
144 {
145 struct ip_vs_conn_param p;
146
147 ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
148 &cp->caddr, cp->cport, NULL, 0, &p);
149
150 if (cp->pe) {
151 p.pe = cp->pe;
152 p.pe_data = cp->pe_data;
153 p.pe_data_len = cp->pe_data_len;
154 }
155
156 return ip_vs_conn_hashkey_param(&p, false);
157 }
158
159
160
161
162
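/*
 * Hash a connection into ip_vs_conn_tab, keyed on the client side
 * (protocol, caddr, cport) or on the persistence engine's raw key for
 * pe templates.  Takes an extra reference for the table and sets
 * IP_VS_CONN_F_HASHED; one-packet (OPS) connections are never hashed.
 * Returns 1 on success, 0 if the entry was already hashed.
 */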
163 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
164 {
165 unsigned int hash;
166 int ret;
167
168 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
169 return 0;
170
171
172 hash = ip_vs_conn_hashkey_conn(cp);
173
174 ct_write_lock_bh(hash);
175 spin_lock(&cp->lock);
176
177 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
178 cp->flags |= IP_VS_CONN_F_HASHED;
179 refcount_inc(&cp->refcnt);
180 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
181 ret = 1;
182 } else {
183 pr_err("%s(): request for already hashed, called from %pS\n",
184 __func__, __builtin_return_address(0));
185 ret = 0;
186 }
187
188 spin_unlock(&cp->lock);
189 ct_write_unlock_bh(hash);
190
191 return ret;
192 }
193
194
195
196
197
198
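/*
 * Remove a connection from the hash table and drop the table's
 * reference.  Returns 1 if the entry was hashed, 0 otherwise.
 */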
199 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
200 {
201 unsigned int hash;
202 int ret;
203
204
205 hash = ip_vs_conn_hashkey_conn(cp);
206
207 ct_write_lock_bh(hash);
208 spin_lock(&cp->lock);
209
210 if (cp->flags & IP_VS_CONN_F_HASHED) {
211 hlist_del_rcu(&cp->c_list);
212 cp->flags &= ~IP_VS_CONN_F_HASHED;
213 refcount_dec(&cp->refcnt);
214 ret = 1;
215 } else
216 ret = 0;
217
218 spin_unlock(&cp->lock);
219 ct_write_unlock_bh(hash);
220
221 return ret;
222 }
223
224
225
226
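/*
 * Unlink a connection for release: succeeds only if the reference
 * count can be dropped from 1 to 0, i.e. nobody else holds the entry.
 * OPS connections, which are never hashed, only need the refcount drop.
 */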
227 static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
228 {
229 unsigned int hash;
230 bool ret = false;
231
232 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
233 return refcount_dec_if_one(&cp->refcnt);
234
235 hash = ip_vs_conn_hashkey_conn(cp);
236
237 ct_write_lock_bh(hash);
238 spin_lock(&cp->lock);
239
240 if (cp->flags & IP_VS_CONN_F_HASHED) {
241
242 if (refcount_dec_if_one(&cp->refcnt)) {
243 hlist_del_rcu(&cp->c_list);
244 cp->flags &= ~IP_VS_CONN_F_HASHED;
245 ret = true;
246 }
247 }
248
249 spin_unlock(&cp->lock);
250 ct_write_unlock_bh(hash);
251
252 return ret;
253 }
254
255
256
257
258
259
260
261
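/*
 * Lookup in the "incoming" direction: match protocol, client
 * address/port and virtual address/port under RCU and take a reference
 * on a hit.  Connections created with a zero client port
 * (IP_VS_CONN_F_NO_CPORT) are only matched by a lookup with cport == 0.
 */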
262 static inline struct ip_vs_conn *
263 __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
264 {
265 unsigned int hash;
266 struct ip_vs_conn *cp;
267
268 hash = ip_vs_conn_hashkey_param(p, false);
269
270 rcu_read_lock();
271
272 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
273 if (p->cport == cp->cport && p->vport == cp->vport &&
274 cp->af == p->af &&
275 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
276 ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
277 ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
278 p->protocol == cp->protocol &&
279 cp->ipvs == p->ipvs) {
280 if (!__ip_vs_conn_get(cp))
281 continue;
282
283 rcu_read_unlock();
284 return cp;
285 }
286 }
287
288 rcu_read_unlock();
289
290 return NULL;
291 }
292
293 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
294 {
295 struct ip_vs_conn *cp;
296
297 cp = __ip_vs_conn_in_get(p);
298 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
299 struct ip_vs_conn_param cport_zero_p = *p;
300 cport_zero_p.cport = 0;
301 cp = __ip_vs_conn_in_get(&cport_zero_p);
302 }
303
304 IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
305 ip_vs_proto_name(p->protocol),
306 IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
307 IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
308 cp ? "hit" : "not hit");
309
310 return cp;
311 }
312
313 static int
314 ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
315 int af, const struct sk_buff *skb,
316 const struct ip_vs_iphdr *iph,
317 struct ip_vs_conn_param *p)
318 {
319 __be16 _ports[2], *pptr;
320
321 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
322 if (pptr == NULL)
323 return 1;
324
325 if (likely(!ip_vs_iph_inverse(iph)))
326 ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
327 pptr[0], &iph->daddr, pptr[1], p);
328 else
329 ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
330 pptr[1], &iph->saddr, pptr[0], p);
331 return 0;
332 }
333
334 struct ip_vs_conn *
335 ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
336 const struct sk_buff *skb,
337 const struct ip_vs_iphdr *iph)
338 {
339 struct ip_vs_conn_param p;
340
341 if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
342 return NULL;
343
344 return ip_vs_conn_in_get(&p);
345 }
346 EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
347
348
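/*
 * Lookup a connection template.  Persistence engines may provide
 * ct_match() to compare on their private data; otherwise templates are
 * matched on addresses, ports, protocol and the IP_VS_CONN_F_TEMPLATE
 * flag.  For templates created with protocol IPPROTO_IP, vaddr carries
 * the firewall mark and is compared as a 32-bit value.
 */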
349 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
350 {
351 unsigned int hash;
352 struct ip_vs_conn *cp;
353
354 hash = ip_vs_conn_hashkey_param(p, false);
355
356 rcu_read_lock();
357
358 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
359 if (unlikely(p->pe_data && p->pe->ct_match)) {
360 if (cp->ipvs != p->ipvs)
361 continue;
362 if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
363 if (__ip_vs_conn_get(cp))
364 goto out;
365 }
366 continue;
367 }
368
369 if (cp->af == p->af &&
370 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
371
372
373 ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
374 p->af, p->vaddr, &cp->vaddr) &&
375 p->vport == cp->vport && p->cport == cp->cport &&
376 cp->flags & IP_VS_CONN_F_TEMPLATE &&
377 p->protocol == cp->protocol &&
378 cp->ipvs == p->ipvs) {
379 if (__ip_vs_conn_get(cp))
380 goto out;
381 }
382 }
383 cp = NULL;
384
385 out:
386 rcu_read_unlock();
387
388 IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
389 ip_vs_proto_name(p->protocol),
390 IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
391 IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
392 cp ? "hit" : "not hit");
393
394 return cp;
395 }
396
397
398
399
400
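/*
 * Lookup in the "outgoing" direction (reply packets from the real
 * server): the packet's source is matched against the connection's
 * destination side and its destination against the client side, using
 * the inverse hash key.
 */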
401 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
402 {
403 unsigned int hash;
404 struct ip_vs_conn *cp, *ret = NULL;
405
406
407
408
409 hash = ip_vs_conn_hashkey_param(p, true);
410
411 rcu_read_lock();
412
413 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
414 if (p->vport == cp->cport && p->cport == cp->dport &&
415 cp->af == p->af &&
416 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
417 ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
418 p->protocol == cp->protocol &&
419 cp->ipvs == p->ipvs) {
420 if (!__ip_vs_conn_get(cp))
421 continue;
422
423 ret = cp;
424 break;
425 }
426 }
427
428 rcu_read_unlock();
429
430 IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
431 ip_vs_proto_name(p->protocol),
432 IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
433 IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
434 ret ? "hit" : "not hit");
435
436 return ret;
437 }
438
439 struct ip_vs_conn *
440 ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
441 const struct sk_buff *skb,
442 const struct ip_vs_iphdr *iph)
443 {
444 struct ip_vs_conn_param p;
445
446 if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
447 return NULL;
448
449 return ip_vs_conn_out_get(&p);
450 }
451 EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
452
453
454
455
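/*
 * Re-arm the expiration timer with the connection's current timeout
 * and drop the caller's reference.  ip_vs_conn_put() expires one-packet
 * connections immediately once the last user releases them.
 */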
456 static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
457 {
458 unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
459 0 : cp->timeout;
460 mod_timer(&cp->timer, jiffies+t);
461
462 __ip_vs_conn_put(cp);
463 }
464
465 void ip_vs_conn_put(struct ip_vs_conn *cp)
466 {
467 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
468 (refcount_read(&cp->refcnt) == 1) &&
469 !timer_pending(&cp->timer))
470
471 ip_vs_conn_expire(&cp->timer);
472 else
473 __ip_vs_conn_put_timer(cp);
474 }
475
476
477
478
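/*
 * Fill a no-client-port connection with the real client port once it
 * is known, clear IP_VS_CONN_F_NO_CPORT and rehash the entry under its
 * new key.
 */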
479 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
480 {
481 if (ip_vs_conn_unhash(cp)) {
482 spin_lock_bh(&cp->lock);
483 if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
484 atomic_dec(&ip_vs_conn_no_cport_cnt);
485 cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
486 cp->cport = cport;
487 }
488 spin_unlock_bh(&cp->lock);
489
490
491 ip_vs_conn_hash(cp);
492 }
493 }
494
495
496
497
498
499
500 static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
501 {
502 switch (IP_VS_FWD_METHOD(cp)) {
503 case IP_VS_CONN_F_MASQ:
504 cp->packet_xmit = ip_vs_nat_xmit;
505 break;
506
507 case IP_VS_CONN_F_TUNNEL:
508 #ifdef CONFIG_IP_VS_IPV6
509 if (cp->daf == AF_INET6)
510 cp->packet_xmit = ip_vs_tunnel_xmit_v6;
511 else
512 #endif
513 cp->packet_xmit = ip_vs_tunnel_xmit;
514 break;
515
516 case IP_VS_CONN_F_DROUTE:
517 cp->packet_xmit = ip_vs_dr_xmit;
518 break;
519
520 case IP_VS_CONN_F_LOCALNODE:
521 cp->packet_xmit = ip_vs_null_xmit;
522 break;
523
524 case IP_VS_CONN_F_BYPASS:
525 cp->packet_xmit = ip_vs_bypass_xmit;
526 break;
527 }
528 }
529
530 #ifdef CONFIG_IP_VS_IPV6
531 static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
532 {
533 switch (IP_VS_FWD_METHOD(cp)) {
534 case IP_VS_CONN_F_MASQ:
535 cp->packet_xmit = ip_vs_nat_xmit_v6;
536 break;
537
538 case IP_VS_CONN_F_TUNNEL:
539 if (cp->daf == AF_INET6)
540 cp->packet_xmit = ip_vs_tunnel_xmit_v6;
541 else
542 cp->packet_xmit = ip_vs_tunnel_xmit;
543 break;
544
545 case IP_VS_CONN_F_DROUTE:
546 cp->packet_xmit = ip_vs_dr_xmit_v6;
547 break;
548
549 case IP_VS_CONN_F_LOCALNODE:
550 cp->packet_xmit = ip_vs_null_xmit;
551 break;
552
553 case IP_VS_CONN_F_BYPASS:
554 cp->packet_xmit = ip_vs_bypass_xmit_v6;
555 break;
556 }
557 }
558 #endif
559
560
561 static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
562 {
563 return atomic_read(&dest->activeconns)
564 + atomic_read(&dest->inactconns);
565 }
566
567
568
569
570
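/*
 * Bind the connection to its destination (real server): hold a dest
 * reference, merge the destination's conn_flags into the connection
 * flags, update the active/inactive/persistent counters and mark the
 * destination overloaded when its upper connection threshold is hit.
 */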
571 static inline void
572 ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
573 {
574 unsigned int conn_flags;
575 __u32 flags;
576
577
578 if (!dest)
579 return;
580
581
582 ip_vs_dest_hold(dest);
583
584 conn_flags = atomic_read(&dest->conn_flags);
585 if (cp->protocol != IPPROTO_UDP)
586 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
587 flags = cp->flags;
588
589 if (flags & IP_VS_CONN_F_SYNC) {
590
591
592
593 if (!(flags & IP_VS_CONN_F_TEMPLATE))
594 conn_flags &= ~IP_VS_CONN_F_INACTIVE;
595
596 flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
597 }
598 flags |= conn_flags;
599 cp->flags = flags;
600 cp->dest = dest;
601
602 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
603 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
604 "dest->refcnt:%d\n",
605 ip_vs_proto_name(cp->protocol),
606 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
607 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
608 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
609 ip_vs_fwd_tag(cp), cp->state,
610 cp->flags, refcount_read(&cp->refcnt),
611 refcount_read(&dest->refcnt));
612
613
614 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
615
616
617
618
619 if (!(flags & IP_VS_CONN_F_INACTIVE))
620 atomic_inc(&dest->activeconns);
621 else
622 atomic_inc(&dest->inactconns);
623 } else {
624
625
626 atomic_inc(&dest->persistconns);
627 }
628
629 if (dest->u_threshold != 0 &&
630 ip_vs_dest_totalconns(dest) >= dest->u_threshold)
631 dest->flags |= IP_VS_DEST_F_OVERLOAD;
632 }
633
634
635
636
637
638
639 void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
640 {
641 struct ip_vs_dest *dest;
642
643 rcu_read_lock();
644
645
646
647
648
649
650 dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
651 cp->dport, &cp->vaddr, cp->vport,
652 cp->protocol, cp->fwmark, cp->flags);
653 if (dest) {
654 struct ip_vs_proto_data *pd;
655
656 spin_lock_bh(&cp->lock);
657 if (cp->dest) {
658 spin_unlock_bh(&cp->lock);
659 rcu_read_unlock();
660 return;
661 }
662
663
664
665 if (cp->app)
666 ip_vs_unbind_app(cp);
667
668 ip_vs_bind_dest(cp, dest);
669 spin_unlock_bh(&cp->lock);
670
671
672 cp->packet_xmit = NULL;
673 #ifdef CONFIG_IP_VS_IPV6
674 if (cp->af == AF_INET6)
675 ip_vs_bind_xmit_v6(cp);
676 else
677 #endif
678 ip_vs_bind_xmit(cp);
679
680 pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
681 if (pd && atomic_read(&pd->appcnt))
682 ip_vs_bind_app(cp, pd->pp);
683 }
684 rcu_read_unlock();
685 }
686
687
688
689
690
691
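/*
 * Unbind from the destination: decrement the matching connection
 * counter, clear the overload flag once the load falls below the
 * configured thresholds, and release the dest reference.
 */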
692 static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
693 {
694 struct ip_vs_dest *dest = cp->dest;
695
696 if (!dest)
697 return;
698
699 IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
700 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
701 "dest->refcnt:%d\n",
702 ip_vs_proto_name(cp->protocol),
703 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
704 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
705 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
706 ip_vs_fwd_tag(cp), cp->state,
707 cp->flags, refcount_read(&cp->refcnt),
708 refcount_read(&dest->refcnt));
709
710
711 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
712
713
714 if (cp->flags & IP_VS_CONN_F_INACTIVE) {
715 atomic_dec(&dest->inactconns);
716 } else {
717 atomic_dec(&dest->activeconns);
718 }
719 } else {
720
721
722 atomic_dec(&dest->persistconns);
723 }
724
725 if (dest->l_threshold != 0) {
726 if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
727 dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
728 } else if (dest->u_threshold != 0) {
729 if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
730 dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
731 } else {
732 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
733 dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
734 }
735
736 ip_vs_dest_put(dest);
737 }
738
739 static int expire_quiescent_template(struct netns_ipvs *ipvs,
740 struct ip_vs_dest *dest)
741 {
742 #ifdef CONFIG_SYSCTL
743 return ipvs->sysctl_expire_quiescent_template &&
744 (atomic_read(&dest->weight) == 0);
745 #else
746 return 0;
747 #endif
748 }
749
750
751
752
753
754
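/*
 * Check whether a connection template may still be used: its
 * destination must exist, be available, not be a quiescent server with
 * expire_quiescent_template set, and match cdest when one is given.
 * An invalid template is rehashed with unreachable ports (0xffff) so
 * it cannot match again, and 0 is returned; otherwise 1.
 */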
755 int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
756 {
757 struct ip_vs_dest *dest = ct->dest;
758 struct netns_ipvs *ipvs = ct->ipvs;
759
760
761
762
763 if ((dest == NULL) ||
764 !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
765 expire_quiescent_template(ipvs, dest) ||
766 (cdest && (dest != cdest))) {
767 IP_VS_DBG_BUF(9, "check_template: dest not available for "
768 "protocol %s s:%s:%d v:%s:%d "
769 "-> d:%s:%d\n",
770 ip_vs_proto_name(ct->protocol),
771 IP_VS_DBG_ADDR(ct->af, &ct->caddr),
772 ntohs(ct->cport),
773 IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
774 ntohs(ct->vport),
775 IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
776 ntohs(ct->dport));
777
778
779
780
781 if (ct->vport != htons(0xffff)) {
782 if (ip_vs_conn_unhash(ct)) {
783 ct->dport = htons(0xffff);
784 ct->vport = htons(0xffff);
785 ct->cport = 0;
786 ip_vs_conn_hash(ct);
787 }
788 }
789
790
791
792
793
794 __ip_vs_conn_put(ct);
795 return 0;
796 }
797 return 1;
798 }
799
800 static void ip_vs_conn_rcu_free(struct rcu_head *head)
801 {
802 struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
803 rcu_head);
804
805 ip_vs_pe_put(cp->pe);
806 kfree(cp->pe_data);
807 kmem_cache_free(ip_vs_conn_cachep, cp);
808 }
809
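/*
 * Expiration timer callback: unlink and free the connection.  Entries
 * that still control other connections, or that cannot be unlinked,
 * get another 60 seconds instead.  When a dropped connection goes
 * away, its controlling template is dropped too unless it is assured
 * or still controls other connections.
 */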
810 static void ip_vs_conn_expire(struct timer_list *t)
811 {
812 struct ip_vs_conn *cp = from_timer(cp, t, timer);
813 struct netns_ipvs *ipvs = cp->ipvs;
814
815
816
817
818 if (atomic_read(&cp->n_control))
819 goto expire_later;
820
821
822 if (likely(ip_vs_conn_unlink(cp))) {
823 struct ip_vs_conn *ct = cp->control;
824
825
826 del_timer(&cp->timer);
827
828
829 if (ct) {
830 ip_vs_control_del(cp);
831
832 if (!cp->timeout && !atomic_read(&ct->n_control) &&
833 (!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
834 !(ct->state & IP_VS_CTPL_S_ASSURED))) {
835 IP_VS_DBG(4, "drop controlling connection\n");
836 ct->timeout = 0;
837 ip_vs_conn_expire_now(ct);
838 }
839 }
840
841 if ((cp->flags & IP_VS_CONN_F_NFCT) &&
842 !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
843
844
845
846
847 smp_rmb();
848 if (ipvs->enable)
849 ip_vs_conn_drop_conntrack(cp);
850 }
851
852 if (unlikely(cp->app != NULL))
853 ip_vs_unbind_app(cp);
854 ip_vs_unbind_dest(cp);
855 if (cp->flags & IP_VS_CONN_F_NO_CPORT)
856 atomic_dec(&ip_vs_conn_no_cport_cnt);
857 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
858 ip_vs_conn_rcu_free(&cp->rcu_head);
859 else
860 call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
861 atomic_dec(&ipvs->conn_count);
862 return;
863 }
864
865 expire_later:
866 IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
867 refcount_read(&cp->refcnt),
868 atomic_read(&cp->n_control));
869
870 refcount_inc(&cp->refcnt);
871 cp->timeout = 60*HZ;
872
873 if (ipvs->sync_state & IP_VS_STATE_MASTER)
874 ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
875
876 __ip_vs_conn_put_timer(cp);
877 }
878
879
880
881
882
883
884
885
886 void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
887 {
888
889
890
891 if (timer_pending(&cp->timer) &&
892 time_after(cp->timer.expires, jiffies))
893 mod_timer_pending(&cp->timer, jiffies);
894 }
895
896
897
898
899
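/*
 * Allocate, initialise and hash a new connection entry for the given
 * parameters and destination.  The entry starts with a refcount of 1
 * held by the caller; the hash table takes its own reference in
 * ip_vs_conn_hash().  Templates keep a reference on their persistence
 * engine and take ownership of pe_data.
 */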
900 struct ip_vs_conn *
901 ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
902 const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
903 struct ip_vs_dest *dest, __u32 fwmark)
904 {
905 struct ip_vs_conn *cp;
906 struct netns_ipvs *ipvs = p->ipvs;
907 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
908 p->protocol);
909
910 cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
911 if (cp == NULL) {
912 IP_VS_ERR_RL("%s(): no memory\n", __func__);
913 return NULL;
914 }
915
916 INIT_HLIST_NODE(&cp->c_list);
917 timer_setup(&cp->timer, ip_vs_conn_expire, 0);
918 cp->ipvs = ipvs;
919 cp->af = p->af;
920 cp->daf = dest_af;
921 cp->protocol = p->protocol;
922 ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
923 cp->cport = p->cport;
924
925 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
926 &cp->vaddr, p->vaddr);
927 cp->vport = p->vport;
928 ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
929 cp->dport = dport;
930 cp->flags = flags;
931 cp->fwmark = fwmark;
932 if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
933 ip_vs_pe_get(p->pe);
934 cp->pe = p->pe;
935 cp->pe_data = p->pe_data;
936 cp->pe_data_len = p->pe_data_len;
937 } else {
938 cp->pe = NULL;
939 cp->pe_data = NULL;
940 cp->pe_data_len = 0;
941 }
942 spin_lock_init(&cp->lock);
943
944
945
946
947
948
949 refcount_set(&cp->refcnt, 1);
950
951 cp->control = NULL;
952 atomic_set(&cp->n_control, 0);
953 atomic_set(&cp->in_pkts, 0);
954
955 cp->packet_xmit = NULL;
956 cp->app = NULL;
957 cp->app_data = NULL;
958
959 cp->in_seq.delta = 0;
960 cp->out_seq.delta = 0;
961
962 atomic_inc(&ipvs->conn_count);
963 if (flags & IP_VS_CONN_F_NO_CPORT)
964 atomic_inc(&ip_vs_conn_no_cport_cnt);
965
966
967 cp->dest = NULL;
968 ip_vs_bind_dest(cp, dest);
969
970
971 cp->state = 0;
972 cp->old_state = 0;
973 cp->timeout = 3*HZ;
974 cp->sync_endtime = jiffies & ~3UL;
975
976
977 #ifdef CONFIG_IP_VS_IPV6
978 if (p->af == AF_INET6)
979 ip_vs_bind_xmit_v6(cp);
980 else
981 #endif
982 ip_vs_bind_xmit(cp);
983
984 if (unlikely(pd && atomic_read(&pd->appcnt)))
985 ip_vs_bind_app(cp, pd->pp);
986
987
988
989
990
991
992
993
994 if (ip_vs_conntrack_enabled(ipvs))
995 cp->flags |= IP_VS_CONN_F_NFCT;
996
997
998 ip_vs_conn_hash(cp);
999
1000 return cp;
1001 }
1002
1003
1004
1005
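/*
 * /proc/net/ip_vs_conn and /proc/net/ip_vs_conn_sync: seq_file
 * iterators over the shared hash table.  The walk is done under RCU
 * with cond_resched_rcu() between buckets; entries from other network
 * namespaces are filtered out in the ->show() callbacks.
 */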
1006 #ifdef CONFIG_PROC_FS
1007 struct ip_vs_iter_state {
1008 struct seq_net_private p;
1009 struct hlist_head *l;
1010 };
1011
1012 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
1013 {
1014 int idx;
1015 struct ip_vs_conn *cp;
1016 struct ip_vs_iter_state *iter = seq->private;
1017
1018 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1019 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1020
1021
1022
1023 if (pos-- == 0) {
1024 iter->l = &ip_vs_conn_tab[idx];
1025 return cp;
1026 }
1027 }
1028 cond_resched_rcu();
1029 }
1030
1031 return NULL;
1032 }
1033
1034 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
1035 __acquires(RCU)
1036 {
1037 struct ip_vs_iter_state *iter = seq->private;
1038
1039 iter->l = NULL;
1040 rcu_read_lock();
1041 return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
1042 }
1043
1044 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1045 {
1046 struct ip_vs_conn *cp = v;
1047 struct ip_vs_iter_state *iter = seq->private;
1048 struct hlist_node *e;
1049 struct hlist_head *l = iter->l;
1050 int idx;
1051
1052 ++*pos;
1053 if (v == SEQ_START_TOKEN)
1054 return ip_vs_conn_array(seq, 0);
1055
1056
1057 e = rcu_dereference(hlist_next_rcu(&cp->c_list));
1058 if (e)
1059 return hlist_entry(e, struct ip_vs_conn, c_list);
1060
1061 idx = l - ip_vs_conn_tab;
1062 while (++idx < ip_vs_conn_tab_size) {
1063 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1064 iter->l = &ip_vs_conn_tab[idx];
1065 return cp;
1066 }
1067 cond_resched_rcu();
1068 }
1069 iter->l = NULL;
1070 return NULL;
1071 }
1072
1073 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
1074 __releases(RCU)
1075 {
1076 rcu_read_unlock();
1077 }
1078
1079 static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
1080 {
1081
1082 if (v == SEQ_START_TOKEN)
1083 seq_puts(seq,
1084 "Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
1085 else {
1086 const struct ip_vs_conn *cp = v;
1087 struct net *net = seq_file_net(seq);
1088 char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
1089 size_t len = 0;
1090 char dbuf[IP_VS_ADDRSTRLEN];
1091
1092 if (!net_eq(cp->ipvs->net, net))
1093 return 0;
1094 if (cp->pe_data) {
1095 pe_data[0] = ' ';
1096 len = strlen(cp->pe->name);
1097 memcpy(pe_data + 1, cp->pe->name, len);
1098 pe_data[len + 1] = ' ';
1099 len += 2;
1100 len += cp->pe->show_pe_data(cp, pe_data + len);
1101 }
1102 pe_data[len] = '\0';
1103
1104 #ifdef CONFIG_IP_VS_IPV6
1105 if (cp->daf == AF_INET6)
1106 snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
1107 else
1108 #endif
1109 snprintf(dbuf, sizeof(dbuf), "%08X",
1110 ntohl(cp->daddr.ip));
1111
1112 #ifdef CONFIG_IP_VS_IPV6
1113 if (cp->af == AF_INET6)
1114 seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
1115 "%s %04X %-11s %7u%s\n",
1116 ip_vs_proto_name(cp->protocol),
1117 &cp->caddr.in6, ntohs(cp->cport),
1118 &cp->vaddr.in6, ntohs(cp->vport),
1119 dbuf, ntohs(cp->dport),
1120 ip_vs_state_name(cp),
1121 jiffies_delta_to_msecs(cp->timer.expires -
1122 jiffies) / 1000,
1123 pe_data);
1124 else
1125 #endif
1126 seq_printf(seq,
1127 "%-3s %08X %04X %08X %04X"
1128 " %s %04X %-11s %7u%s\n",
1129 ip_vs_proto_name(cp->protocol),
1130 ntohl(cp->caddr.ip), ntohs(cp->cport),
1131 ntohl(cp->vaddr.ip), ntohs(cp->vport),
1132 dbuf, ntohs(cp->dport),
1133 ip_vs_state_name(cp),
1134 jiffies_delta_to_msecs(cp->timer.expires -
1135 jiffies) / 1000,
1136 pe_data);
1137 }
1138 return 0;
1139 }
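/*
 * Illustrative /proc/net/ip_vs_conn output for an IPv4 TCP connection
 * (addresses and ports are printed in hex; the values below are made
 * up, and PEName/PEData stay empty unless a persistence engine is set):
 *
 *   Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData
 *   TCP C0A80001 D299 C0A8000A 0050 0A000001 0050 ESTABLISHED     897
 */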
1140
1141 static const struct seq_operations ip_vs_conn_seq_ops = {
1142 .start = ip_vs_conn_seq_start,
1143 .next = ip_vs_conn_seq_next,
1144 .stop = ip_vs_conn_seq_stop,
1145 .show = ip_vs_conn_seq_show,
1146 };
1147
1148 static const char *ip_vs_origin_name(unsigned int flags)
1149 {
1150 if (flags & IP_VS_CONN_F_SYNC)
1151 return "SYNC";
1152 else
1153 return "LOCAL";
1154 }
1155
1156 static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
1157 {
1158 char dbuf[IP_VS_ADDRSTRLEN];
1159
1160 if (v == SEQ_START_TOKEN)
1161 seq_puts(seq,
1162 "Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
1163 else {
1164 const struct ip_vs_conn *cp = v;
1165 struct net *net = seq_file_net(seq);
1166
1167 if (!net_eq(cp->ipvs->net, net))
1168 return 0;
1169
1170 #ifdef CONFIG_IP_VS_IPV6
1171 if (cp->daf == AF_INET6)
1172 snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
1173 else
1174 #endif
1175 snprintf(dbuf, sizeof(dbuf), "%08X",
1176 ntohl(cp->daddr.ip));
1177
1178 #ifdef CONFIG_IP_VS_IPV6
1179 if (cp->af == AF_INET6)
1180 seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
1181 "%s %04X %-11s %-6s %7u\n",
1182 ip_vs_proto_name(cp->protocol),
1183 &cp->caddr.in6, ntohs(cp->cport),
1184 &cp->vaddr.in6, ntohs(cp->vport),
1185 dbuf, ntohs(cp->dport),
1186 ip_vs_state_name(cp),
1187 ip_vs_origin_name(cp->flags),
1188 jiffies_delta_to_msecs(cp->timer.expires -
1189 jiffies) / 1000);
1190 else
1191 #endif
1192 seq_printf(seq,
1193 "%-3s %08X %04X %08X %04X "
1194 "%s %04X %-11s %-6s %7u\n",
1195 ip_vs_proto_name(cp->protocol),
1196 ntohl(cp->caddr.ip), ntohs(cp->cport),
1197 ntohl(cp->vaddr.ip), ntohs(cp->vport),
1198 dbuf, ntohs(cp->dport),
1199 ip_vs_state_name(cp),
1200 ip_vs_origin_name(cp->flags),
1201 jiffies_delta_to_msecs(cp->timer.expires -
1202 jiffies) / 1000);
1203 }
1204 return 0;
1205 }
1206
1207 static const struct seq_operations ip_vs_conn_sync_seq_ops = {
1208 .start = ip_vs_conn_seq_start,
1209 .next = ip_vs_conn_seq_next,
1210 .stop = ip_vs_conn_seq_stop,
1211 .show = ip_vs_conn_sync_seq_show,
1212 };
1213 #endif
1214
1215
1216
1217
1218
1219
1220
1221
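/*
 * Helper for the drop_entry defense: an entry becomes a drop candidate
 * only about a minute after its last timer refresh, and is then dropped
 * at a rate taken from todrop_rate[], indexed by the number of incoming
 * packets seen so far (entries outside the [0, 8] packet range are
 * never dropped here).
 */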
1222 static inline int todrop_entry(struct ip_vs_conn *cp)
1223 {
1224
1225
1226
1227
1228 static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1229 static char todrop_counter[9] = {0};
1230 int i;
1231
1232
1233
1234
1235 if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
1236 return 0;
1237
1238
1239
1240 i = atomic_read(&cp->in_pkts);
1241 if (i > 8 || i < 0) return 0;
1242
1243 if (!todrop_rate[i]) return 0;
1244 if (--todrop_counter[i] > 0) return 0;
1245
1246 todrop_counter[i] = todrop_rate[i];
1247 return 1;
1248 }
1249
1250 static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
1251 {
1252 struct ip_vs_service *svc;
1253
1254 if (!cp->dest)
1255 return false;
1256 svc = rcu_dereference(cp->dest->svc);
1257 return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
1258 }
1259
1260
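/*
 * Randomly scan 1/32 of the hash table and expire droppable entries:
 * non-assured templates, TCP/SCTP connections still in their handshake
 * states, and established (or other-protocol) connections that pass
 * todrop_entry().  Connections controlling others are skipped.  Called
 * from the drop_entry defense when memory runs low.
 */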
1261 void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
1262 {
1263 int idx;
1264 struct ip_vs_conn *cp;
1265
1266 rcu_read_lock();
1267
1268
1269
1270 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1271 unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
1272
1273 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
1274 if (cp->ipvs != ipvs)
1275 continue;
1276 if (atomic_read(&cp->n_control))
1277 continue;
1278 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
1279
1280 if (ip_vs_conn_ops_mode(cp))
1281 goto try_drop;
1282 if (!(cp->state & IP_VS_CTPL_S_ASSURED))
1283 goto drop;
1284 continue;
1285 }
1286 if (cp->protocol == IPPROTO_TCP) {
1287 switch (cp->state) {
1288 case IP_VS_TCP_S_SYN_RECV:
1289 case IP_VS_TCP_S_SYNACK:
1290 break;
1291
1292 case IP_VS_TCP_S_ESTABLISHED:
1293 if (todrop_entry(cp))
1294 break;
1295 continue;
1296
1297 default:
1298 continue;
1299 }
1300 } else if (cp->protocol == IPPROTO_SCTP) {
1301 switch (cp->state) {
1302 case IP_VS_SCTP_S_INIT1:
1303 case IP_VS_SCTP_S_INIT:
1304 break;
1305 case IP_VS_SCTP_S_ESTABLISHED:
1306 if (todrop_entry(cp))
1307 break;
1308 continue;
1309 default:
1310 continue;
1311 }
1312 } else {
1313 try_drop:
1314 if (!todrop_entry(cp))
1315 continue;
1316 }
1317
1318 drop:
1319 IP_VS_DBG(4, "drop connection\n");
1320 cp->timeout = 0;
1321 ip_vs_conn_expire_now(cp);
1322 }
1323 cond_resched_rcu();
1324 }
1325 rcu_read_unlock();
1326 }
1327
1328
1329
1330
1331
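/*
 * Flush all connections of a netns: expire the controlling entry of
 * each connection and then the connection itself, and keep rescanning
 * until the per-netns connection count reaches zero.
 */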
1332 static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
1333 {
1334 int idx;
1335 struct ip_vs_conn *cp, *cp_c;
1336
1337 flush_again:
1338 rcu_read_lock();
1339 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1340
1341 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1342 if (cp->ipvs != ipvs)
1343 continue;
1344
1345
1346
1347
1348 cp_c = cp->control;
1349
1350 if (cp_c && __ip_vs_conn_get(cp)) {
1351 IP_VS_DBG(4, "del controlling connection\n");
1352 ip_vs_conn_expire_now(cp_c);
1353 __ip_vs_conn_put(cp);
1354 }
1355 IP_VS_DBG(4, "del connection\n");
1356 ip_vs_conn_expire_now(cp);
1357 }
1358 cond_resched_rcu();
1359 }
1360 rcu_read_unlock();
1361
1362
1363
1364 if (atomic_read(&ipvs->conn_count) != 0) {
1365 schedule();
1366 goto flush_again;
1367 }
1368 }
1369
1370
1371
1372 int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
1373 {
1374 atomic_set(&ipvs->conn_count, 0);
1375
1376 proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
1377 &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state));
1378 proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
1379 &ip_vs_conn_sync_seq_ops,
1380 sizeof(struct ip_vs_iter_state));
1381 return 0;
1382 }
1383
1384 void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
1385 {
1386
1387 ip_vs_conn_flush(ipvs);
1388 remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
1389 remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
1390 }
1391
1392 int __init ip_vs_conn_init(void)
1393 {
1394 int idx;
1395
1396
1397 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
1398 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
1399
1400
1401
1402
1403 ip_vs_conn_tab = vmalloc(array_size(ip_vs_conn_tab_size,
1404 sizeof(*ip_vs_conn_tab)));
1405 if (!ip_vs_conn_tab)
1406 return -ENOMEM;
1407
1408
1409 ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
1410 sizeof(struct ip_vs_conn), 0,
1411 SLAB_HWCACHE_ALIGN, NULL);
1412 if (!ip_vs_conn_cachep) {
1413 vfree(ip_vs_conn_tab);
1414 return -ENOMEM;
1415 }
1416
1417 pr_info("Connection hash table configured "
1418 "(size=%d, memory=%ldKbytes)\n",
1419 ip_vs_conn_tab_size,
1420 (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
1421 IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
1422 sizeof(struct ip_vs_conn));
1423
1424 for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
1425 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
1426
1427 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
1428 spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
1429 }
1430
1431
1432 get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
1433
1434 return 0;
1435 }
1436
1437 void ip_vs_conn_cleanup(void)
1438 {
1439
1440 rcu_barrier();
1441
1442 kmem_cache_destroy(ip_vs_conn_cachep);
1443 vfree(ip_vs_conn_tab);
1444 }