This source file includes the following definitions.
- l2tp_sk_is_v6
- l2tp_pernet
- l2tp_session_id_hash_2
- l2tp_session_id_hash
- l2tp_tunnel_free
- l2tp_tunnel_get
- l2tp_tunnel_get_nth
- l2tp_tunnel_get_session
- l2tp_session_get
- l2tp_session_get_nth
- l2tp_session_get_by_ifname
- l2tp_session_register
- l2tp_recv_queue_skb
- l2tp_recv_dequeue_skb
- l2tp_recv_dequeue
- l2tp_seq_check_rx_window
- l2tp_recv_data_seq
- l2tp_recv_common
- l2tp_session_queue_purge
- l2tp_udp_recv_core
- l2tp_udp_encap_recv
- l2tp_build_l2tpv2_header
- l2tp_build_l2tpv3_header
- l2tp_xmit_core
- l2tp_xmit_skb
- l2tp_tunnel_destruct
- l2tp_tunnel_closeall
- l2tp_udp_encap_destroy
- l2tp_tunnel_del_work
- l2tp_tunnel_sock_create
- l2tp_tunnel_create
- l2tp_validate_socket
- l2tp_tunnel_register
- l2tp_tunnel_delete
- l2tp_session_free
- __l2tp_session_unhash
- l2tp_session_delete
- l2tp_session_set_header_len
- l2tp_session_create
- l2tp_init_net
- l2tp_exit_net
- l2tp_init
- l2tp_exit
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/list.h>
23 #include <linux/rculist.h>
24 #include <linux/uaccess.h>
25
26 #include <linux/kernel.h>
27 #include <linux/spinlock.h>
28 #include <linux/kthread.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/errno.h>
32 #include <linux/jiffies.h>
33
34 #include <linux/netdevice.h>
35 #include <linux/net.h>
36 #include <linux/inetdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/init.h>
39 #include <linux/in.h>
40 #include <linux/ip.h>
41 #include <linux/udp.h>
42 #include <linux/l2tp.h>
43 #include <linux/hash.h>
44 #include <linux/sort.h>
45 #include <linux/file.h>
46 #include <linux/nsproxy.h>
47 #include <net/net_namespace.h>
48 #include <net/netns/generic.h>
49 #include <net/dst.h>
50 #include <net/ip.h>
51 #include <net/udp.h>
52 #include <net/udp_tunnel.h>
53 #include <net/inet_common.h>
54 #include <net/xfrm.h>
55 #include <net/protocol.h>
56 #include <net/inet6_connection_sock.h>
57 #include <net/inet_ecn.h>
58 #include <net/ip6_route.h>
59 #include <net/ip6_checksum.h>
60
61 #include <asm/byteorder.h>
62 #include <linux/atomic.h>
63
64 #include "l2tp_core.h"
65
66 #define L2TP_DRV_VERSION "V2.0"
67
68
69 #define L2TP_HDRFLAG_T 0x8000
70 #define L2TP_HDRFLAG_L 0x4000
71 #define L2TP_HDRFLAG_S 0x0800
72 #define L2TP_HDRFLAG_O 0x0200
73 #define L2TP_HDRFLAG_P 0x0100
74
75 #define L2TP_HDR_VER_MASK 0x000F
76 #define L2TP_HDR_VER_2 0x0002
77 #define L2TP_HDR_VER_3 0x0003
78
79
80 #define L2TP_SLFLAG_S 0x40000000
81 #define L2TP_SL_SEQ_MASK 0x00ffffff
82
83 #define L2TP_HDR_SIZE_MAX 14
84
85
86 #define L2TP_DEFAULT_DEBUG_FLAGS 0
87
88
89
90 struct l2tp_skb_cb {
91 u32 ns;
92 u16 has_seq;
93 u16 length;
94 unsigned long expires;
95 };
96
97 #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
98
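/*
 * Illustrative sketch, not part of the original file: how the receive path
 * is expected to use the L2TP_SKB_CB() scratch area defined above.  The cb[]
 * bytes after the IP layer's inet_skb_parm are treated as a struct
 * l2tp_skb_cb so sequencing state can travel with the skb through the
 * reorder queue.  example_stamp_skb() is a hypothetical helper.
 */
static void example_stamp_skb(struct sk_buff *skb, u32 ns, u16 len,
                              unsigned long timeout)
{
        struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

        cb->ns = ns;                     /* peer's send sequence number */
        cb->has_seq = 1;                 /* sequence number present */
        cb->length = len;                /* payload length, used for stats */
        cb->expires = jiffies + timeout; /* reorder-queue deadline */
}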
99 static struct workqueue_struct *l2tp_wq;
100
101
102 static unsigned int l2tp_net_id;
103 struct l2tp_net {
104 struct list_head l2tp_tunnel_list;
105 spinlock_t l2tp_tunnel_list_lock;
106 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
107 spinlock_t l2tp_session_hlist_lock;
108 };
109
110 #if IS_ENABLED(CONFIG_IPV6)
111 static bool l2tp_sk_is_v6(struct sock *sk)
112 {
113 return sk->sk_family == PF_INET6 &&
114 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
115 }
116 #endif
117
118 static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
119 {
120 return sk->sk_user_data;
121 }
122
123 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
124 {
125 BUG_ON(!net);
126
127 return net_generic(net, l2tp_net_id);
128 }
129
130
131
132
133
134
135 static inline struct hlist_head *
136 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
137 {
138 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
139
140 }
141
142
143
144
145
146
147
148 static inline struct hlist_head *
149 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
150 {
151 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
152 }
153
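/*
 * Illustrative sketch, not part of the original file: both lookup paths
 * reduce a 32-bit session id to a bucket index with hash_32().  The
 * per-tunnel table (l2tp_session_id_hash) uses L2TP_HASH_BITS while the
 * per-net table used for L2TPv3 ids (l2tp_session_id_hash_2) uses
 * L2TP_HASH_BITS_2.  example_session_bucket() is a hypothetical helper.
 */
static u32 example_session_bucket(u32 session_id, bool per_net_table)
{
        return per_net_table ? hash_32(session_id, L2TP_HASH_BITS_2)
                             : hash_32(session_id, L2TP_HASH_BITS);
}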
154 void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
155 {
156 sock_put(tunnel->sock);
157
158 }
159 EXPORT_SYMBOL(l2tp_tunnel_free);
160
161
162 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
163 {
164 const struct l2tp_net *pn = l2tp_pernet(net);
165 struct l2tp_tunnel *tunnel;
166
167 rcu_read_lock_bh();
168 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
169 if (tunnel->tunnel_id == tunnel_id &&
170 refcount_inc_not_zero(&tunnel->ref_count)) {
171 rcu_read_unlock_bh();
172
173 return tunnel;
174 }
175 }
176 rcu_read_unlock_bh();
177
178 return NULL;
179 }
180 EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
181
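/*
 * Illustrative sketch, not part of the original file: l2tp_tunnel_get()
 * returns the tunnel with a reference already taken, so every successful
 * lookup must be paired with l2tp_tunnel_dec_refcount() once the caller is
 * done with it.  example_use_tunnel() is a hypothetical caller.
 */
static void example_use_tunnel(struct net *net, u32 tunnel_id)
{
        struct l2tp_tunnel *tunnel = l2tp_tunnel_get(net, tunnel_id);

        if (!tunnel)
                return;

        pr_debug("tunnel %u uses %d session hash buckets\n",
                 tunnel->tunnel_id, L2TP_HASH_SIZE);

        l2tp_tunnel_dec_refcount(tunnel);
}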
182 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
183 {
184 const struct l2tp_net *pn = l2tp_pernet(net);
185 struct l2tp_tunnel *tunnel;
186 int count = 0;
187
188 rcu_read_lock_bh();
189 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
190 if (++count > nth &&
191 refcount_inc_not_zero(&tunnel->ref_count)) {
192 rcu_read_unlock_bh();
193 return tunnel;
194 }
195 }
196 rcu_read_unlock_bh();
197
198 return NULL;
199 }
200 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
201
202 struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
203 u32 session_id)
204 {
205 struct hlist_head *session_list;
206 struct l2tp_session *session;
207
208 session_list = l2tp_session_id_hash(tunnel, session_id);
209
210 read_lock_bh(&tunnel->hlist_lock);
211 hlist_for_each_entry(session, session_list, hlist)
212 if (session->session_id == session_id) {
213 l2tp_session_inc_refcount(session);
214 read_unlock_bh(&tunnel->hlist_lock);
215
216 return session;
217 }
218 read_unlock_bh(&tunnel->hlist_lock);
219
220 return NULL;
221 }
222 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
223
224 struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
225 {
226 struct hlist_head *session_list;
227 struct l2tp_session *session;
228
229 session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
230
231 rcu_read_lock_bh();
232 hlist_for_each_entry_rcu(session, session_list, global_hlist)
233 if (session->session_id == session_id) {
234 l2tp_session_inc_refcount(session);
235 rcu_read_unlock_bh();
236
237 return session;
238 }
239 rcu_read_unlock_bh();
240
241 return NULL;
242 }
243 EXPORT_SYMBOL_GPL(l2tp_session_get);
244
245 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
246 {
247 int hash;
248 struct l2tp_session *session;
249 int count = 0;
250
251 read_lock_bh(&tunnel->hlist_lock);
252 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
253 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
254 if (++count > nth) {
255 l2tp_session_inc_refcount(session);
256 read_unlock_bh(&tunnel->hlist_lock);
257 return session;
258 }
259 }
260 }
261
262 read_unlock_bh(&tunnel->hlist_lock);
263
264 return NULL;
265 }
266 EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
267
268
269
270
271 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
272 const char *ifname)
273 {
274 struct l2tp_net *pn = l2tp_pernet(net);
275 int hash;
276 struct l2tp_session *session;
277
278 rcu_read_lock_bh();
279 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
280 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
281 if (!strcmp(session->ifname, ifname)) {
282 l2tp_session_inc_refcount(session);
283 rcu_read_unlock_bh();
284
285 return session;
286 }
287 }
288 }
289
290 rcu_read_unlock_bh();
291
292 return NULL;
293 }
294 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
295
296 int l2tp_session_register(struct l2tp_session *session,
297 struct l2tp_tunnel *tunnel)
298 {
299 struct l2tp_session *session_walk;
300 struct hlist_head *g_head;
301 struct hlist_head *head;
302 struct l2tp_net *pn;
303 int err;
304
305 head = l2tp_session_id_hash(tunnel, session->session_id);
306
307 write_lock_bh(&tunnel->hlist_lock);
308 if (!tunnel->acpt_newsess) {
309 err = -ENODEV;
310 goto err_tlock;
311 }
312
313 hlist_for_each_entry(session_walk, head, hlist)
314 if (session_walk->session_id == session->session_id) {
315 err = -EEXIST;
316 goto err_tlock;
317 }
318
319 if (tunnel->version == L2TP_HDR_VER_3) {
320 pn = l2tp_pernet(tunnel->l2tp_net);
321 g_head = l2tp_session_id_hash_2(pn, session->session_id);
322
323 spin_lock_bh(&pn->l2tp_session_hlist_lock);
324
325
326
327
328 hlist_for_each_entry(session_walk, g_head, global_hlist)
329 if (session_walk->session_id == session->session_id &&
330 (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
331 tunnel->encap == L2TP_ENCAPTYPE_IP)) {
332 err = -EEXIST;
333 goto err_tlock_pnlock;
334 }
335
336 l2tp_tunnel_inc_refcount(tunnel);
337 hlist_add_head_rcu(&session->global_hlist, g_head);
338
339 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
340 } else {
341 l2tp_tunnel_inc_refcount(tunnel);
342 }
343
344 hlist_add_head(&session->hlist, head);
345 write_unlock_bh(&tunnel->hlist_lock);
346
347 return 0;
348
349 err_tlock_pnlock:
350 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
351 err_tlock:
352 write_unlock_bh(&tunnel->hlist_lock);
353
354 return err;
355 }
356 EXPORT_SYMBOL_GPL(l2tp_session_register);
357
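/*
 * Illustrative sketch, not part of the original file: the pattern a
 * pseudowire driver is expected to follow.  Sessions are allocated with
 * l2tp_session_create() and only become visible once l2tp_session_register()
 * succeeds; -EEXIST means the session id is already in use.
 * example_add_session() is a hypothetical helper.
 */
static int example_add_session(struct l2tp_tunnel *tunnel, u32 session_id,
                               u32 peer_session_id,
                               struct l2tp_session_cfg *cfg)
{
        struct l2tp_session *session;
        int err;

        session = l2tp_session_create(0, tunnel, session_id, peer_session_id,
                                      cfg);
        if (IS_ERR(session))
                return PTR_ERR(session);

        err = l2tp_session_register(session, tunnel);
        if (err < 0)
                kfree(session); /* never published, so free it directly */

        return err;
}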
358
359
360
361
362
363
364
365 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
366 {
367 struct sk_buff *skbp;
368 struct sk_buff *tmp;
369 u32 ns = L2TP_SKB_CB(skb)->ns;
370
371 spin_lock_bh(&session->reorder_q.lock);
372 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
373 if (L2TP_SKB_CB(skbp)->ns > ns) {
374 __skb_queue_before(&session->reorder_q, skbp, skb);
375 l2tp_dbg(session, L2TP_MSG_SEQ,
376 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
377 session->name, ns, L2TP_SKB_CB(skbp)->ns,
378 skb_queue_len(&session->reorder_q));
379 atomic_long_inc(&session->stats.rx_oos_packets);
380 goto out;
381 }
382 }
383
384 __skb_queue_tail(&session->reorder_q, skb);
385
386 out:
387 spin_unlock_bh(&session->reorder_q.lock);
388 }
389
390
391
392 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
393 {
394 struct l2tp_tunnel *tunnel = session->tunnel;
395 int length = L2TP_SKB_CB(skb)->length;
396
397
398
399
400 skb_orphan(skb);
401
402 atomic_long_inc(&tunnel->stats.rx_packets);
403 atomic_long_add(length, &tunnel->stats.rx_bytes);
404 atomic_long_inc(&session->stats.rx_packets);
405 atomic_long_add(length, &session->stats.rx_bytes);
406
407 if (L2TP_SKB_CB(skb)->has_seq) {
408
409 session->nr++;
410 session->nr &= session->nr_max;
411
412 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
413 session->name, session->nr);
414 }
415
416
417 if (session->recv_skb != NULL)
418 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
419 else
420 kfree_skb(skb);
421 }
422
423
424
425
426 static void l2tp_recv_dequeue(struct l2tp_session *session)
427 {
428 struct sk_buff *skb;
429 struct sk_buff *tmp;
430
431
432
433
434
435 start:
436 spin_lock_bh(&session->reorder_q.lock);
437 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
438 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
439 atomic_long_inc(&session->stats.rx_seq_discards);
440 atomic_long_inc(&session->stats.rx_errors);
441 l2tp_dbg(session, L2TP_MSG_SEQ,
442 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
443 session->name, L2TP_SKB_CB(skb)->ns,
444 L2TP_SKB_CB(skb)->length, session->nr,
445 skb_queue_len(&session->reorder_q));
446 session->reorder_skip = 1;
447 __skb_unlink(skb, &session->reorder_q);
448 kfree_skb(skb);
449 continue;
450 }
451
452 if (L2TP_SKB_CB(skb)->has_seq) {
453 if (session->reorder_skip) {
454 l2tp_dbg(session, L2TP_MSG_SEQ,
455 "%s: advancing nr to next pkt: %u -> %u",
456 session->name, session->nr,
457 L2TP_SKB_CB(skb)->ns);
458 session->reorder_skip = 0;
459 session->nr = L2TP_SKB_CB(skb)->ns;
460 }
461 if (L2TP_SKB_CB(skb)->ns != session->nr) {
462 l2tp_dbg(session, L2TP_MSG_SEQ,
463 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
464 session->name, L2TP_SKB_CB(skb)->ns,
465 L2TP_SKB_CB(skb)->length, session->nr,
466 skb_queue_len(&session->reorder_q));
467 goto out;
468 }
469 }
470 __skb_unlink(skb, &session->reorder_q);
471
472
473
474
475 spin_unlock_bh(&session->reorder_q.lock);
476 l2tp_recv_dequeue_skb(session, skb);
477 goto start;
478 }
479
480 out:
481 spin_unlock_bh(&session->reorder_q.lock);
482 }
483
484 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
485 {
486 u32 nws;
487
488 if (nr >= session->nr)
489 nws = nr - session->nr;
490 else
491 nws = (session->nr_max + 1) - (session->nr - nr);
492
493 return nws < session->nr_window_size;
494 }
495
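/*
 * Worked example for the window check above (not part of the original file).
 * For L2TPv2, nr_max is 0xffff and nr_window_size defaults to nr_max / 2 =
 * 0x7fff (see l2tp_session_create()):
 *
 *   session->nr = 0xfffe, incoming ns = 0x0001 (wrapped):
 *       nws = (0xffff + 1) - (0xfffe - 0x0001) = 3   -> inside the window
 *   session->nr = 0x0000, incoming ns = 0x7fff:
 *       nws = 0x7fff, which is not < 0x7fff          -> treated as out of window
 */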
496
497
498
499 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
500 {
501 if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
502
503
504
505 l2tp_dbg(session, L2TP_MSG_SEQ,
506 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
507 session->name, L2TP_SKB_CB(skb)->ns,
508 L2TP_SKB_CB(skb)->length, session->nr);
509 goto discard;
510 }
511
512 if (session->reorder_timeout != 0) {
513
514
515
516 l2tp_recv_queue_skb(session, skb);
517 goto out;
518 }
519
520
521
522
523
524
525 if (L2TP_SKB_CB(skb)->ns == session->nr) {
526 skb_queue_tail(&session->reorder_q, skb);
527 } else {
528 u32 nr_oos = L2TP_SKB_CB(skb)->ns;
529 u32 nr_next = (session->nr_oos + 1) & session->nr_max;
530
531 if (nr_oos == nr_next)
532 session->nr_oos_count++;
533 else
534 session->nr_oos_count = 0;
535
536 session->nr_oos = nr_oos;
537 if (session->nr_oos_count > session->nr_oos_count_max) {
538 session->reorder_skip = 1;
539 l2tp_dbg(session, L2TP_MSG_SEQ,
540 "%s: %d oos packets received. Resetting sequence numbers\n",
541 session->name, session->nr_oos_count);
542 }
543 if (!session->reorder_skip) {
544 atomic_long_inc(&session->stats.rx_seq_discards);
545 l2tp_dbg(session, L2TP_MSG_SEQ,
546 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
547 session->name, L2TP_SKB_CB(skb)->ns,
548 L2TP_SKB_CB(skb)->length, session->nr,
549 skb_queue_len(&session->reorder_q));
550 goto discard;
551 }
552 skb_queue_tail(&session->reorder_q, skb);
553 }
554
555 out:
556 return 0;
557
558 discard:
559 return 1;
560 }
561
621 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
622 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
623 int length)
624 {
625 struct l2tp_tunnel *tunnel = session->tunnel;
626 int offset;
627 u32 ns, nr;
628
629
630 if (session->peer_cookie_len > 0) {
631 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
632 l2tp_info(tunnel, L2TP_MSG_DATA,
633 "%s: cookie mismatch (%u/%u). Discarding.\n",
634 tunnel->name, tunnel->tunnel_id,
635 session->session_id);
636 atomic_long_inc(&session->stats.rx_cookie_discards);
637 goto discard;
638 }
639 ptr += session->peer_cookie_len;
640 }
641
642
643
644
645
646
647
648
649 ns = nr = 0;
650 L2TP_SKB_CB(skb)->has_seq = 0;
651 if (tunnel->version == L2TP_HDR_VER_2) {
652 if (hdrflags & L2TP_HDRFLAG_S) {
653 ns = ntohs(*(__be16 *) ptr);
654 ptr += 2;
655 nr = ntohs(*(__be16 *) ptr);
656 ptr += 2;
657
658
659 L2TP_SKB_CB(skb)->ns = ns;
660 L2TP_SKB_CB(skb)->has_seq = 1;
661
662 l2tp_dbg(session, L2TP_MSG_SEQ,
663 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
664 session->name, ns, nr, session->nr);
665 }
666 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
667 u32 l2h = ntohl(*(__be32 *) ptr);
668
669 if (l2h & 0x40000000) {
670 ns = l2h & 0x00ffffff;
671
672
673 L2TP_SKB_CB(skb)->ns = ns;
674 L2TP_SKB_CB(skb)->has_seq = 1;
675
676 l2tp_dbg(session, L2TP_MSG_SEQ,
677 "%s: recv data ns=%u, session nr=%u\n",
678 session->name, ns, session->nr);
679 }
680 ptr += 4;
681 }
682
683 if (L2TP_SKB_CB(skb)->has_seq) {
684
685
686
687
688 if ((!session->lns_mode) && (!session->send_seq)) {
689 l2tp_info(session, L2TP_MSG_SEQ,
690 "%s: requested to enable seq numbers by LNS\n",
691 session->name);
692 session->send_seq = 1;
693 l2tp_session_set_header_len(session, tunnel->version);
694 }
695 } else {
696
697
698
699 if (session->recv_seq) {
700 l2tp_warn(session, L2TP_MSG_SEQ,
701 "%s: recv data has no seq numbers when required. Discarding.\n",
702 session->name);
703 atomic_long_inc(&session->stats.rx_seq_discards);
704 goto discard;
705 }
706
707
708
709
710
711
712 if ((!session->lns_mode) && (session->send_seq)) {
713 l2tp_info(session, L2TP_MSG_SEQ,
714 "%s: requested to disable seq numbers by LNS\n",
715 session->name);
716 session->send_seq = 0;
717 l2tp_session_set_header_len(session, tunnel->version);
718 } else if (session->send_seq) {
719 l2tp_warn(session, L2TP_MSG_SEQ,
720 "%s: recv data has no seq numbers when required. Discarding.\n",
721 session->name);
722 atomic_long_inc(&session->stats.rx_seq_discards);
723 goto discard;
724 }
725 }
726
727
728
729
730 if (tunnel->version == L2TP_HDR_VER_2) {
731
732 if (hdrflags & L2TP_HDRFLAG_O) {
733 offset = ntohs(*(__be16 *)ptr);
734 ptr += 2 + offset;
735 }
736 }
737
738 offset = ptr - optr;
739 if (!pskb_may_pull(skb, offset))
740 goto discard;
741
742 __skb_pull(skb, offset);
743
744
745
746
747
748 L2TP_SKB_CB(skb)->length = length;
749 L2TP_SKB_CB(skb)->expires = jiffies +
750 (session->reorder_timeout ? session->reorder_timeout : HZ);
751
752
753
754
755 if (L2TP_SKB_CB(skb)->has_seq) {
756 if (l2tp_recv_data_seq(session, skb))
757 goto discard;
758 } else {
759
760
761
762
763 skb_queue_tail(&session->reorder_q, skb);
764 }
765
766
767 l2tp_recv_dequeue(session);
768
769 return;
770
771 discard:
772 atomic_long_inc(&session->stats.rx_errors);
773 kfree_skb(skb);
774 }
775 EXPORT_SYMBOL(l2tp_recv_common);
776
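/*
 * Layout reference for the data headers parsed by l2tp_recv_common() above
 * and l2tp_udp_recv_core() below (not part of the original file).
 *
 * L2TPv2 over UDP:
 *   | flags/ver (2) | [length (2), if L] | tunnel id (2) | session id (2) |
 *   | [Ns (2) Nr (2), if S] | [offset size (2) + pad, if O] | payload ... |
 *
 * L2TPv3 over UDP:
 *   | flags/ver (2) | reserved (2) | session id (4) | [cookie (4 or 8)] |
 *   | [default L2-specific sublayer (4)] | payload ... |
 */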
777
778
779 static int l2tp_session_queue_purge(struct l2tp_session *session)
780 {
781 struct sk_buff *skb = NULL;
782 BUG_ON(!session);
783 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
784 while ((skb = skb_dequeue(&session->reorder_q))) {
785 atomic_long_inc(&session->stats.rx_errors);
786 kfree_skb(skb);
787 }
788 return 0;
789 }
790
791
792
793
794
795
796
797 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
798 {
799 struct l2tp_session *session = NULL;
800 unsigned char *ptr, *optr;
801 u16 hdrflags;
802 u32 tunnel_id, session_id;
803 u16 version;
804 int length;
805
806
807
808
809 __skb_pull(skb, sizeof(struct udphdr));
810
811
812 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
813 l2tp_info(tunnel, L2TP_MSG_DATA,
814 "%s: recv short packet (len=%d)\n",
815 tunnel->name, skb->len);
816 goto error;
817 }
818
819
820 if (tunnel->debug & L2TP_MSG_DATA) {
821 length = min(32u, skb->len);
822 if (!pskb_may_pull(skb, length))
823 goto error;
824
825 pr_debug("%s: recv\n", tunnel->name);
826 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
827 }
828
829
830 optr = ptr = skb->data;
831
832
833 hdrflags = ntohs(*(__be16 *) ptr);
834
835
836 version = hdrflags & L2TP_HDR_VER_MASK;
837 if (version != tunnel->version) {
838 l2tp_info(tunnel, L2TP_MSG_DATA,
839 "%s: recv protocol version mismatch: got %d expected %d\n",
840 tunnel->name, version, tunnel->version);
841 goto error;
842 }
843
844
845 length = skb->len;
846
847
848 if (hdrflags & L2TP_HDRFLAG_T) {
849 l2tp_dbg(tunnel, L2TP_MSG_DATA,
850 "%s: recv control packet, len=%d\n",
851 tunnel->name, length);
852 goto error;
853 }
854
855
856 ptr += 2;
857
858 if (tunnel->version == L2TP_HDR_VER_2) {
859
860 if (hdrflags & L2TP_HDRFLAG_L)
861 ptr += 2;
862
863
864 tunnel_id = ntohs(*(__be16 *) ptr);
865 ptr += 2;
866 session_id = ntohs(*(__be16 *) ptr);
867 ptr += 2;
868 } else {
869 ptr += 2;
870 tunnel_id = tunnel->tunnel_id;
871 session_id = ntohl(*(__be32 *) ptr);
872 ptr += 4;
873 }
874
875
876 session = l2tp_tunnel_get_session(tunnel, session_id);
877 if (!session || !session->recv_skb) {
878 if (session)
879 l2tp_session_dec_refcount(session);
880
881
882 l2tp_info(tunnel, L2TP_MSG_DATA,
883 "%s: no session found (%u/%u). Passing up.\n",
884 tunnel->name, tunnel_id, session_id);
885 goto error;
886 }
887
888 if (tunnel->version == L2TP_HDR_VER_3 &&
889 l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
890 goto error;
891
892 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
893 l2tp_session_dec_refcount(session);
894
895 return 0;
896
897 error:
898
899 __skb_push(skb, sizeof(struct udphdr));
900
901 return 1;
902 }
903
904
905
906
907
908
909
910 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
911 {
912 struct l2tp_tunnel *tunnel;
913
914 tunnel = rcu_dereference_sk_user_data(sk);
915 if (tunnel == NULL)
916 goto pass_up;
917
918 l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
919 tunnel->name, skb->len);
920
921 if (l2tp_udp_recv_core(tunnel, skb))
922 goto pass_up;
923
924 return 0;
925
926 pass_up:
927 return 1;
928 }
929 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
930
931
932
933
934
935
936
937 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
938 {
939 struct l2tp_tunnel *tunnel = session->tunnel;
940 __be16 *bufp = buf;
941 __be16 *optr = buf;
942 u16 flags = L2TP_HDR_VER_2;
943 u32 tunnel_id = tunnel->peer_tunnel_id;
944 u32 session_id = session->peer_session_id;
945
946 if (session->send_seq)
947 flags |= L2TP_HDRFLAG_S;
948
949
950 *bufp++ = htons(flags);
951 *bufp++ = htons(tunnel_id);
952 *bufp++ = htons(session_id);
953 if (session->send_seq) {
954 *bufp++ = htons(session->ns);
955 *bufp++ = 0;
956 session->ns++;
957 session->ns &= 0xffff;
958 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
959 session->name, session->ns);
960 }
961
962 return bufp - optr;
963 }
964
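/*
 * Illustrative example, not part of the original file: with sequencing
 * enabled, the v2 builder above emits a 10-byte header.  For
 * peer_tunnel_id = 5, peer_session_id = 9, ns = 3 the buffer holds
 * (network byte order):
 *
 *   08 02  00 05  00 09  00 03  00 00
 *   flags  tunid  sesid  Ns     Nr
 *
 * where flags = L2TP_HDRFLAG_S | L2TP_HDR_VER_2 = 0x0802.  Without
 * sequencing the header shrinks to 6 bytes, matching
 * l2tp_session_set_header_len().
 */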
965 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
966 {
967 struct l2tp_tunnel *tunnel = session->tunnel;
968 char *bufp = buf;
969 char *optr = bufp;
970
971
972
973
974 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
975 u16 flags = L2TP_HDR_VER_3;
976 *((__be16 *) bufp) = htons(flags);
977 bufp += 2;
978 *((__be16 *) bufp) = 0;
979 bufp += 2;
980 }
981
982 *((__be32 *) bufp) = htonl(session->peer_session_id);
983 bufp += 4;
984 if (session->cookie_len) {
985 memcpy(bufp, &session->cookie[0], session->cookie_len);
986 bufp += session->cookie_len;
987 }
988 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
989 u32 l2h = 0;
990
991 if (session->send_seq) {
992 l2h = 0x40000000 | session->ns;
993 session->ns++;
994 session->ns &= 0xffffff;
995 l2tp_dbg(session, L2TP_MSG_SEQ,
996 "%s: updated ns to %u\n",
997 session->name, session->ns);
998 }
999
1000 *((__be32 *)bufp) = htonl(l2h);
1001 bufp += 4;
1002 }
1003
1004 return bufp - optr;
1005 }
1006
1007 static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1008 struct flowi *fl, size_t data_len)
1009 {
1010 struct l2tp_tunnel *tunnel = session->tunnel;
1011 unsigned int len = skb->len;
1012 int error;
1013
1014
1015 if (session->send_seq)
1016 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
1017 session->name, data_len, session->ns - 1);
1018 else
1019 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
1020 session->name, data_len);
1021
1022 if (session->debug & L2TP_MSG_DATA) {
1023 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1024 unsigned char *datap = skb->data + uhlen;
1025
1026 pr_debug("%s: xmit\n", session->name);
1027 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1028 datap, min_t(size_t, 32, len - uhlen));
1029 }
1030
1031
1032 skb->ignore_df = 1;
1033 #if IS_ENABLED(CONFIG_IPV6)
1034 if (l2tp_sk_is_v6(tunnel->sock))
1035 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1036 else
1037 #endif
1038 error = ip_queue_xmit(tunnel->sock, skb, fl);
1039
1040
1041 if (error >= 0) {
1042 atomic_long_inc(&tunnel->stats.tx_packets);
1043 atomic_long_add(len, &tunnel->stats.tx_bytes);
1044 atomic_long_inc(&session->stats.tx_packets);
1045 atomic_long_add(len, &session->stats.tx_bytes);
1046 } else {
1047 atomic_long_inc(&tunnel->stats.tx_errors);
1048 atomic_long_inc(&session->stats.tx_errors);
1049 }
1050 }
1051
1052
1053
1054
1055 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1056 {
1057 int data_len = skb->len;
1058 struct l2tp_tunnel *tunnel = session->tunnel;
1059 struct sock *sk = tunnel->sock;
1060 struct flowi *fl;
1061 struct udphdr *uh;
1062 struct inet_sock *inet;
1063 int headroom;
1064 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1065 int udp_len;
1066 int ret = NET_XMIT_SUCCESS;
1067
1068
1069
1070
1071
1072 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1073 uhlen + hdr_len;
1074 if (skb_cow_head(skb, headroom)) {
1075 kfree_skb(skb);
1076 return NET_XMIT_DROP;
1077 }
1078
1079
1080 session->build_header(session, __skb_push(skb, hdr_len));
1081
1082
1083 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1084 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1085 IPSKB_REROUTED);
1086 nf_reset_ct(skb);
1087
1088 bh_lock_sock(sk);
1089 if (sock_owned_by_user(sk)) {
1090 kfree_skb(skb);
1091 ret = NET_XMIT_DROP;
1092 goto out_unlock;
1093 }
1094
1095
1096
1097
1098 if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1099 kfree_skb(skb);
1100 ret = NET_XMIT_DROP;
1101 goto out_unlock;
1102 }
1103
1104
1105 skb_dst_drop(skb);
1106 skb_dst_set(skb, sk_dst_check(sk, 0));
1107
1108 inet = inet_sk(sk);
1109 fl = &inet->cork.fl;
1110 switch (tunnel->encap) {
1111 case L2TP_ENCAPTYPE_UDP:
1112
1113 __skb_push(skb, sizeof(*uh));
1114 skb_reset_transport_header(skb);
1115 uh = udp_hdr(skb);
1116 uh->source = inet->inet_sport;
1117 uh->dest = inet->inet_dport;
1118 udp_len = uhlen + hdr_len + data_len;
1119 uh->len = htons(udp_len);
1120
1121
1122 #if IS_ENABLED(CONFIG_IPV6)
1123 if (l2tp_sk_is_v6(sk))
1124 udp6_set_csum(udp_get_no_check6_tx(sk),
1125 skb, &inet6_sk(sk)->saddr,
1126 &sk->sk_v6_daddr, udp_len);
1127 else
1128 #endif
1129 udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1130 inet->inet_daddr, udp_len);
1131 break;
1132
1133 case L2TP_ENCAPTYPE_IP:
1134 break;
1135 }
1136
1137 l2tp_xmit_core(session, skb, fl, data_len);
1138 out_unlock:
1139 bh_unlock_sock(sk);
1140
1141 return ret;
1142 }
1143 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1144
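/*
 * Illustrative sketch, not part of the original file: callers hand a
 * payload-only skb to l2tp_xmit_skb() together with session->hdr_len.  The
 * L2TP header, optional UDP header and IP header are all pushed here, with
 * skb_cow_head() above growing headroom as needed.  example_xmit() is a
 * hypothetical driver transmit hook.
 */
static int example_xmit(struct l2tp_session *session, struct sk_buff *skb)
{
        return l2tp_xmit_skb(session, skb, session->hdr_len);
}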
1145
1146
1147
1148
1149
1150
1151
1152
1153 static void l2tp_tunnel_destruct(struct sock *sk)
1154 {
1155 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1156
1157 if (tunnel == NULL)
1158 goto end;
1159
1160 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1161
1162
1163 switch (tunnel->encap) {
1164 case L2TP_ENCAPTYPE_UDP:
1165
1166 (udp_sk(sk))->encap_type = 0;
1167 (udp_sk(sk))->encap_rcv = NULL;
1168 (udp_sk(sk))->encap_destroy = NULL;
1169 break;
1170 case L2TP_ENCAPTYPE_IP:
1171 break;
1172 }
1173
1174
1175 sk->sk_destruct = tunnel->old_sk_destruct;
1176 sk->sk_user_data = NULL;
1177
1178
1179 if (sk->sk_destruct)
1180 (*sk->sk_destruct)(sk);
1181
1182 kfree_rcu(tunnel, rcu);
1183 end:
1184 return;
1185 }
1186
1187
1188
1189 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1190 {
1191 int hash;
1192 struct hlist_node *walk;
1193 struct hlist_node *tmp;
1194 struct l2tp_session *session;
1195
1196 BUG_ON(tunnel == NULL);
1197
1198 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1199 tunnel->name);
1200
1201 write_lock_bh(&tunnel->hlist_lock);
1202 tunnel->acpt_newsess = false;
1203 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1204 again:
1205 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1206 session = hlist_entry(walk, struct l2tp_session, hlist);
1207
1208 l2tp_info(session, L2TP_MSG_CONTROL,
1209 "%s: closing session\n", session->name);
1210
1211 hlist_del_init(&session->hlist);
1212
1213 if (test_and_set_bit(0, &session->dead))
1214 goto again;
1215
1216 write_unlock_bh(&tunnel->hlist_lock);
1217
1218 __l2tp_session_unhash(session);
1219 l2tp_session_queue_purge(session);
1220
1221 if (session->session_close != NULL)
1222 (*session->session_close)(session);
1223
1224 l2tp_session_dec_refcount(session);
1225
1226 write_lock_bh(&tunnel->hlist_lock);
1227
1228
1229
1230
1231
1232
1233 goto again;
1234 }
1235 }
1236 write_unlock_bh(&tunnel->hlist_lock);
1237 }
1238
1239
1240 static void l2tp_udp_encap_destroy(struct sock *sk)
1241 {
1242 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1243
1244 if (tunnel)
1245 l2tp_tunnel_delete(tunnel);
1246 }
1247
1248
1249 static void l2tp_tunnel_del_work(struct work_struct *work)
1250 {
1251 struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1252 del_work);
1253 struct sock *sk = tunnel->sock;
1254 struct socket *sock = sk->sk_socket;
1255 struct l2tp_net *pn;
1256
1257 l2tp_tunnel_closeall(tunnel);
1258
1259
1260
1261
1262 if (tunnel->fd < 0) {
1263 if (sock) {
1264 kernel_sock_shutdown(sock, SHUT_RDWR);
1265 sock_release(sock);
1266 }
1267 }
1268
1269
1270 pn = l2tp_pernet(tunnel->l2tp_net);
1271 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1272 list_del_rcu(&tunnel->list);
1273 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1274
1275
1276 l2tp_tunnel_dec_refcount(tunnel);
1277
1278
1279 l2tp_tunnel_dec_refcount(tunnel);
1280 }
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291 static int l2tp_tunnel_sock_create(struct net *net,
1292 u32 tunnel_id,
1293 u32 peer_tunnel_id,
1294 struct l2tp_tunnel_cfg *cfg,
1295 struct socket **sockp)
1296 {
1297 int err = -EINVAL;
1298 struct socket *sock = NULL;
1299 struct udp_port_cfg udp_conf;
1300
1301 switch (cfg->encap) {
1302 case L2TP_ENCAPTYPE_UDP:
1303 memset(&udp_conf, 0, sizeof(udp_conf));
1304
1305 #if IS_ENABLED(CONFIG_IPV6)
1306 if (cfg->local_ip6 && cfg->peer_ip6) {
1307 udp_conf.family = AF_INET6;
1308 memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1309 sizeof(udp_conf.local_ip6));
1310 memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1311 sizeof(udp_conf.peer_ip6));
1312 udp_conf.use_udp6_tx_checksums =
1313 !cfg->udp6_zero_tx_checksums;
1314 udp_conf.use_udp6_rx_checksums =
1315 !cfg->udp6_zero_rx_checksums;
1316 } else
1317 #endif
1318 {
1319 udp_conf.family = AF_INET;
1320 udp_conf.local_ip = cfg->local_ip;
1321 udp_conf.peer_ip = cfg->peer_ip;
1322 udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1323 }
1324
1325 udp_conf.local_udp_port = htons(cfg->local_udp_port);
1326 udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1327
1328 err = udp_sock_create(net, &udp_conf, &sock);
1329 if (err < 0)
1330 goto out;
1331
1332 break;
1333
1334 case L2TP_ENCAPTYPE_IP:
1335 #if IS_ENABLED(CONFIG_IPV6)
1336 if (cfg->local_ip6 && cfg->peer_ip6) {
1337 struct sockaddr_l2tpip6 ip6_addr = {0};
1338
1339 err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1340 IPPROTO_L2TP, &sock);
1341 if (err < 0)
1342 goto out;
1343
1344 ip6_addr.l2tp_family = AF_INET6;
1345 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1346 sizeof(ip6_addr.l2tp_addr));
1347 ip6_addr.l2tp_conn_id = tunnel_id;
1348 err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1349 sizeof(ip6_addr));
1350 if (err < 0)
1351 goto out;
1352
1353 ip6_addr.l2tp_family = AF_INET6;
1354 memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1355 sizeof(ip6_addr.l2tp_addr));
1356 ip6_addr.l2tp_conn_id = peer_tunnel_id;
1357 err = kernel_connect(sock,
1358 (struct sockaddr *) &ip6_addr,
1359 sizeof(ip6_addr), 0);
1360 if (err < 0)
1361 goto out;
1362 } else
1363 #endif
1364 {
1365 struct sockaddr_l2tpip ip_addr = {0};
1366
1367 err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1368 IPPROTO_L2TP, &sock);
1369 if (err < 0)
1370 goto out;
1371
1372 ip_addr.l2tp_family = AF_INET;
1373 ip_addr.l2tp_addr = cfg->local_ip;
1374 ip_addr.l2tp_conn_id = tunnel_id;
1375 err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1376 sizeof(ip_addr));
1377 if (err < 0)
1378 goto out;
1379
1380 ip_addr.l2tp_family = AF_INET;
1381 ip_addr.l2tp_addr = cfg->peer_ip;
1382 ip_addr.l2tp_conn_id = peer_tunnel_id;
1383 err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1384 sizeof(ip_addr), 0);
1385 if (err < 0)
1386 goto out;
1387 }
1388 break;
1389
1390 default:
1391 goto out;
1392 }
1393
1394 out:
1395 *sockp = sock;
1396 if ((err < 0) && sock) {
1397 kernel_sock_shutdown(sock, SHUT_RDWR);
1398 sock_release(sock);
1399 *sockp = NULL;
1400 }
1401
1402 return err;
1403 }
1404
1405 static struct lock_class_key l2tp_socket_class;
1406
1407 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1408 {
1409 struct l2tp_tunnel *tunnel = NULL;
1410 int err;
1411 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1412
1413 if (cfg != NULL)
1414 encap = cfg->encap;
1415
1416 tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1417 if (tunnel == NULL) {
1418 err = -ENOMEM;
1419 goto err;
1420 }
1421
1422 tunnel->version = version;
1423 tunnel->tunnel_id = tunnel_id;
1424 tunnel->peer_tunnel_id = peer_tunnel_id;
1425 tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1426
1427 tunnel->magic = L2TP_TUNNEL_MAGIC;
1428 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1429 rwlock_init(&tunnel->hlist_lock);
1430 tunnel->acpt_newsess = true;
1431
1432 if (cfg != NULL)
1433 tunnel->debug = cfg->debug;
1434
1435 tunnel->encap = encap;
1436
1437 refcount_set(&tunnel->ref_count, 1);
1438 tunnel->fd = fd;
1439
1440
1441 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1442
1443 INIT_LIST_HEAD(&tunnel->list);
1444
1445 err = 0;
1446 err:
1447 if (tunnelp)
1448 *tunnelp = tunnel;
1449
1450 return err;
1451 }
1452 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1453
1454 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1455 enum l2tp_encap_type encap)
1456 {
1457 if (!net_eq(sock_net(sk), net))
1458 return -EINVAL;
1459
1460 if (sk->sk_type != SOCK_DGRAM)
1461 return -EPROTONOSUPPORT;
1462
1463 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1464 return -EPROTONOSUPPORT;
1465
1466 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1467 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1468 return -EPROTONOSUPPORT;
1469
1470 if (sk->sk_user_data)
1471 return -EBUSY;
1472
1473 return 0;
1474 }
1475
1476 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1477 struct l2tp_tunnel_cfg *cfg)
1478 {
1479 struct l2tp_tunnel *tunnel_walk;
1480 struct l2tp_net *pn;
1481 struct socket *sock;
1482 struct sock *sk;
1483 int ret;
1484
1485 if (tunnel->fd < 0) {
1486 ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1487 tunnel->peer_tunnel_id, cfg,
1488 &sock);
1489 if (ret < 0)
1490 goto err;
1491 } else {
1492 sock = sockfd_lookup(tunnel->fd, &ret);
1493 if (!sock)
1494 goto err;
1495
1496 ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
1497 if (ret < 0)
1498 goto err_sock;
1499 }
1500
1501 tunnel->l2tp_net = net;
1502 pn = l2tp_pernet(net);
1503
1504 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1505 list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
1506 if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
1507 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1508
1509 ret = -EEXIST;
1510 goto err_sock;
1511 }
1512 }
1513 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1514 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1515
1516 sk = sock->sk;
1517 sock_hold(sk);
1518 tunnel->sock = sk;
1519
1520 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1521 struct udp_tunnel_sock_cfg udp_cfg = {
1522 .sk_user_data = tunnel,
1523 .encap_type = UDP_ENCAP_L2TPINUDP,
1524 .encap_rcv = l2tp_udp_encap_recv,
1525 .encap_destroy = l2tp_udp_encap_destroy,
1526 };
1527
1528 setup_udp_tunnel_sock(net, sock, &udp_cfg);
1529 } else {
1530 sk->sk_user_data = tunnel;
1531 }
1532
1533 tunnel->old_sk_destruct = sk->sk_destruct;
1534 sk->sk_destruct = &l2tp_tunnel_destruct;
1535 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
1536 "l2tp_sock");
1537 sk->sk_allocation = GFP_ATOMIC;
1538
1539 if (tunnel->fd >= 0)
1540 sockfd_put(sock);
1541
1542 return 0;
1543
1544 err_sock:
1545 if (tunnel->fd < 0)
1546 sock_release(sock);
1547 else
1548 sockfd_put(sock);
1549 err:
1550 return ret;
1551 }
1552 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1553
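/*
 * Illustrative sketch, not part of the original file: the two-step pattern a
 * front end is expected to follow.  Passing fd = -1 makes
 * l2tp_tunnel_register() create an unmanaged kernel socket from cfg.
 * example_add_tunnel() is a hypothetical helper for an L2TPv3 tunnel.
 */
static int example_add_tunnel(struct net *net, u32 tunnel_id,
                              u32 peer_tunnel_id,
                              struct l2tp_tunnel_cfg *cfg)
{
        struct l2tp_tunnel *tunnel;
        int err;

        err = l2tp_tunnel_create(net, -1, 3, tunnel_id, peer_tunnel_id,
                                 cfg, &tunnel);
        if (err < 0)
                return err;

        err = l2tp_tunnel_register(tunnel, net, cfg);
        if (err < 0)
                kfree(tunnel); /* not yet on the per-net tunnel list */

        return err;
}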
1554
1555
1556 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1557 {
1558 if (!test_and_set_bit(0, &tunnel->dead)) {
1559 l2tp_tunnel_inc_refcount(tunnel);
1560 queue_work(l2tp_wq, &tunnel->del_work);
1561 }
1562 }
1563 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1564
1565
1566
1567 void l2tp_session_free(struct l2tp_session *session)
1568 {
1569 struct l2tp_tunnel *tunnel = session->tunnel;
1570
1571 BUG_ON(refcount_read(&session->ref_count) != 0);
1572
1573 if (tunnel) {
1574 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1575 l2tp_tunnel_dec_refcount(tunnel);
1576 }
1577
1578 kfree(session);
1579 }
1580 EXPORT_SYMBOL_GPL(l2tp_session_free);
1581
1582
1583
1584
1585
1586
1587 void __l2tp_session_unhash(struct l2tp_session *session)
1588 {
1589 struct l2tp_tunnel *tunnel = session->tunnel;
1590
1591
1592 if (tunnel) {
1593
1594 write_lock_bh(&tunnel->hlist_lock);
1595 hlist_del_init(&session->hlist);
1596 write_unlock_bh(&tunnel->hlist_lock);
1597
1598
1599 if (tunnel->version != L2TP_HDR_VER_2) {
1600 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1601 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1602 hlist_del_init_rcu(&session->global_hlist);
1603 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1604 synchronize_rcu();
1605 }
1606 }
1607 }
1608 EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1609
1610
1611
1612
1613 int l2tp_session_delete(struct l2tp_session *session)
1614 {
1615 if (test_and_set_bit(0, &session->dead))
1616 return 0;
1617
1618 __l2tp_session_unhash(session);
1619 l2tp_session_queue_purge(session);
1620 if (session->session_close != NULL)
1621 (*session->session_close)(session);
1622
1623 l2tp_session_dec_refcount(session);
1624
1625 return 0;
1626 }
1627 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1628
1629
1630
1631
1632 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1633 {
1634 if (version == L2TP_HDR_VER_2) {
1635 session->hdr_len = 6;
1636 if (session->send_seq)
1637 session->hdr_len += 4;
1638 } else {
1639 session->hdr_len = 4 + session->cookie_len;
1640 session->hdr_len += l2tp_get_l2specific_len(session);
1641 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1642 session->hdr_len += 4;
1643 }
1644
1645 }
1646 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1647
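/*
 * Worked examples for the sizes computed above (not part of the original
 * file):
 *
 *   L2TPv2, no sequencing:                        6 bytes
 *   L2TPv2, send_seq enabled:                    10 bytes (adds Ns/Nr)
 *   L2TPv3 over IP, 4-byte cookie,
 *     default L2-specific sublayer:              12 bytes (4 + 4 + 4)
 *   the same session over UDP:                   16 bytes (adds flags/reserved)
 */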
1648 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1649 {
1650 struct l2tp_session *session;
1651
1652 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1653 if (session != NULL) {
1654 session->magic = L2TP_SESSION_MAGIC;
1655 session->tunnel = tunnel;
1656
1657 session->session_id = session_id;
1658 session->peer_session_id = peer_session_id;
1659 session->nr = 0;
1660 if (tunnel->version == L2TP_HDR_VER_2)
1661 session->nr_max = 0xffff;
1662 else
1663 session->nr_max = 0xffffff;
1664 session->nr_window_size = session->nr_max / 2;
1665 session->nr_oos_count_max = 4;
1666
1667
1668 session->reorder_skip = 1;
1669
1670 sprintf(&session->name[0], "sess %u/%u",
1671 tunnel->tunnel_id, session->session_id);
1672
1673 skb_queue_head_init(&session->reorder_q);
1674
1675 INIT_HLIST_NODE(&session->hlist);
1676 INIT_HLIST_NODE(&session->global_hlist);
1677
1678
1679 session->debug = tunnel->debug;
1680
1681 if (cfg) {
1682 session->pwtype = cfg->pw_type;
1683 session->debug = cfg->debug;
1684 session->send_seq = cfg->send_seq;
1685 session->recv_seq = cfg->recv_seq;
1686 session->lns_mode = cfg->lns_mode;
1687 session->reorder_timeout = cfg->reorder_timeout;
1688 session->l2specific_type = cfg->l2specific_type;
1689 session->cookie_len = cfg->cookie_len;
1690 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1691 session->peer_cookie_len = cfg->peer_cookie_len;
1692 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1693 }
1694
1695 if (tunnel->version == L2TP_HDR_VER_2)
1696 session->build_header = l2tp_build_l2tpv2_header;
1697 else
1698 session->build_header = l2tp_build_l2tpv3_header;
1699
1700 l2tp_session_set_header_len(session, tunnel->version);
1701
1702 refcount_set(&session->ref_count, 1);
1703
1704 return session;
1705 }
1706
1707 return ERR_PTR(-ENOMEM);
1708 }
1709 EXPORT_SYMBOL_GPL(l2tp_session_create);
1710
1711
1712
1713
1714
1715 static __net_init int l2tp_init_net(struct net *net)
1716 {
1717 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1718 int hash;
1719
1720 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1721 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1722
1723 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1724 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1725
1726 spin_lock_init(&pn->l2tp_session_hlist_lock);
1727
1728 return 0;
1729 }
1730
1731 static __net_exit void l2tp_exit_net(struct net *net)
1732 {
1733 struct l2tp_net *pn = l2tp_pernet(net);
1734 struct l2tp_tunnel *tunnel = NULL;
1735 int hash;
1736
1737 rcu_read_lock_bh();
1738 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1739 l2tp_tunnel_delete(tunnel);
1740 }
1741 rcu_read_unlock_bh();
1742
1743 if (l2tp_wq)
1744 flush_workqueue(l2tp_wq);
1745 rcu_barrier();
1746
1747 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1748 WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
1749 }
1750
1751 static struct pernet_operations l2tp_net_ops = {
1752 .init = l2tp_init_net,
1753 .exit = l2tp_exit_net,
1754 .id = &l2tp_net_id,
1755 .size = sizeof(struct l2tp_net),
1756 };
1757
1758 static int __init l2tp_init(void)
1759 {
1760 int rc = 0;
1761
1762 rc = register_pernet_device(&l2tp_net_ops);
1763 if (rc)
1764 goto out;
1765
1766 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1767 if (!l2tp_wq) {
1768 pr_err("alloc_workqueue failed\n");
1769 unregister_pernet_device(&l2tp_net_ops);
1770 rc = -ENOMEM;
1771 goto out;
1772 }
1773
1774 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1775
1776 out:
1777 return rc;
1778 }
1779
1780 static void __exit l2tp_exit(void)
1781 {
1782 unregister_pernet_device(&l2tp_net_ops);
1783 if (l2tp_wq) {
1784 destroy_workqueue(l2tp_wq);
1785 l2tp_wq = NULL;
1786 }
1787 }
1788
1789 module_init(l2tp_init);
1790 module_exit(l2tp_exit);
1791
1792 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1793 MODULE_DESCRIPTION("L2TP core");
1794 MODULE_LICENSE("GPL");
1795 MODULE_VERSION(L2TP_DRV_VERSION);