This source file includes the following definitions:
- dccp_event_ack_sent
- dccp_skb_entail
- dccp_transmit_skb
- dccp_determine_ccmps
- dccp_sync_mss
- dccp_write_space
- dccp_wait_for_ccid
- dccp_xmit_packet
- dccp_flush_write_queue
- dccp_write_xmit
- dccp_retransmit_skb
- dccp_make_response
- dccp_ctl_make_reset
- dccp_send_reset
- dccp_connect
- dccp_send_ack
- dccp_send_delayed_ack
- dccp_send_sync
- dccp_send_close
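/*
 *  net/dccp/output.c
 *
 *  Packet output path of the DCCP protocol implementation: header
 *  construction, CCID-controlled dequeueing, and control packets.
 */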
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

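/* Enqueue @skb on sk_send_head for possible retransmission; hand back a
 * clone for immediate transmission by the caller. */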
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
        return skb_clone(sk->sk_send_head, gfp_any());
}

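/*
 * All SKBs seen here are completely headerless: it is our job to build the
 * DCCP header and pass the packet down to IP for transmission.
 */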
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
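                /* Only 48-bit (extended) sequence numbers are used, so the
                 * generic header is always followed by dccp_hdr_ext. */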
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

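                /*
                 * Tentatively bump GSS so that option code can already use
                 * the new sequence number; dccp_update_gss() below commits
                 * it for real only once option insertion has succeeded.
                 */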
                dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        fallthrough;
                case DCCP_PKT_DATAACK:
                case DCCP_PKT_RESET:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
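                        /* Use ISS on the first (non-retransmitted) Request. */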
                        if (icsk->icsk_retransmits == 0)
                                dcb->dccpd_seq = dp->dccps_iss;
                        fallthrough;

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_ack_seq;
                        fallthrough;
                default:
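                        /*
                         * Set owner/destructor: some skbs are allocated via
                         * alloc_skb (e.g. when retransmitting or sending
                         * subsequent Acks), so the destructor is set here.
                         */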
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

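                /* Build DCCP header and checksum it. */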
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->inet_sport;
                dh->dccph_dport = inet->inet_dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
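                /* x = 1 selects the extended (48-bit) sequence number format. */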
                dh->dccph_x     = 1;

                dccp_update_gss(sk, dcb->dccpd_seq);
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
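                        /*
                         * Limit Ack window to ISS <= P.ackno <= GSS, so that
                         * only Responses to Requests we sent are considered.
                         */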
                        dp->dccps_awl = dp->dccps_iss;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
                return net_xmit_eval(err);
        }
        return -ENOBUFS;
}

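/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * Only the TX CCID is consulted when setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are
 * small compared to the data traffic. A value of 0 means "no current CCMPS".
 */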
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
        const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

        if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
                return 0;
        return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 ccmps = dccp_determine_ccmps(dp);
        u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

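        /* Account for header lengths and IPv4/v6 option overhead. */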
        cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
                    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

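        /*
         * Leave enough headroom for common DCCP header options. This only
         * considers options which may appear on DCCP-Data packets (RFC 4340,
         * 5.8, table 3): 1 byte for Slow Receiver, 6 for Timestamp, 10 for
         * Timestamp Echo, 8 for the NDP count (when activated), 6 for Data
         * Checksum, plus minimal Ack Vector space when Ack Vectors are in use.
         */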
        cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
                           (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

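        /* And cache the results. */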
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = cur_mps;

        return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
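        /* Should agree with poll, otherwise some programs break. */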
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        rcu_read_unlock();
}

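/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * Used by CCIDs which need to delay the send time in process context.
 * Returns the remaining wait time in jiffies, or -1 on error or signal.
 */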
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
        DEFINE_WAIT(wait);
        long remaining;

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk->sk_write_pending++;
        release_sock(sk);

        remaining = schedule_timeout(delay);

        lock_sock(sk);
        sk->sk_write_pending--;
        finish_wait(sk_sleep(sk), &wait);

        if (signal_pending(current) || sk->sk_err)
                return -1;
        return remaining;
}

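/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Transmits the next-queued payload, honouring congestion-control limits.
 */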
static void dccp_xmit_packet(struct sock *sk)
{
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb = dccp_qpolicy_pop(sk);

        if (unlikely(skb == NULL))
                return;
        len = skb->len;

        if (sk->sk_state == DCCP_PARTOPEN) {
                const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;

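                /*
                 * See RFC 4340, 8.1.5 - Handshake Completion.
                 *
                 * For robustness we resend Confirm options until the client
                 * has entered OPEN. During initial feature negotiation the
                 * MPS is smaller than usual, reduced by the Change/Confirm
                 * options.
                 */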
                if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
                        DCCP_WARN("Payload too large (%d) for featneg.\n", len);
                        dccp_send_ack(sk);
                        dccp_feat_list_purge(&dp->dccps_featneg);
                }

                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else if (dccp_ack_pending(sk)) {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
        }

        err = dccp_transmit_skb(sk, skb);
        if (err)
                dccp_pr_debug("transmit_skb() returned err=%d\n", err);
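        /*
         * Register this one as sent even if an error occurred. To the remote
         * end a local packet drop is indistinguishable from network loss, i.e.
         * any local drop will eventually be reported via receiver feedback.
         */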
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

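        /*
         * If the CCID needs to transfer additional header options out-of-band
         * (e.g. Ack Vectors or feature-negotiation options), it activates this
         * flag to schedule a Sync. The Sync will automatically incorporate all
         * currently pending header options, thus clearing the backlog.
         */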
        if (dp->dccps_sync_scheduled)
                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

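/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it
 * may happen that the TX queue is not empty at the end of a connection. The
 * HC-sender CCID is given a grace period of up to @time_budget jiffies. If
 * this function returns with a non-empty write queue, it will be purged later.
 */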
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long delay, rc;

        while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
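                        /*
                         * If the CCID determines when to send, the next send
                         * time is unknown and the CCID may not even send again
                         * (e.g. remote host crashes or lost Ack packets).
                         */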
                        DCCP_WARN("CCID did not manage to send all packets\n");
                        return;
                case CCID_PACKET_DELAY:
                        delay = msecs_to_jiffies(rc);
                        if (delay > *time_budget)
                                return;
                        rc = dccp_wait_for_ccid(sk, delay);
                        if (rc < 0)
                                return;
                        *time_budget -= (delay - rc);
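                        /* Check again if we can send now. */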
                        break;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        skb_dequeue(&sk->sk_write_queue);
                        kfree_skb(skb);
                        dccp_pr_debug("packet discarded due to err=%ld\n", rc);
                }
        }
}

void dccp_write_xmit(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        while ((skb = dccp_qpolicy_top(sk))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        return;
                case CCID_PACKET_DELAY:
                        sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                       jiffies + msecs_to_jiffies(rc));
                        return;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        dccp_qpolicy_drop(sk, skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
}

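/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (RFC 4340, 8.1.1),
 * - CloseReq in server-CLOSEREQ state (8.3),
 * - Close    in   node-CLOSING  state (8.3),
 * - Acks in client-PARTOPEN state (8.1.5, handled by the delack timer).
 * This function expects sk->sk_send_head to contain the original skb.
 */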
int dccp_retransmit_skb(struct sock *sk)
{
        WARN_ON(sk->sk_send_head == NULL);

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH;

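        /* This counter distinguishes original and retransmitted skbs. */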
        inet_csk(sk)->icsk_retransmits++;

        return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb;

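        /* sk is marked const to express that we don't hold the socket lock;
         * sock_wmalloc() changes sk->sk_wmem_alloc atomically, so it is safe
         * to promote sk to non-const here. */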
        skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
                           GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, MAX_DCCP_HEADER);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)
                dccp_inc_seqno(&dreq->dreq_gss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;

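        /* Resolve feature dependencies resulting from the choice of CCID. */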
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

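        /* Build and checksum the header. */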
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = htons(inet_rsk(req)->ir_num);
        dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

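        /* We use `acked' to remember that a Response was already sent. */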
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
response_failed:
        kfree_skb(skb);
        return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

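/* Answer the offending packet in @rcv_skb with a Reset from the control socket. */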
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct dccp_hdr_reset *dhr;
        struct sk_buff *skb;

        skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, sk->sk_prot->max_header);

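        /* Swap the send and the receive. */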
        dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;

        dhr = dccp_hdr_reset(skb);
        dhr->dccph_reset_code = dcb->dccpd_reset_code;

        switch (dcb->dccpd_reset_code) {
        case DCCP_RESET_CODE_PACKET_ERROR:
                dhr->dccph_reset_data[0] = rxdh->dccph_type;
                break;
        case DCCP_RESET_CODE_OPTION_ERROR:
        case DCCP_RESET_CODE_MANDATORY_ERROR:
                memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
                break;
        }

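        /*
         * From RFC 4340, 8.3.1:
         *   If P.ackno exists, set R.seqno := P.ackno + 1.
         *   Else set R.seqno := 0 (or any other sensible value).
         */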
        if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

        dccp_csum_outgoing(skb);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

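/* Send a Reset on an established socket, moving it towards the closed state. */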
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        struct sk_buff *skb;
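        /* The route may have changed; rebuild the header before sending. */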
        int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

        if (err != 0)
                return err;

        skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOBUFS;

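        /* Reserve space for headers and prepare control bits. */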
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_reset_code = code;

        return dccp_transmit_skb(sk, skb);
}

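/*
 * Do all connect socket setups that can be done AF independent.
 */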
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

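        /* Do not connect if feature negotiation setup fails. */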
        if (dccp_feat_finalise_settings(dccp_sk(sk)))
                return -EPROTO;

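        /* Initialise GAR as per RFC 4340, 8.5; AWL/AWH are set in dccp_transmit_skb(). */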
        dp->dccps_gar = dp->dccps_iss;

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

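        /* Reserve space for headers. */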
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

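        /* Timer for repeating the REQUEST until an answer arrives. */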
        icsk->icsk_retransmits = 0;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
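        /* If we have been reset, we may not send again. */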
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

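                /* Reserve space for headers. */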
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
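/* FIXME: is this still necessary (RFC 4340, 11.3)? Currently unused by DCCP. */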
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
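        /*
         * FIXME: tune this timer. Elapsed time fixes the skew, so 2s is
         * unproblematic, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescence.
         */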
        unsigned long timeout = jiffies + 2 * HZ;

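        /* Use the new timeout only if there wasn't an earlier one. */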
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
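                /*
                 * If the delack timer is blocked or about to expire,
                 * send the ACK now.
                 */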
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
                    const enum dccp_pkt_type pkt_type)
{
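        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */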
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL) {
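                /* FIXME: how to make sure the Sync is sent at all? */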
                DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
                return;
        }

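        /* Reserve space for headers and prepare control bits. */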
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

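        /*
         * Clear the flag in case the Sync was scheduled for out-of-band data,
         * such as carrying a long Ack Vector.
         */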
        dccp_sk(sk)->dccps_sync_scheduled = 0;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

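/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to interleave with other sending (like retransmissions),
 * because the Close/CloseReq must be entailed into the write queue, see
 * dccp_skb_entail().
 */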
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

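        /* Reserve space for headers and prepare control bits. */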
        skb_reserve(skb, sk->sk_prot->max_header);
        if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
        else
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

        if (active) {
                skb = dccp_skb_entail(sk, skb);
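                /*
                 * Retransmission timer for active-close: RFC 4340, 8.3
                 * requires retransmitting the Close/CloseReq until the
                 * CLOSING/CLOSEREQ state can be left. Since RTT measurement
                 * is done by the CCIDs, there is no easy way to get an RTT
                 * sample here, and the fallback RTT from RFC 4340, 3.4 is
                 * too low (200ms); a high initial value avoids unnecessary
                 * retransmissions when the link RTT is high.
                 */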
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
        }
        dccp_transmit_skb(sk, skb);
}