This source file includes the following definitions:
- ccid2_hc_tx_alloc_seq
- ccid2_hc_tx_send_packet
- ccid2_change_l_ack_ratio
- ccid2_check_l_ack_ratio
- ccid2_change_l_seq_window
- dccp_tasklet_schedule
- ccid2_hc_tx_rto_expire
- ccid2_update_used_window
- ccid2_cwnd_application_limited
- ccid2_cwnd_restart
- ccid2_hc_tx_packet_sent
- ccid2_rtt_estimator
- ccid2_new_ack
- ccid2_congestion_event
- ccid2_hc_tx_parse_options
- ccid2_hc_tx_packet_recv
- ccid2_hc_tx_init
- ccid2_hc_tx_exit
- ccid2_hc_rx_packet_recv
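
/*
 * CCID-2: TCP-like Congestion Control, as specified in RFC 4341.
 */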
#include <linux/slab.h>
#include "../feat.h"
#include "ccid2.h"

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static bool ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
#endif

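/*
 * ccid2_hc_tx_alloc_seq  -  Extend the circular packet-history buffer
 *
 * Allocates one more chunk of CCID2_SEQBUF_LEN entries, links it into the
 * existing circular list, and records the pointer so it can be freed later.
 */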
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
{
	struct ccid2_seq *seqp;
	int i;

	/* check if we have space to preserve the pointer to the buffer */
	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
			       sizeof(struct ccid2_seq *)))
		return -ENOMEM;

	/* allocate buffer and initialize linked list */
	seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
			     gfp_any());
	if (seqp == NULL)
		return -ENOMEM;

	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	}
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* This is the first allocation: initialise head and tail. */
	if (hc->tx_seqbufc == 0)
		hc->tx_seqh = hc->tx_seqt = seqp;
	else {
		/* link the existing list with the one we just created */
		hc->tx_seqh->ccid2s_next = seqp;
		seqp->ccid2s_prev = hc->tx_seqh;

		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
	}

	/* store the original pointer to the buffer so we can free it */
	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
	hc->tx_seqbufc++;

	return 0;
}

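/*
 * ccid2_hc_tx_send_packet  -  Check whether a packet may be sent now
 *
 * Defers dequeueing while the congestion window is filled by packets in
 * flight; transmission resumes via the TX tasklet once Acks open the window.
 */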
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
		return CCID_PACKET_WILL_DEQUEUE_LATER;
	return CCID_PACKET_SEND_AT_ONCE;
}

static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), as required by
	 * RFC 4341, 6.1.2. An Ack Ratio of 0 (i.e. Acks disabled) or one
	 * larger than cwnd/2 would starve the feedback loop whenever cwnd is
	 * small, so such values are clamped to max_ratio.
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
		val = max_ratio;
	}
	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
}

static void ccid2_check_l_ack_ratio(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/*
	 * If the Ack Ratio exceeds cwnd (e.g. after cwnd has been halved),
	 * the receiver may acknowledge less than once per window: the sender
	 * would then stall waiting for an Ack the receiver never generates.
	 * Reduce the Ack Ratio to cwnd/2 (minimum 1) so that at least one
	 * Ack arrives per congestion window.
	 */
	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
}

static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
{
	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
				   clamp_val(val, DCCPF_SEQ_WMIN,
						  DCCPF_SEQ_WMAX));
}

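/*
 * dccp_tasklet_schedule  -  Defer transmission to the TX tasklet
 *
 * Holds a socket reference while the tasklet is pending (released again by
 * the tasklet handler); the explicit test_and_set_bit() ensures the
 * reference is taken at most once per scheduling.
 */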
static void dccp_tasklet_schedule(struct sock *sk)
{
	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;

	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		sock_hold(sk);
		__tasklet_schedule(t);
	}
}

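/*
 * ccid2_hc_tx_rto_expire  -  RTO timer expiry (no feedback for a full RTO)
 *
 * Backs off the timer, collapses the congestion window to 1 packet, and
 * clears the packet history, mirroring TCP's retransmission-timeout response.
 */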
static void ccid2_hc_tx_rto_expire(struct timer_list *t)
{
	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
	struct sock *sk = hc->sk;
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	/* back-off timer */
	hc->tx_rto <<= 1;
	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;

	/* adjust pipe, cwnd etc. */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)
		hc->tx_ssthresh = 2;
	hc->tx_cwnd = 1;
	hc->tx_pipe = 0;

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear Ack Ratio state */
	hc->tx_rpseq = 0;
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		dccp_tasklet_schedule(sk);
	/* restart backed-off timer */
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * Congestion Window Validation (RFC 2861)
 */
static bool ccid2_do_cwv = true;
module_param(ccid2_do_cwv, bool, 0644);
MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");

/**
 * ccid2_update_used_window  -  Track how much of cwnd is actually used
 *
 * This is done in addition to CWV. The sender needs an idea of how many
 * packets may be in flight, to set the local Sequence Window value
 * accordingly (RFC 4340, 7.5.2). As a consequence, Ack Ratio and Sequence
 * Window are maintained here along with the CWV mechanism.
 */
static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
{
	/* moving average weighted 3/4 (old estimate) to 1/4 (new sample) */
	hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
}

/* This borrows the code of tcp_cwnd_application_limited() */
static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	/* don't reduce cwnd below the initial window (IW) */
	u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
	    win_used = max(hc->tx_cwnd_used, init_win);

	if (win_used < hc->tx_cwnd) {
		hc->tx_ssthresh = max(hc->tx_ssthresh,
				      (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
		hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
	}
	hc->tx_cwnd_used = 0;
	hc->tx_cwnd_stamp = now;

	ccid2_check_l_ack_ratio(sk);
}

/* This borrows the code of tcp_cwnd_restart() */
static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	u32 cwnd = hc->tx_cwnd, restart_cwnd,
	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
	s32 delta = now - hc->tx_lsndtime;

	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));

	/* don't reduce cwnd below the initial window (IW) */
	restart_cwnd = min(cwnd, iwnd);

	/* halve cwnd for every RTO that elapsed without sending */
	while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	hc->tx_cwnd = max(cwnd, restart_cwnd);
	hc->tx_cwnd_stamp = now;
	hc->tx_cwnd_used = 0;

	ccid2_check_l_ack_ratio(sk);
}

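/*
 * ccid2_hc_tx_packet_sent  -  Per-packet sender bookkeeping
 *
 * Applies slow-start after idle periods (RFC 2581/2861), updates the
 * used-window estimate, records the packet in the history ring, and
 * (re)arms the RTO timer.
 */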
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	const u32 now = ccid2_jiffies32;
	struct ccid2_seq *next;

	/* slow-start after idle periods (RFC 2581, RFC 2861) */
	if (ccid2_do_cwv && !hc->tx_pipe &&
	    (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
		ccid2_cwnd_restart(sk, now);

	hc->tx_lsndtime = now;
	hc->tx_pipe += 1;

	/* see whether cwnd was fully used (RFC 2861), update expected window */
	if (ccid2_cwnd_network_limited(hc)) {
		ccid2_update_used_window(hc, hc->tx_cwnd);
		hc->tx_cwnd_used = 0;
		hc->tx_cwnd_stamp = now;
	} else {
		if (hc->tx_pipe > hc->tx_cwnd_used)
			hc->tx_cwnd_used = hc->tx_pipe;

		ccid2_update_used_window(hc, hc->tx_cwnd_used);

		if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
			ccid2_cwnd_application_limited(sk, now);
	}

	hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent = now;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to deal with this */
			return;
		}
		next = hc->tx_seqh->ccid2s_next;
		BUG_ON(next == hc->tx_seqt);
	}
	hc->tx_seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

	/*
	 * Disabled, experimental Ack Ratio probing: the code below tried to
	 * lower the Ack Ratio once enough packets were sent without Ack
	 * loss. It remains under #if 0 since adapting the Ack Ratio this way
	 * proved problematic.
	 */
#if 0
	/* need to maintain a notion of how many windows we sent */
	hc->tx_arsent++;

	if (hc->tx_ackloss) {
		/* we had an Ack loss in this window */
		if (hc->tx_arsent >= hc->tx_cwnd) {
			hc->tx_arsent = 0;
			hc->tx_ackloss = 0;
		}
	} else {
		/* no Acks lost up to now */
		if (dp->dccps_l_ack_ratio > 1) {
			/* decrease Ack Ratio if enough packets were sent */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hc->tx_cwnd * hc->tx_cwnd / denom;

			if (hc->tx_arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hc->tx_arsent = 0;
			}
		} else {
			/* we can't decrease the Ack Ratio any further */
			hc->tx_arsent = 0;
		}
	}
#endif

	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	do {
		struct ccid2_seq *seqp = hc->tx_seqt;

		while (seqp != hc->tx_seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
#endif
}

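/*
 * ccid2_rtt_estimator  -  Sample RTT and compute RTO per RFC 2988
 *
 * Van Jacobson's SRTT/RTTVAR scheme as in TCP's tcp_rtt_estimator(), with
 * srtt scaled by 8 and mdev/rttvar scaled by 4.
 */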
static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	long m = mrtt ? : 1;

	if (hc->tx_srtt == 0) {
		/* first measurement m */
		hc->tx_srtt = m << 3;
		hc->tx_mdev = m << 1;

		hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
		hc->tx_rttvar = hc->tx_mdev_max;

		hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
	} else {
		/* update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
		m -= (hc->tx_srtt >> 3);
		hc->tx_srtt += m;

		/* similarly, update scaled mdev with regard to |m| */
		if (m < 0) {
			m = -m;
			m -= (hc->tx_mdev >> 2);
			/*
			 * This neutralises RTO increase when RTT < SRTT - mdev
			 * (see P. Sarolahti, A. Kuznetsov, "Congestion Control
			 * in Linux TCP", USENIX 2002).
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (hc->tx_mdev >> 2);
		}
		hc->tx_mdev += m;

		if (hc->tx_mdev > hc->tx_mdev_max) {
			hc->tx_mdev_max = hc->tx_mdev;
			if (hc->tx_mdev_max > hc->tx_rttvar)
				hc->tx_rttvar = hc->tx_mdev_max;
		}

		/*
		 * Decay RTTVAR at most once per flight: GAR (greatest Ack
		 * received, RFC 4340, 7.5.1) moving past the sequence number
		 * recorded in tx_rtt_seq indicates that the previous flight
		 * has been acknowledged.
		 */
		if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
			if (hc->tx_mdev_max < hc->tx_rttvar)
				hc->tx_rttvar -= (hc->tx_rttvar -
						  hc->tx_mdev_max) >> 2;
			hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
			hc->tx_mdev_max = tcp_rto_min(sk);
		}
	}

	/*
	 * Set RTO from SRTT and RTTVAR. As in TCP, 4 * RTTVAR >= TCP_RTO_MIN,
	 * giving a minimum RTO of 200 msec. This agrees with RFC 4341, 5:
	 * "Because DCCP does not retransmit data, DCCP does not require
	 *  TCP's recommended minimum timeout of one second".
	 */
	hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;

	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;
}

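/*
 * ccid2_new_ack  -  Process the Ack of a freshly acknowledged packet
 *
 * Grows cwnd in slow start or congestion avoidance when the sequence windows
 * permit, keeps Ack Ratio and Sequence Window proportional to the window
 * (RFC 4340, 7.5.2), and feeds the packet's RTT sample to the estimator.
 */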
static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
			  unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;

	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
	    r_seq_used < dp->dccps_r_seq_win) {
		if (hc->tx_cwnd < hc->tx_ssthresh) {
			/* slow start: cwnd += 1 for every second new Ack */
			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
				hc->tx_cwnd += 1;
				*maxincr -= 1;
				hc->tx_packets_acked = 0;
			}
		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
			/* congestion avoidance: cwnd += 1 per window of Acks */
			hc->tx_cwnd += 1;
			hc->tx_packets_acked = 0;
		}
	}

	/*
	 * Adjust the local sequence window and the Ack Ratio to allow about
	 * CCID2_WIN_CHANGE_FACTOR times the number of packets in the network
	 * (RFC 4340, 7.5.2).
	 */
	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);

	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);

	/*
	 * FIXME: RTT is sampled several times per acknowledgment (once per
	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
	 * This causes the RTT to be over-estimated, since the older entries
	 * in the Ack Vector have earlier sending times.
	 */
	ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
}

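/*
 * ccid2_congestion_event  -  React to loss or ECN marking
 *
 * Halves cwnd and updates ssthresh, treating all losses within one RTT as a
 * single congestion event.
 */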
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
		return;
	}

	hc->tx_last_cong = ccid2_jiffies32;

	hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
	hc->tx_ssthresh = max(hc->tx_cwnd, 2U);

	ccid2_check_l_ack_ratio(sk);
}

static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
				     u8 option, u8 *optval, u8 optlen)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	switch (option) {
	case DCCPO_ACK_VECTOR_0:
	case DCCPO_ACK_VECTOR_1:
		return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
					      option - DCCPO_ACK_VECTOR_0);
	}
	return 0;
}

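/*
 * ccid2_hc_tx_packet_recv  -  Main Ack processing
 *
 * Tracks reverse-path congestion via duplicate Acks, walks the parsed Ack
 * Vectors against the sent-packet history, detects losses by the NUMDUPACK
 * rule, and finally trims the history and re-arms or stops the RTO timer.
 */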
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
	struct dccp_ackvec_parsed *avp;
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	int done = 0;
	unsigned int maxincr = 0;

	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/*
	 * Gaps in the reverse-path sequence numbers indicate that our Acks
	 * are being lost. If that happens NUMDUPACK times, the (currently
	 * disabled) response would be to double the Ack Ratio.
	 */
	if (hc->tx_rpdupack == -1) {
		/* first packet seen on the reverse path */
		hc->tx_rpdupack = 0;
		hc->tx_rpseq = seqno;
	} else {
		/* the next expected packet advances the reverse-path seqno */
		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
			hc->tx_rpseq = seqno;
		/* a jump in sequence numbers counts as a duplicate */
		else if (after48(seqno, hc->tx_rpseq)) {
			hc->tx_rpdupack++;

			/* check if we got enough dupacks */
			if (hc->tx_rpdupack >= NUMDUPACK) {
				hc->tx_rpdupack = -1;
				hc->tx_rpseq = 0;
#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
				/*
				 * Disabled: raising the Ack Ratio here caused
				 * instabilities whenever Ack Ratios greater
				 * than 1 were in use.
				 */
				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
#endif
			}
		}
	}

	/* check forward path congestion */
	if (dccp_packet_without_ack(skb))
		return;

	/* still didn't send out new data packets */
	if (hc->tx_seqh == hc->tx_seqt)
		goto done;

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hc->tx_high_ack))
		hc->tx_high_ack = ackno;

	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hc->tx_cwnd < hc->tx_ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all Ack Vectors */
	list_for_each_entry(avp, &hc->tx_av_chunks, node) {
		/* go through this Ack Vector */
		for (; avp->len--; avp->vec++) {
			u64 ackno_end_rl = SUB48(ackno,
						 dccp_ackvec_runlen(avp->vec));

			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
				       (unsigned long long)ackno,
				       dccp_ackvec_state(avp->vec) >> 6,
				       dccp_ackvec_runlen(avp->vec));
			/*
			 * If the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/*
			 * check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
				const u8 state = dccp_ackvec_state(avp->vec);

				/* new packet received or marked */
				if (state != DCCPAV_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state == DCCPAV_ECN_MARKED)
						ccid2_congestion_event(sk,
								       seqp);
					else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					hc->tx_pipe--;
				}
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			ackno = SUB48(ackno_end_rl, 1);
		}
		if (done)
			break;
	}

	/*
	 * The state about what is acked should be correct now.
	 * Check for NUMDUPACK.
	 */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hc->tx_seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/*
	 * If at least NUMDUPACK packets above the highest unacked one have
	 * been acked, everything unacked below them is considered lost.
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				/*
				 * XXX: need to traverse from tail -> head in
				 * order to detect multiple congestion events
				 * in one ack vector.
				 */
				ccid2_congestion_event(sk, seqp);
				hc->tx_pipe--;
			}
			if (seqp == hc->tx_seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hc->tx_seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hc->tx_seqt != hc->tx_seqh) {
		if (!hc->tx_seqt->ccid2s_acked)
			break;

		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
	}

	/* restart RTO timer if not all outstanding data has been acked */
	if (hc->tx_pipe == 0)
		sk_stop_timer(sk, &hc->tx_rtotimer);
	else
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
done:
	/* check if incoming Acks allow pending packets to be sent */
	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
		dccp_tasklet_schedule(sk);
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

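/*
 * ccid2_hc_tx_init  -  Initialise the sender half-connection
 *
 * Sets ssthresh to "infinity" and cwnd to the RFC 3390 initial window, per
 * RFC 4341, 5, and allocates the first packet-history chunk.
 */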
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio;

	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
	hc->tx_ssthresh = ~0U;

	/* Use larger initial windows (RFC 4341, section 5) */
	hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
	hc->tx_expected_wnd = hc->tx_cwnd;

	/* Make sure that Ack Ratio is enabled and within bounds */
	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
		dp->dccps_l_ack_ratio = max_ratio;

	/* allocate the first chunk of the packet history */
	if (ccid2_hc_tx_alloc_seq(hc))
		return -ENOMEM;

	hc->tx_rto = DCCP_TIMEOUT_INIT;
	hc->tx_rpdupack = -1;
	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
	hc->tx_cwnd_used = 0;
	hc->sk = sk;
	timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0);
	INIT_LIST_HEAD(&hc->tx_av_chunks);
	return 0;
}

static void ccid2_hc_tx_exit(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	int i;

	sk_stop_timer(sk, &hc->tx_rtotimer);

	for (i = 0; i < hc->tx_seqbufc; i++)
		kfree(hc->tx_seqbuf[i]);
	hc->tx_seqbufc = 0;
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

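/*
 * ccid2_hc_rx_packet_recv  -  Receiver side of the Ack Ratio feature
 *
 * Counts incoming data packets and acknowledges every dccps_r_ack_ratio-th
 * one, i.e. one Ack per Ack Ratio data packets.
 */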
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);

	if (!dccp_data_packet(skb))
		return;

	if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
		dccp_send_ack(sk);
		hc->rx_num_data_pkts = 0;
	}
}

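/* CCID-2 entry points registered with the DCCP CCID framework */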
struct ccid_operations ccid2_ops = {
	.ccid_id		  = DCCPC_CCID2,
	.ccid_name		  = "TCP-like",
	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
};

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
#endif