This source file includes the following definitions:
- sctp_ulpq_init
- sctp_ulpq_flush
- sctp_ulpq_free
- sctp_ulpq_tail_data
- sctp_clear_pd
- sctp_ulpq_set_pd
- sctp_ulpq_clear_pd
- sctp_ulpq_tail_event
- sctp_ulpq_store_reasm
- sctp_make_reassembled_event
- sctp_ulpq_retrieve_reassembled
- sctp_ulpq_retrieve_partial
- sctp_ulpq_reasm
- sctp_ulpq_retrieve_first
- sctp_ulpq_reasm_flushtsn
- sctp_ulpq_reasm_drain
- sctp_ulpq_retrieve_ordered
- sctp_ulpq_store_ordered
- sctp_ulpq_order
- sctp_ulpq_reap_ordered
- sctp_ulpq_skip
- sctp_ulpq_renege_list
- sctp_ulpq_renege_order
- sctp_ulpq_renege_frags
- sctp_ulpq_partial_delivery
- sctp_ulpq_renege
- sctp_ulpq_abort_pd

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}
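
/*
 * Usage sketch (illustrative, not part of the original file): an
 * association embeds its ULP queue (struct sctp_association has a
 * 'ulpq' member, used further below as asoc->ulpq), so setup and
 * teardown look roughly like this.  Error handling is elided.
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);
 *	...
 *	// feed DATA chunks in via sctp_ulpq_tail_data() as they arrive
 *	...
 *	sctp_ulpq_free(&asoc->ulpq);	// frees anything still queued
 */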

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if (event) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}
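
/*
 * Return-value sketch (illustrative, not from the original file):
 * sctp_ulpq_tail_data() returns -ENOMEM on allocation failure, 1 when
 * a complete message (MSG_EOR) was handed toward the socket, and 0
 * when the chunk was absorbed (parked for reassembly/ordering, or
 * delivered without EOR).  A hypothetical caller can key recovery
 * actions off that value, much as sctp_ulpq_renege() does below:
 *
 *	int ret = sctp_ulpq_tail_data(ulpq, chunk, GFP_ATOMIC);
 *
 *	if (ret < 0)
 *		;	// allocation failed; nothing was queued
 *	else if (ret == 0)
 *		;	// absorbed; no complete message surfaced yet
 *	else
 *		;	// a full message reached the receive path
 */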

/* Clear the partial delivery mode on the socket.  Returns 1 if skbs
 * were moved from the pd_lobby to the socket receive queue.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq. */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
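
/*
 * Accounting sketch (illustrative, not from the original file): partial
 * delivery is tracked at two levels.  sp->pd_mode is a socket-wide
 * atomic counter of how many associations on the socket are currently
 * in PD; ulpq->pd_mode is the per-association flag.  A one-to-one
 * pairing keeps them consistent:
 *
 *	sctp_ulpq_set_pd(ulpq);		// ulpq->pd_mode = 1, sp->pd_mode++
 *	...
 *	sctp_ulpq_clear_pd(ulpq);	// ulpq->pd_mode = 0, sp->pd_mode--
 *
 * Only when the counter drops to zero may the socket-wide pd_lobby be
 * spliced to the receive queue in one shot (see sctp_clear_pd() above).
 */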

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}
	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course _this_ is
	 * the association the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data, i.e. queue
			 * the data in the lobby unless it is data from the
			 * partial delivery.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If the socket is in partial delivery mode,
			 * assume the user cannot consume interleaved
			 * messages, so queue to the lobby unless the
			 * SCTP_FRAGMENT_INTERLEAVE socket option is set.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
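
/*
 * Queue-selection sketch (illustrative, not from the original file).
 * The routing above reduces to this table, where "assoc in PD" means
 * ulpq->pd_mode and "socket in PD" means sp->pd_mode != 0:
 *
 *	socket in PD	assoc in PD	event kind		target
 *	------------	-----------	--------------------	----------------
 *	no		-		any			sk_receive_queue
 *	yes		yes		notification/unfrag	pd_lobby
 *	yes		yes		message fragment	sk_receive_queue
 *	yes		no		any, no interleave	pd_lobby
 *	yes		no		any, interleave set	sk_receive_queue
 */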

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as enumerated by a walk on the reassembly queue.  The first skb of the
 * datagram is modified to carry the payloads of all its fragments in its
 * frag_list.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb. */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	/* Walk the remaining fragments, folding them into f_frag. */
	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
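
/*
 * Layout sketch (illustrative, not from the original file): after
 * reassembling fragments F (first), M (middle) and L (last), the queue
 * links of M and L are reused as a frag_list hanging off F, so the
 * datagram reads as a single skb:
 *
 *	before:  reasm queue:  ... -> F -> M -> L -> ...
 *	after:   F  (len += M->len + L->len, data_len += the same)
 *	           `-> skb_shinfo(F)->frag_list: M -> L
 *
 * The cloned-skb branch above exists because modifying the frag_list
 * of a clone would corrupt the other clone's view of the data;
 * skb_copy() provides a private header to hang the list on.
 */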

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  It is only
	 * referenced after being set, when we find the first fragment
	 * of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets
	 * the SCTP_PARTIAL_DELIVERY_POINT option, so count some things
	 * here to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible partial delivery.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && ctsn == next_tsn) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && ctsn == next_tsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
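
/*
 * Worked example (illustrative, not from the original file): with
 * SCTP_PARTIAL_DELIVERY_POINT set to 4096 bytes, suppose no complete
 * datagram is found but the head of the reasm queue holds an in-order
 * run FIRST(1452) + MIDDLE(1452) + MIDDLE(1452) with the LAST fragment
 * still missing.  Then pd_len = 3 * 1452 = 4356 >= 4096, so the walk
 * above hands those three fragments up as a partial message (no
 * MSG_EOR) and puts the association into partial delivery via
 * sctp_ulpq_set_pd().
 */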

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that still need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
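
/*
 * Worked example (illustrative, not from the original file): fragments
 * arrive out of order as TSN 103 (LAST), 101 (FIRST), 102 (MIDDLE).
 * sctp_ulpq_store_reasm() keeps the queue TSN-sorted, so after the
 * third arrival the queue reads 101, 102, 103, and the walk in
 * sctp_ulpq_retrieve_reassembled() sees FIRST(101), MIDDLE(102 ==
 * next_tsn), LAST(103 == next_tsn) and returns the reassembled event
 * with MSG_EOR set.  Had 102 never arrived, the LAST case would have
 * reset first_frag and nothing would be delivered.
 */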

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we have just cleared partial delivery,
 * the reassembly queue may contain messages that are already completely
 * reassembled.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed. */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the sctp_ulpevent
		 * for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	/* Short circuit just dropping it at the end. */
	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering. */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next ssn has been successfully received. */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
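
/*
 * Worked example (illustrative, not from the original file): on stream 3
 * the next expected SSN is 7.  Messages with SSN 8 and 9 arrive first
 * and are parked in the lobby (sorted by stream, then SSN).  When SSN 7
 * finally arrives, sctp_ulpq_order() lets it through, advances the
 * expected SSN, and sctp_ulpq_retrieve_ordered() then drains 8 and 9
 * from the lobby onto the same event list in one pass.
 */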

/* Helper function to gather skbs that have possibly become
 * ordered by a Forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* See if this ssn has been marked by skipping. */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk referencing an abandoned ordered message.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine. */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
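
/*
 * Worked example (illustrative, not from the original file): the peer
 * abandons the message with SSN 5 on stream 2 and announces it via a
 * FORWARD TSN.  sctp_ulpq_skip(ulpq, 2, 5) bumps the expected SSN past
 * 5, and sctp_ulpq_reap_ordered() then releases any lobby entries on
 * stream 2 (say SSN 6 and 7) that were only blocked by the abandoned
 * message.
 */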

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in the ordering queue may carry multiple
		 * fragments (and thus multiple TSNs) on their frag_list,
		 * so walk the list and account for every fragment we
		 * drop, remembering the last fragment's TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
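
/*
 * Worked example (illustrative, not from the original file): to free
 * needed = 3000 bytes, the walk starts at the tail, i.e. the
 * highest-TSN events, which are the cheapest to give back.  Suppose the
 * tail event holds a 1400-byte fragment with two more 1400-byte
 * fragments chained on its frag_list: freed becomes 4200 >= 3000 after
 * a single event, all three TSNs are marked reneged in the peer's
 * tsn_map (so they are no longer reported as received), and the walk
 * returns early.
 */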

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in partial delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			struct sk_buff_head temp;

			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
			sctp_ulpq_tail_event(ulpq, &temp);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
	    freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
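
/*
 * Flow sketch (illustrative, not from the original file): a DATA chunk
 * of 'needed' payload bytes arrives while receive memory is exhausted.
 * If the application has drained the receive queue, renege first from
 * the ordering lobby, then from the reassembly queue, always giving
 * back the highest TSNs.  Only if enough bytes were freed (and the
 * socket can schedule the memory) is the new chunk accepted; otherwise
 * it is simply not queued and the peer is expected to retransmit it.
 */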

/* Notify the application if an association is aborted while in partial
 * delivery mode, and send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sctp_sock *sp;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
				       SCTP_PARTIAL_DELIVERY_EVENT))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}