This source file includes the following definitions:
- dccp_ackvec_alloc
- dccp_ackvec_purge_records
- dccp_ackvec_free
- dccp_ackvec_update_records
- dccp_ackvec_lookup
- __ackvec_idx_add
- __ackvec_idx_sub
- dccp_ackvec_buflen
- dccp_ackvec_update_old
- dccp_ackvec_reserve_seats
- dccp_ackvec_add_new
- dccp_ackvec_input
- dccp_ackvec_clear_state
- dccp_ackvec_parsed_add
- dccp_ackvec_parsed_cleanup
- dccp_ackvec_init
- dccp_ackvec_exit
/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol.
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

	if (av != NULL) {
		av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
		INIT_LIST_HEAD(&av->av_records);
	}
	return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
	struct dccp_ackvec_record *cur, *next;

	list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
		kmem_cache_free(dccp_ackvec_record_slab, cur);
	INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
	if (likely(av != NULL)) {
		dccp_ackvec_purge_records(av);
		kmem_cache_free(dccp_ackvec_slab, av);
	}
}
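
/*
 * Usage sketch (assumption, not taken from this file): callers are expected
 * to allocate one Ack Vector per connection and to release it together with
 * its recorded history, e.g.
 *
 *	struct dccp_ackvec *av = dccp_ackvec_alloc(GFP_KERNEL);
 *
 *	if (av == NULL)
 *		return -ENOMEM;
 *	...
 *	dccp_ackvec_free(av);	// also purges av->av_records
 */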

/*
 * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
 * @av:        Ack Vector records to update
 * @seqno:     sequence number of the packet carrying the Ack Vector just sent
 * @nonce_sum: the sum of all buffer nonces
 */
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
	struct dccp_ackvec_record *avr;

	avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
	if (avr == NULL)
		return -ENOBUFS;

	avr->avr_ack_seqno = seqno;
	avr->avr_ack_ptr = av->av_buf_head;
	avr->avr_ack_ackno = av->av_buf_ackno;
	avr->avr_ack_nonce = nonce_sum;
	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);

	/*
	 * When the buffer overflows, we keep no more than one record: this is
	 * the simplest way of disambiguating sender-Acks dated from the time
	 * the buffer overflowed to the time it does not overflow anymore.
	 */
	if (av->av_overflow)
		dccp_ackvec_purge_records(av);

	/*
	 * The sequence number of each sent packet increases, so adding new
	 * records at the list head keeps the list sorted in descending order
	 * of avr_ack_seqno (newest record first).
	 */
	list_add(&avr->avr_node, &av->av_records);

	dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
		      (unsigned long long)avr->avr_ack_seqno,
		      (unsigned long long)avr->avr_ack_ackno,
		      avr->avr_ack_runlen);
	return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
						     const u64 ackno)
{
	struct dccp_ackvec_record *avr;

	/*
	 * Exploit that records are kept in descending order of sequence
	 * number: iterate in reverse, i.e. oldest record first. As soon as
	 * @ackno is 'before' the current record's ack_seqno, no later (and
	 * hence larger) record can match either.
	 */
	list_for_each_entry_reverse(avr, av_list, avr_node) {
		if (avr->avr_ack_seqno == ackno)
			return avr;
		if (before48(ackno, avr->avr_ack_seqno))
			break;
	}
	return NULL;
}

/*
 * Buffer index computations use modulo arithmetic on the circular buffer of
 * DCCPAV_MAX_ACKVEC_LEN cells. The buffer grows from right to left (the head
 * moves towards lower indices), so the head is 'before' the tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
	if (unlikely(av->av_overflow))
		return DCCPAV_MAX_ACKVEC_LEN;
	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}
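
/*
 * Worked example (illustrative only, using a hypothetical buffer size of 8
 * instead of the real DCCPAV_MAX_ACKVEC_LEN): if the head has wrapped around
 * to index 6 while the tail is still at index 1, the number of cells in use
 * is __ackvec_idx_sub(1, 6) = (1 + (8 - 6)) % 8 = 3, i.e. the cells at
 * indices 6, 7 and 0 -- the head remains 'before' the tail in modulo terms.
 */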

/*
 * dccp_ackvec_update_old  -  Update the state of an already-covered cell
 * @av:       non-empty buffer to update
 * @distance: negative or zero distance of @seqno from av_buf_ackno downward
 * @seqno:    the (old) sequence number whose cell is to be updated
 * @state:    state in which the packet carrying @seqno was received
 */
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
				   u64 seqno, enum dccp_ackvec_states state)
{
	u16 ptr = av->av_buf_head;

	BUG_ON(distance > 0);
	if (unlikely(dccp_ackvec_is_empty(av)))
		return;

	do {
		u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

		if (distance + runlen >= 0) {
			/*
			 * Only overwrite the cell if it still carries the
			 * exact value DCCPAV_NOT_RECEIVED, i.e. a single seat
			 * reserved by dccp_ackvec_reserve_seats(); any other
			 * cell is left untouched.
			 */
			if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
				av->av_buf[ptr] = state;
			else
				dccp_pr_debug("Not changing %llu state to %u\n",
					      (unsigned long long)seqno, state);
			break;
		}

		distance += runlen + 1;
		ptr = __ackvec_idx_add(ptr, 1);

	} while (ptr != av->av_buf_tail);
}

/* Mark @num cells after av_buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
	u16 start = __ackvec_idx_add(av->av_buf_head, 1),
	    len = DCCPAV_MAX_ACKVEC_LEN - start;

	/* check for buffer wrap-around */
	if (num > len) {
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
		start = 0;
		num -= len;
	}
	if (num)
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}

/*
 * dccp_ackvec_add_new  -  Record one or more new entries in the Ack Vector
 * @av:          container of the buffer to update (may be empty or non-empty)
 * @num_packets: number of packets to register (must be >= 1)
 * @seqno:       sequence number of the first packet in @num_packets
 * @state:       state in which the packet carrying @seqno was received
 */
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
				u64 seqno, enum dccp_ackvec_states state)
{
	u32 num_cells = num_packets;

	if (num_packets > DCCPAV_BURST_THRESH) {
		u32 lost_packets = num_packets - 1;

		DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
		/*
		 * One packet was received and num_packets-1 are missing.
		 * Rather than reserving an entire cell per lost packet, the
		 * loss is compressed into run-length-encoded cells of up to
		 * DCCPAV_MAX_RUNLEN, so that only about one cell per
		 * DCCPAV_MAX_RUNLEN lost packets is consumed. The trade-off is
		 * that packets from the burst which show up later can no
		 * longer change the recorded state, since their seats were not
		 * reserved individually.
		 */
		for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
			u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN);

			av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
			av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

			lost_packets -= len;
		}
	}

	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
		av->av_overflow = true;
	}

	av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
	if (av->av_overflow)
		av->av_buf_tail = av->av_buf_head;

	av->av_buf[av->av_buf_head] = state;
	av->av_buf_ackno = seqno;

	if (num_packets > 1)
		dccp_ackvec_reserve_seats(av, num_packets - 1);
}

/*
 * dccp_ackvec_input  -  Register an incoming packet in the Ack Vector buffer
 */
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
	u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
	enum dccp_ackvec_states state = DCCPAV_RECEIVED;

	if (dccp_ackvec_is_empty(av)) {
		dccp_ackvec_add_new(av, 1, seqno, state);
		av->av_tail_ackno = seqno;

	} else {
		s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
		u8 *current_head = av->av_buf + av->av_buf_head;

		if (num_packets == 1 &&
		    dccp_ackvec_state(current_head) == state &&
		    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
			/* in-sequence packet: extend the current run */
			*current_head += 1;
			av->av_buf_ackno = seqno;

		} else if (num_packets > 0) {
			/* packet is ahead of av_buf_ackno: add new cell(s) */
			dccp_ackvec_add_new(av, num_packets, seqno, state);
		} else {
			/* older, reordered packet: update its existing cell */
			dccp_ackvec_update_old(av, num_packets, seqno, state);
		}
	}
}
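
/*
 * Illustrative example (hypothetical sequence numbers): if av_buf_ackno is 10
 * and a packet with seqno 13 arrives, dccp_delta_seqno(10, 13) == 3, so
 * dccp_ackvec_add_new() is called with num_packets == 3. It moves av_buf_head
 * back by three cells, marks the new head cell DCCPAV_RECEIVED for seqno 13,
 * and reserves the two cells behind it as DCCPAV_NOT_RECEIVED for the missing
 * seqnos 12 and 11. Should one of those arrive later (num_packets <= 0),
 * dccp_ackvec_update_old() flips the corresponding reserved cell.
 */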

/*
 * dccp_ackvec_clear_state  -  Perform house-keeping / garbage collection
 * @av:    Ack Vector buffer to clean
 * @ackno: sequence number up to which sent Ack Vectors have been acknowledged
 *
 * Called when the peer acknowledges the receipt of Ack Vectors up to and
 * including @ackno, so that the corresponding buffer space can be released.
 */
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
	struct dccp_ackvec_record *avr, *next;
	u8 runlen_now, eff_runlen;
	s64 delta;

	avr = dccp_ackvec_lookup(&av->av_records, ackno);
	if (avr == NULL)
		return;
	/*
	 * Deal with outdated acknowledgments: this arises when e.g. there are
	 * several old records and the acks from the peer come in slowly. In
	 * that case there may still be records that pre-date tail_ackno.
	 */
	delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
	if (delta < 0)
		goto free_records;
	/*
	 * Deal with overlapping Ack Vectors: don't subtract more than the
	 * number of packets between tail_ackno and ack_ackno.
	 */
	eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

	runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
	/*
	 * The run length of a cell does not decrease over time. If it is still
	 * the same as when the Ack Vector was sent, the whole cell can be
	 * released. If it has grown since, the cell is kept and only shortened
	 * by the portion that the peer has acknowledged.
	 */
	if (runlen_now > eff_runlen) {
		/* keep the cell, drop the acknowledged part of its run */
		av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
		av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

		/* this move may not have cleared the overflow condition */
		if (av->av_overflow)
			av->av_overflow = (av->av_buf_head == av->av_buf_tail);
	} else {
		av->av_buf_tail = avr->avr_ack_ptr;
		/*
		 * avr points to a valid cell within the buffer: the new tail
		 * is either older than the head or equals it (empty buffer),
		 * so in both cases the overflow condition no longer holds.
		 */
		av->av_overflow = 0;
	}

	/*
	 * The peer has acknowledged up to and including ack_ackno, hence the
	 * first sequence number which is not yet covered by an acknowledged
	 * Ack Vector is ack_ackno + 1.
	 */
	av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
	list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
		list_del(&avr->avr_node);
		kmem_cache_free(dccp_ackvec_record_slab, avr);
	}
}

/*
 * Routines to keep track of Ack Vectors received in an skb.
 */
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
	struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

	if (new == NULL)
		return -ENOBUFS;
	new->vec = vec;
	new->len = len;
	new->nonce = nonce;

	list_add_tail(&new->node, head);
	return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
	struct dccp_ackvec_parsed *cur, *next;

	list_for_each_entry_safe(cur, next, parsed_chunks, node)
		kfree(cur);
	INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);

int __init dccp_ackvec_init(void)
{
	dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
					     sizeof(struct dccp_ackvec), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_slab == NULL)
		goto out_err;

	dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
					sizeof(struct dccp_ackvec_record),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_record_slab == NULL)
		goto out_destroy_slab;

	return 0;

out_destroy_slab:
	kmem_cache_destroy(dccp_ackvec_slab);
	dccp_ackvec_slab = NULL;
out_err:
	DCCP_CRIT("Unable to create Ack Vector slab cache");
	return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
	kmem_cache_destroy(dccp_ackvec_slab);
	dccp_ackvec_slab = NULL;
	kmem_cache_destroy(dccp_ackvec_record_slab);
	dccp_ackvec_record_slab = NULL;
}
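
/*
 * Usage sketch (assumption, not taken from this file): the DCCP protocol code
 * is expected to call dccp_ackvec_init() once during initialisation and
 * dccp_ackvec_exit() on the teardown path, e.g.
 *
 *	rc = dccp_ackvec_init();
 *	if (rc)
 *		goto out;	// propagates -ENOBUFS
 *	...
 *	dccp_ackvec_exit();
 */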