This source file includes the following definitions:
- init_crandom
- get_crandom
- loss_4state
- loss_gilb_ell
- loss_event
- tabledist
- packet_time_ns
- tfifo_reset
- tfifo_enqueue
- netem_segment
- netem_enqueue
- get_slot_next
- netem_peek
- netem_erase_head
- netem_dequeue
- netem_reset
- dist_free
- get_dist_table
- get_slot
- get_correlation
- get_reorder
- get_corrupt
- get_rate
- get_loss_clg
- parse_attr
- netem_change
- netem_init
- netem_destroy
- dump_loss_model
- netem_dump
- netem_dump_class
- netem_graft
- netem_leaf
- netem_find
- netem_walk
- netem_module_init
- netem_module_exit
/* net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from NIST Net,
 * which is not the same but a similar idea.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
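
/* Illustrative note (not from the original source): from userspace this
 * qdisc is driven through tc(8), e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * adds 100ms +/- 10ms of delay (25% correlated) plus 0.3% random loss.
 * Everything below implements the kernel side of those knobs.
 */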

/* Network Emulation Queuing algorithm.
 * ====================================
 *
 * Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
 *	    Network Emulation Tool"
 *	    [2] Luigi Rizzo, DummyNet for FreeBSD
 *
 * ----------------------------------------------------------------
 *
 * This started out as a simple way to delay outgoing packets to
 * test TCP but has grown to include most of the functionality
 * of a full blown network emulator like NISTnet. It can delay
 * packets and add random jitter (and correlation). The random
 * distribution can be loaded from a table as well to provide
 * normal, Pareto, or experimental curves. Packet loss,
 * duplication, and reordering can also be emulated.
 *
 * This qdisc does not do classification that can be handled in
 * layering other disciplines.  It does not need to do bandwidth
 * control either since that can be handled by using token
 * bucket or other rate control.
 *
 * Correlated Loss Generator models
 *
 * Added generation of correlated loss according to the
 * "Gilbert-Elliot" model, a 4-state markov model.
 *
 * References:
 * [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
 * [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
 * and intuitive loss model for packet networks and its implementation
 * in the Netem module in the Linux kernel", available in [1]
 *
 * Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
 *	    Fabio Ludovici <fabio.ludovici at yahoo.it>
 */

struct disttable {
	u32  size;
	s16 table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->dev,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
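
/* Illustrative note (not from the original source): the expression above is
 * a 32.32 fixed-point weighted average.  With rho = 0xC0000000 (a correlation
 * of 0.75 after scaling by 2^32) it computes approximately
 *
 *	answer = 0.25 * value + 0.75 * last
 *
 * so consecutive outputs drift slowly instead of jumping around, while
 * rho = 0 degenerates to plain prandom_u32().
 */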

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
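
/* Illustrative note (not from the original source): the five transition
 * probabilities are supplied from userspace as percentages via tc(8), e.g.
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 70% 0.1% 1%
 *
 * which iproute2 scales to u32 fixed point (p13 p31 p32 p23 p14) and packs
 * into a struct tc_netem_gimodel carried in a NETEM_LOSS_GI attribute.
 */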

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)	/* transition to bad state */
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)	/* loss even in good state */
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)	/* back to good state */
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)	/* loss in bad state (1-h) */
			return true;
	}

	return false;
}
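
/* Illustrative note (not from the original source): the Gilbert-Elliot
 * parameters also come from tc(8), e.g.
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * i.e. p (good->bad), r (bad->good), 1-h (loss prob. in the bad state) and
 * 1-k (loss prob. in the good state), delivered as a struct tc_netem_gemodel
 * in a NETEM_LOSS_GE attribute.
 */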

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
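
/* Illustrative note (not from the original source): the table entries are
 * distribution samples scaled by NETEM_DIST_SCALE (8192), so the result is
 * roughly mu + t * sigma / NETEM_DIST_SCALE.  E.g. with mu = 100ms,
 * sigma = 10ms and a table entry t = -4096, the returned delay is about
 * 100ms + (-0.5 * 10ms) = 95ms.  Splitting sigma into quotient and
 * remainder terms merely avoids overflowing sigma * t.
 */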

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
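
/* Illustrative note (not from the original source): q->rate is in bytes per
 * second, so e.g. at "rate 1mbit" (125000 bytes/s) a 1500 byte packet
 * occupies 1500 * 1e9 / 125000 = 12,000,000 ns = 12 ms of link time, which
 * is the serialization delay added ahead of the following packet.
 */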

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		/* common case: in-order arrival, append to the linear list */
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		/* out-of-order: O(log n) insertion into the rbtree */
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering: if in random mode or
		 * if reorder happens, reinsert at head of queue
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* parent qdiscs accounted for one skb of size prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
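
/* Illustrative note (not from the original source): slotting models shared
 * media such as Wi-Fi or DOCSIS, where packets leave in bursts.  E.g.
 *
 *	tc qdisc add dev eth0 root netem slot 800us 10ms packets 32
 *
 * delivers up to 32 packets per slot, with each slot opening a uniformly
 * distributed 800us-10ms after the previous one.
 */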

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	/* publish the new table, then free the old one outside the lock */
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
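
/* Illustrative note (not from the original source): the tables themselves
 * are generated offline.  iproute2 ships normal, pareto and paretonormal
 * tables (plus a maketable tool for experimental data), and tc loads them
 * with e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms distribution normal
 *
 * which sends the s16 samples in a TCA_NETEM_DELAY_DIST attribute.
 */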

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
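
/* Illustrative note (not from the original source): the rate knobs map to
 * tc(8) as "rate RATE [PACKETOVERHEAD [CELLSIZE [CELLOVERHEAD]]]", e.g.
 *
 *	tc qdisc add dev eth0 root netem rate 5mbit 20 100 5
 *
 * to emulate per-packet framing overhead plus cell (e.g. ATM) padding.
 */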

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

/* parse the legacy struct tc_netem_qopt that prefixes TCA_OPTIONS and any
 * nested attributes that follow it
 */
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case of
	 * q->clg and q->loss_model were modified
	 * in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
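
/* Illustrative note (not from the original source): this dump path is what
 * backs "tc qdisc show", which renders the attributes roughly as
 *
 *	qdisc netem 8001: root refcnt 2 limit 1000 delay 100ms  10ms loss 1%
 *
 * (exact formatting depends on the iproute2 version in use).
 */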

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");