Lines matching refs: q (net/sched/sch_netem.c, the netem network-emulation qdisc)

197 static bool loss_4state(struct netem_sched_data *q)  in loss_4state()  argument
199 struct clgstate *clg = &q->clg; in loss_4state()
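loss_4state() steps the 4-state Markov loss chain configured via tc's "loss state" option; the p13/p31/p32/p23/p14 parameters it uses are loaded into clg->a1..a5 by get_loss_clg() further down. Since this listing only shows lines referencing q, here is a hedged, self-contained userspace sketch of the model itself: double-valued probabilities and rand() stand in for the kernel's scaled-u32 thresholds and PRNG, state names are shortened from the upstream TX_IN_GAP_PERIOD etc., and a packet is lost whenever the chain lands in a loss state.

        #include <stdbool.h>
        #include <stdlib.h>

        /* Userspace sketch of the 4-state loss chain: packets are delivered
         * in the two TX states and dropped in the two LOST states; an
         * isolated loss always returns to the gap period. */
        enum fs_state { TX_GAP, TX_BURST, LOST_BURST, LOST_ISOLATED };

        struct four_state {
                enum fs_state state;
                double p13, p31, p32, p23, p14; /* transition probabilities */
        };

        static bool fs_lose_packet(struct four_state *s)
        {
                double r = rand() / (double)RAND_MAX;

                switch (s->state) {
                case TX_GAP:                    /* good reception, gap period */
                        if (r < s->p13)
                                s->state = LOST_BURST;
                        else if (r < s->p13 + s->p14)
                                s->state = LOST_ISOLATED;
                        break;
                case TX_BURST:                  /* good reception, burst period */
                        if (r < s->p23)
                                s->state = LOST_BURST;
                        break;
                case LOST_BURST:                /* packet lost within a burst */
                        if (r < s->p31)
                                s->state = TX_GAP;
                        else if (r < s->p31 + s->p32)
                                s->state = TX_BURST;
                        break;
                case LOST_ISOLATED:             /* isolated loss: back to gap */
                        s->state = TX_GAP;
                        break;
                }
                return s->state == LOST_BURST || s->state == LOST_ISOLATED;
        }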
262 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
264 struct clgstate *clg = &q->clg; in loss_gilb_ell()
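loss_gilb_ell() is the two-state Gilbert-Elliott variant; get_loss_clg() below maps clg->a1..a4 to p, r, h and k1. A minimal sketch, assuming the usual tc-netem convention that 1-k and 1-h are the loss probabilities in the good and bad state, with frand() standing in for the kernel PRNG:

        #include <stdbool.h>
        #include <stdlib.h>

        /* Gilbert-Elliott loss model: a hidden two-state chain where each
         * state has its own loss probability.  Sketch only; the kernel keeps
         * these as scaled u32 thresholds in clg->a1..a4. */
        struct gilb_ell {
                bool bad;               /* current channel state */
                double p;               /* P(good -> bad) */
                double r;               /* P(bad -> good) */
                double loss_good;       /* 1-k: loss probability, good state */
                double loss_bad;        /* 1-h: loss probability, bad state */
        };

        static double frand(void)
        {
                return rand() / (double)RAND_MAX;
        }

        static bool ge_lose_packet(struct gilb_ell *g)
        {
                if (!g->bad) {
                        if (frand() < g->p)
                                g->bad = true;
                        return frand() < g->loss_good;
                }
                if (frand() < g->r)
                        g->bad = false;
                return frand() < g->loss_bad;
        }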
283 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
285 switch (q->loss_model) { in loss_event()
288 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
296 return loss_4state(q); in loss_event()
304 return loss_gilb_ell(q); in loss_event()
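The CLG_RANDOM branch at line 288 compares the configured loss probability against a draw from a correlated random stream; the same get_crandom()/init_crandom() pair also drives the delay, duplication, reorder and corrupt correlations used throughout this file. A sketch patterned on the upstream helper, with rand32() as a stand-in for the kernel PRNG:

        #include <stdint.h>
        #include <stdlib.h>

        /* Exponentially correlated random stream: each output blends fresh
         * randomness with the previous output, weighted by rho (0 means
         * fully independent, values near 2^32 strongly correlated). */
        struct crndstate {
                uint32_t last;
                uint32_t rho;
        };

        static uint32_t rand32(void)
        {
                return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
        }

        static uint32_t get_crandom(struct crndstate *state)
        {
                uint64_t value, rho;

                if (state->rho == 0)    /* no correlation configured */
                        return rand32();

                value = rand32();
                rho = (uint64_t)state->rho + 1;
                state->last = (value * ((1ull << 32) - rho) +
                               (uint64_t)state->last * rho) >> 32;
                return state->last;
        }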
342 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) in packet_len_2_sched_time() argument
346 len += q->packet_overhead; in packet_len_2_sched_time()
348 if (q->cell_size) { in packet_len_2_sched_time()
349 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); in packet_len_2_sched_time()
351 if (len > cells * q->cell_size) /* extra cell needed for remainder */ in packet_len_2_sched_time()
353 len = cells * (q->cell_size + q->cell_overhead); in packet_len_2_sched_time()
358 do_div(ticks, q->rate); in packet_len_2_sched_time()
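packet_len_2_sched_time() converts a packet length into its transmission time at the configured rate; the line between 351 and 353 that bumps the cell count is hidden here because it does not reference q (the comment at 351 makes its purpose clear). A self-contained sketch of the same arithmetic, with plain division standing in for reciprocal_divide() and do_div(), returning nanoseconds rather than psched ticks:

        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000ull

        /* Length -> transmit time: add per-packet overhead, round up to a
         * whole number of link-layer cells (e.g. ATM), then divide by the
         * byte rate. */
        static uint64_t len_to_tx_ns(uint32_t len, uint32_t packet_overhead,
                                     uint32_t cell_size, uint32_t cell_overhead,
                                     uint64_t rate_bytes_per_sec)
        {
                len += packet_overhead;

                if (cell_size) {
                        uint32_t cells = len / cell_size;

                        if (len > cells * cell_size)    /* remainder needs one more cell */
                                cells++;
                        len = cells * (cell_size + cell_overhead);
                }

                return (uint64_t)len * NSEC_PER_SEC / rate_bytes_per_sec;
        }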
364 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_reset() local
367 while ((p = rb_first(&q->t_root))) { in tfifo_reset()
370 rb_erase(p, &q->t_root); in tfifo_reset()
379 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_enqueue() local
381 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; in tfifo_enqueue()
394 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
395 sch->q.qlen++; in tfifo_enqueue()
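tfifo_reset() and tfifo_enqueue() keep the delayed packets in an rbtree (q->t_root) keyed by each packet's time_to_send, so the soonest-due packet is always the leftmost node. A minimal userspace sketch of the ordering rule, with an unbalanced BST standing in for the kernel rbtree (rb_link_node()/rb_insert_color() add the rebalancing):

        #include <stddef.h>
        #include <stdint.h>

        /* Keep packets sorted by time_to_send; equal keys go right so
         * packets due at the same instant stay in FIFO order. */
        struct pkt {
                uint64_t time_to_send;
                struct pkt *left, *right;
        };

        static void tfifo_insert(struct pkt **root, struct pkt *nskb)
        {
                struct pkt **p = root;

                while (*p) {
                        if (nskb->time_to_send >= (*p)->time_to_send)
                                p = &(*p)->right;       /* >= keeps ties FIFO */
                        else
                                p = &(*p)->left;
                }
                nskb->left = nskb->right = NULL;
                *p = nskb;
        }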
406 struct netem_sched_data *q = qdisc_priv(sch); in netem_enqueue() local
413 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) in netem_enqueue()
417 if (loss_event(q)) { in netem_enqueue()
418 if (q->ecn && INET_ECN_set_ce(skb)) in netem_enqueue()
432 if (q->latency || q->jitter) in netem_enqueue()
442 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ in netem_enqueue()
443 q->duplicate = 0; in netem_enqueue()
446 q->duplicate = dupsave; in netem_enqueue()
455 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { in netem_enqueue()
465 if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) in netem_enqueue()
471 if (q->gap == 0 || /* not doing reordering */ in netem_enqueue()
472 q->counter < q->gap - 1 || /* inside last reordering gap */ in netem_enqueue()
473 q->reorder < get_crandom(&q->reorder_cor)) { in netem_enqueue()
477 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
478 &q->delay_cor, q->delay_dist); in netem_enqueue()
482 if (q->rate) { in netem_enqueue()
485 if (!skb_queue_empty(&sch->q)) in netem_enqueue()
486 last = skb_peek_tail(&sch->q); in netem_enqueue()
488 last = netem_rb_to_skb(rb_last(&q->t_root)); in netem_enqueue()
500 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); in netem_enqueue()
505 ++q->counter; in netem_enqueue()
513 q->counter = 0; in netem_enqueue()
515 __skb_queue_head(&sch->q, skb); in netem_enqueue()
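Inside netem_enqueue(), the test at lines 471-473 decides whether the packet takes the normal delayed path or is queued at the head of sch->q (line 515) to emulate reordering: every gap-th packet is a candidate, subject to the reorder probability, and q->counter (lines 505 and 513) tracks the position inside the gap. A sketch of just that decision, with the caller supplying the random draw that get_crandom() would produce:

        #include <stdbool.h>
        #include <stdint.h>

        struct reorder_state {
                uint32_t gap;           /* reorder every gap-th packet (0 = off) */
                uint32_t reorder;       /* probability threshold, 0..UINT32_MAX */
                uint32_t counter;       /* packets since the last reorder */
        };

        static bool delay_this_packet(struct reorder_state *s, uint32_t rnd)
        {
                if (s->gap == 0 ||              /* not doing reordering */
                    s->counter < s->gap - 1 ||  /* inside the current gap */
                    s->reorder < rnd) {         /* probability check failed */
                        s->counter++;
                        return true;            /* normal path: delay it */
                }
                s->counter = 0;
                return false;   /* send now, jumping ahead of delayed packets */
        }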
524 struct netem_sched_data *q = qdisc_priv(sch); in netem_drop() local
530 struct rb_node *p = rb_first(&q->t_root); in netem_drop()
535 rb_erase(p, &q->t_root); in netem_drop()
536 sch->q.qlen--; in netem_drop()
543 if (!len && q->qdisc && q->qdisc->ops->drop) in netem_drop()
544 len = q->qdisc->ops->drop(q->qdisc); in netem_drop()
553 struct netem_sched_data *q = qdisc_priv(sch); in netem_dequeue() local
561 skb = __skb_dequeue(&sch->q); in netem_dequeue()
569 p = rb_first(&q->t_root); in netem_dequeue()
578 rb_erase(p, &q->t_root); in netem_dequeue()
580 sch->q.qlen--; in netem_dequeue()
595 if (q->qdisc) { in netem_dequeue()
596 int err = qdisc_enqueue(skb, q->qdisc); in netem_dequeue()
609 if (q->qdisc) { in netem_dequeue()
610 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
614 qdisc_watchdog_schedule(&q->watchdog, time_to_send); in netem_dequeue()
617 if (q->qdisc) { in netem_dequeue()
618 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
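netem_dequeue() first drains any send-now packets from sch->q (line 561), then peeks the leftmost, earliest-due entry of the rbtree: if its time_to_send has been reached the packet is released, or handed to the child qdisc when one is grafted (lines 595-596); otherwise the qdisc watchdog is armed for that instant (line 614). A userspace sketch of the release-or-arm step over the BST from the tfifo sketch, with now_ts and arm_timer() as assumed helpers:

        #include <stddef.h>
        #include <stdint.h>

        struct pkt {                    /* as in the tfifo sketch above */
                uint64_t time_to_send;
                struct pkt *left, *right;
        };

        /* If the earliest-due packet is ready, unlink and return it;
         * otherwise arm a timer for its due time, as
         * qdisc_watchdog_schedule() does in the kernel. */
        static struct pkt *dequeue_due(struct pkt **root, uint64_t now_ts,
                                       void (*arm_timer)(uint64_t))
        {
                struct pkt **p = root, *min;

                if (!*p)
                        return NULL;
                while ((*p)->left)      /* leftmost = smallest time_to_send */
                        p = &(*p)->left;
                min = *p;

                if (min->time_to_send > now_ts) {
                        arm_timer(min->time_to_send);
                        return NULL;
                }
                *p = min->right;        /* unlink the minimum, like rb_erase() */
                return min;
        }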
627 struct netem_sched_data *q = qdisc_priv(sch); in netem_reset() local
631 if (q->qdisc) in netem_reset()
632 qdisc_reset(q->qdisc); in netem_reset()
633 qdisc_watchdog_cancel(&q->watchdog); in netem_reset()
647 struct netem_sched_data *q = qdisc_priv(sch); in get_dist_table() local
672 swap(q->delay_dist, d); in get_dist_table()
679 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) in get_correlation() argument
683 init_crandom(&q->delay_cor, c->delay_corr); in get_correlation()
684 init_crandom(&q->loss_cor, c->loss_corr); in get_correlation()
685 init_crandom(&q->dup_cor, c->dup_corr); in get_correlation()
688 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) in get_reorder() argument
692 q->reorder = r->probability; in get_reorder()
693 init_crandom(&q->reorder_cor, r->correlation); in get_reorder()
696 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) in get_corrupt() argument
700 q->corrupt = r->probability; in get_corrupt()
701 init_crandom(&q->corrupt_cor, r->correlation); in get_corrupt()
704 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) in get_rate() argument
708 q->rate = r->rate; in get_rate()
709 q->packet_overhead = r->packet_overhead; in get_rate()
710 q->cell_size = r->cell_size; in get_rate()
711 q->cell_overhead = r->cell_overhead; in get_rate()
712 if (q->cell_size) in get_rate()
713 q->cell_size_reciprocal = reciprocal_value(q->cell_size); in get_rate()
715 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; in get_rate()
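get_rate() precomputes cell_size_reciprocal so the per-packet hot path in packet_len_2_sched_time() can divide by a multiply-and-shift instead of a hardware divide. A minimal fixed-point sketch of the idea, patterned on the older kernel reciprocal_div helpers; the struct reciprocal_value seen in the listing is the newer, exact scheme with extra shift fields:

        #include <stdint.h>

        /* Division by a runtime-invariant divisor via multiply + shift.
         * This simple ceil(2^32/d) form assumes d >= 2 and may round up by
         * one for rare inputs. */
        struct recip {
                uint32_t m;     /* ceil(2^32 / d) */
        };

        static struct recip recip_value(uint32_t d)
        {
                struct recip r = { (uint32_t)(((1ull << 32) + d - 1) / d) };
                return r;
        }

        static uint32_t recip_divide(uint32_t a, struct recip r)
        {
                return (uint32_t)(((uint64_t)a * r.m) >> 32);
        }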
718 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) in get_loss_clg() argument
735 q->loss_model = CLG_4_STATES; in get_loss_clg()
737 q->clg.state = TX_IN_GAP_PERIOD; in get_loss_clg()
738 q->clg.a1 = gi->p13; in get_loss_clg()
739 q->clg.a2 = gi->p31; in get_loss_clg()
740 q->clg.a3 = gi->p32; in get_loss_clg()
741 q->clg.a4 = gi->p14; in get_loss_clg()
742 q->clg.a5 = gi->p23; in get_loss_clg()
754 q->loss_model = CLG_GILB_ELL; in get_loss_clg()
755 q->clg.state = GOOD_STATE; in get_loss_clg()
756 q->clg.a1 = ge->p; in get_loss_clg()
757 q->clg.a2 = ge->r; in get_loss_clg()
758 q->clg.a3 = ge->h; in get_loss_clg()
759 q->clg.a4 = ge->k1; in get_loss_clg()
803 struct netem_sched_data *q = qdisc_priv(sch); in netem_change() local
819 old_clg = q->clg; in netem_change()
820 old_loss_model = q->loss_model; in netem_change()
823 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); in netem_change()
825 q->loss_model = old_loss_model; in netem_change()
829 q->loss_model = CLG_RANDOM; in netem_change()
839 q->clg = old_clg; in netem_change()
840 q->loss_model = old_loss_model; in netem_change()
847 q->latency = qopt->latency; in netem_change()
848 q->jitter = qopt->jitter; in netem_change()
849 q->limit = qopt->limit; in netem_change()
850 q->gap = qopt->gap; in netem_change()
851 q->counter = 0; in netem_change()
852 q->loss = qopt->loss; in netem_change()
853 q->duplicate = qopt->duplicate; in netem_change()
858 if (q->gap) in netem_change()
859 q->reorder = ~0; in netem_change()
862 get_correlation(q, tb[TCA_NETEM_CORR]); in netem_change()
865 get_reorder(q, tb[TCA_NETEM_REORDER]); in netem_change()
868 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); in netem_change()
871 get_rate(q, tb[TCA_NETEM_RATE]); in netem_change()
874 q->rate = max_t(u64, q->rate, in netem_change()
878 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); in netem_change()
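The fields written in netem_change() line up with the tc front end; for instance, a command like "tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 1% 25%" fills in qopt->latency, qopt->jitter, the delay correlation, and the random-loss probability and correlation handled above. Note the rollback pattern: the previous clg state and loss model are saved at lines 819-820 and restored at 825 or 839-840 if parsing a later attribute fails, so a bad change request leaves the qdisc configured as before.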
885 struct netem_sched_data *q = qdisc_priv(sch); in netem_init() local
891 qdisc_watchdog_init(&q->watchdog, sch); in netem_init()
893 q->loss_model = CLG_RANDOM; in netem_init()
902 struct netem_sched_data *q = qdisc_priv(sch); in netem_destroy() local
904 qdisc_watchdog_cancel(&q->watchdog); in netem_destroy()
905 if (q->qdisc) in netem_destroy()
906 qdisc_destroy(q->qdisc); in netem_destroy()
907 dist_free(q->delay_dist); in netem_destroy()
910 static int dump_loss_model(const struct netem_sched_data *q, in dump_loss_model() argument
919 switch (q->loss_model) { in dump_loss_model()
927 .p13 = q->clg.a1, in dump_loss_model()
928 .p31 = q->clg.a2, in dump_loss_model()
929 .p32 = q->clg.a3, in dump_loss_model()
930 .p14 = q->clg.a4, in dump_loss_model()
931 .p23 = q->clg.a5, in dump_loss_model()
940 .p = q->clg.a1, in dump_loss_model()
941 .r = q->clg.a2, in dump_loss_model()
942 .h = q->clg.a3, in dump_loss_model()
943 .k1 = q->clg.a4, in dump_loss_model()
962 const struct netem_sched_data *q = qdisc_priv(sch); in netem_dump() local
970 qopt.latency = q->latency; in netem_dump()
971 qopt.jitter = q->jitter; in netem_dump()
972 qopt.limit = q->limit; in netem_dump()
973 qopt.loss = q->loss; in netem_dump()
974 qopt.gap = q->gap; in netem_dump()
975 qopt.duplicate = q->duplicate; in netem_dump()
979 cor.delay_corr = q->delay_cor.rho; in netem_dump()
980 cor.loss_corr = q->loss_cor.rho; in netem_dump()
981 cor.dup_corr = q->dup_cor.rho; in netem_dump()
985 reorder.probability = q->reorder; in netem_dump()
986 reorder.correlation = q->reorder_cor.rho; in netem_dump()
990 corrupt.probability = q->corrupt; in netem_dump()
991 corrupt.correlation = q->corrupt_cor.rho; in netem_dump()
995 if (q->rate >= (1ULL << 32)) { in netem_dump()
996 if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate)) in netem_dump()
1000 rate.rate = q->rate; in netem_dump()
1002 rate.packet_overhead = q->packet_overhead; in netem_dump()
1003 rate.cell_size = q->cell_size; in netem_dump()
1004 rate.cell_overhead = q->cell_overhead; in netem_dump()
1008 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) in netem_dump()
1011 if (dump_loss_model(q, skb) != 0) in netem_dump()
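One detail worth noting at lines 995-1000: rates too large for the legacy 32-bit field are exported through the newer TCA_NETEM_RATE64 attribute. A hedged sketch of that compatibility split, with emit_u64() as a hypothetical stand-in for nla_put_u64():

        #include <stdint.h>

        /* Export a 64-bit rate through an API whose original field was u32:
         * small values use the legacy field; large ones go in the newer
         * attribute while the legacy field is saturated, so old readers see
         * "as fast as representable" rather than a truncated value. */
        static void export_rate(uint64_t rate, uint32_t *legacy_field,
                                void (*emit_u64)(uint64_t))
        {
                if (rate >= (1ull << 32)) {
                        emit_u64(rate);                 /* TCA_NETEM_RATE64 */
                        *legacy_field = UINT32_MAX;     /* saturate, don't truncate */
                } else {
                        *legacy_field = (uint32_t)rate;
                }
        }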
1024 struct netem_sched_data *q = qdisc_priv(sch); in netem_dump_class() local
1026 if (cl != 1 || !q->qdisc) /* only one class */ in netem_dump_class()
1030 tcm->tcm_info = q->qdisc->handle; in netem_dump_class()
1038 struct netem_sched_data *q = qdisc_priv(sch); in netem_graft() local
1041 *old = q->qdisc; in netem_graft()
1042 q->qdisc = new; in netem_graft()
1044 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); in netem_graft()
1054 struct netem_sched_data *q = qdisc_priv(sch); in netem_leaf() local
1055 return q->qdisc; in netem_leaf()