Lines matching references to q (from net/sched/sch_fq.c, the fq packet scheduler)
129 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
131 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
144 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
145 q->throttled_flows++; in fq_flow_set_throttled()
146 q->stat_throttled++; in fq_flow_set_throttled()
149 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
150 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
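
The fq_flow_set_throttled() lines above insert a rate-limited flow into q->delayed (an rbtree keyed by time_next_packet) and keep q->time_next_delayed_flow pointing at the earliest deadline. Below is a minimal userspace sketch of that idea, using a sorted singly linked list instead of an rbtree purely for brevity; all toy_* names are illustrative, not kernel identifiers.

#include <stdint.h>
#include <stdio.h>

struct toy_flow {
	uint64_t time_next_packet;	/* timestamp of the next allowed send */
	struct toy_flow *next;
};

struct toy_sched {
	struct toy_flow *delayed;	/* kept sorted by time_next_packet */
	uint64_t time_next_delayed_flow;/* earliest deadline, ~0 if none */
	unsigned int throttled_flows;
};

static void toy_flow_set_throttled(struct toy_sched *q, struct toy_flow *f)
{
	struct toy_flow **p = &q->delayed;

	/* walk to the insertion point, keeping the list time-ordered */
	while (*p && (*p)->time_next_packet <= f->time_next_packet)
		p = &(*p)->next;
	f->next = *p;
	*p = f;
	q->throttled_flows++;

	/* remember the earliest deadline so a watchdog timer can be armed */
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}

int main(void)
{
	struct toy_sched q = { .time_next_delayed_flow = ~0ULL };
	struct toy_flow a = { .time_next_packet = 3000 };
	struct toy_flow b = { .time_next_packet = 1000 };

	toy_flow_set_throttled(&q, &a);
	toy_flow_set_throttled(&q, &b);
	printf("earliest deadline: %llu\n",
	       (unsigned long long)q.time_next_delayed_flow); /* prints 1000 */
	return 0;
}
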
176 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
205 q->flows -= fcnt; in fq_gc()
206 q->inactive_flows -= fcnt; in fq_gc()
207 q->stat_gc_flows += fcnt; in fq_gc()
216 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) in fq_classify() argument
225 return &q->internal; in fq_classify()
234 unsigned long hash = skb_get_hash(skb) & q->orphan_mask; in fq_classify()
243 root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)]; in fq_classify()
245 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify()
246 q->inactive_flows > q->flows/2) in fq_classify()
247 fq_gc(q, root, sk); in fq_classify()
263 f->credit = q->initial_quantum; in fq_classify()
277 q->stat_allocation_errors++; in fq_classify()
278 return &q->internal; in fq_classify()
284 f->credit = q->initial_quantum; in fq_classify()
289 q->flows++; in fq_classify()
290 q->inactive_flows++; in fq_classify()
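
fq_classify() picks the flow bucket from the owning socket pointer, hashed into one of 2^fq_trees_log per-bucket trees, while packets without a socket fall back to a flow hash masked by orphan_mask (the kernel then fabricates a fake socket pointer from that value, which this sketch skips). A small sketch of the bucket selection, mimicking the kernel's hash_32() multiply-shift; names and demo values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u	/* constant used by the kernel's hash_32() */

static uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	unsigned int fq_trees_log = 10;		/* 2^10 = 1024 buckets, the default */
	uint32_t orphan_mask = 1024 - 1;	/* default TCA_FQ_ORPHAN_MASK */
	uint64_t sk = 0x7f32cafe1234;		/* stand-in socket address */
	uint32_t skb_hash = 0xdeadbeef;		/* stand-in skb flow hash */

	/* packet owned by a socket: bucket chosen from the pointer value */
	uint32_t bucket = toy_hash_32((uint32_t)sk, fq_trees_log);

	/* orphaned packet: slot derived from the flow hash and orphan_mask */
	uint32_t orphan = skb_hash & orphan_mask;

	printf("socket bucket %u, orphan slot %u\n", bucket, orphan);
	return 0;
}
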
305 sch->q.qlen--; in fq_dequeue_head()
370 struct fq_sched_data *q = qdisc_priv(sch); in fq_enqueue() local
373 if (unlikely(sch->q.qlen >= sch->limit)) in fq_enqueue()
376 f = fq_classify(skb, q); in fq_enqueue()
377 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { in fq_enqueue()
378 q->stat_flows_plimit++; in fq_enqueue()
384 q->stat_tcp_retrans++; in fq_enqueue()
387 fq_flow_add_tail(&q->new_flows, f); in fq_enqueue()
388 if (time_after(jiffies, f->age + q->flow_refill_delay)) in fq_enqueue()
389 f->credit = max_t(u32, f->credit, q->quantum); in fq_enqueue()
390 q->inactive_flows--; in fq_enqueue()
396 if (unlikely(f == &q->internal)) { in fq_enqueue()
397 q->stat_internal_packets++; in fq_enqueue()
399 sch->q.qlen++; in fq_enqueue()
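
Two enqueue-time policies are visible in the fq_enqueue() lines above: a per-flow packet limit (flow_plimit) and a credit refresh for flows that were idle longer than flow_refill_delay before becoming active again. A simplified sketch under those assumptions follows; toy_* names are hypothetical and the refill condition is deliberately reduced relative to the kernel's max_t() expression.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_flow {
	unsigned int qlen;	/* packets currently queued in this flow */
	int credit;		/* bytes the flow may still send this round */
	uint64_t age;		/* tick at which the flow last went idle */
};

/* returns true if the packet is accepted, false if it must be dropped */
static bool toy_enqueue(struct toy_flow *f, unsigned int flow_plimit,
			uint32_t quantum, uint64_t flow_refill_delay,
			uint64_t now, bool was_idle)
{
	if (f->qlen >= flow_plimit)
		return false;		/* per-flow packet limit hit */

	/* flow returns after a long idle period: top non-negative credit
	 * up to one quantum (simplified) */
	if (was_idle && now > f->age + flow_refill_delay &&
	    f->credit >= 0 && (uint32_t)f->credit < quantum)
		f->credit = quantum;

	f->qlen++;
	return true;
}

int main(void)
{
	struct toy_flow f = { .qlen = 0, .credit = 12, .age = 100 };

	/* idle for 60 ticks with a 40-tick refill delay: credit refreshed */
	toy_enqueue(&f, 100, 3028, 40, 160, true);
	printf("credit after refill: %d, qlen %u\n", f.credit, f.qlen);
	return 0;
}
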
404 static void fq_check_throttled(struct fq_sched_data *q, u64 now) in fq_check_throttled() argument
408 if (q->time_next_delayed_flow > now) in fq_check_throttled()
411 q->time_next_delayed_flow = ~0ULL; in fq_check_throttled()
412 while ((p = rb_first(&q->delayed)) != NULL) { in fq_check_throttled()
416 q->time_next_delayed_flow = f->time_next_packet; in fq_check_throttled()
419 rb_erase(p, &q->delayed); in fq_check_throttled()
420 q->throttled_flows--; in fq_check_throttled()
421 fq_flow_add_tail(&q->old_flows, f); in fq_check_throttled()
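
fq_check_throttled() walks the time-ordered delayed set, moves every flow whose deadline has passed back onto old_flows, and records the first still-future deadline so the watchdog can be re-armed with it. A compact sketch of that pass, using a pre-sorted array in place of the rbtree walk; the values are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* deadlines (ns), already sorted as the rbtree walk would yield them */
	uint64_t delayed[] = { 1000, 2500, 9000, 12000 };
	size_t n = sizeof(delayed) / sizeof(delayed[0]);
	uint64_t now = 3000;
	uint64_t time_next_delayed_flow = ~0ULL;
	size_t unthrottled = 0;

	for (size_t i = 0; i < n; i++) {
		if (delayed[i] > now) {
			/* first still-future deadline: next watchdog target */
			time_next_delayed_flow = delayed[i];
			break;
		}
		unthrottled++;	/* kernel: move the flow to q->old_flows */
	}

	printf("unthrottled %zu flows, next wakeup at %llu\n",
	       unthrottled, (unsigned long long)time_next_delayed_flow);
	return 0;
}
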
427 struct fq_sched_data *q = qdisc_priv(sch); in fq_dequeue() local
434 skb = fq_dequeue_head(sch, &q->internal); in fq_dequeue()
437 fq_check_throttled(q, now); in fq_dequeue()
439 head = &q->new_flows; in fq_dequeue()
441 head = &q->old_flows; in fq_dequeue()
443 if (q->time_next_delayed_flow != ~0ULL) in fq_dequeue()
444 qdisc_watchdog_schedule_ns(&q->watchdog, in fq_dequeue()
445 q->time_next_delayed_flow, in fq_dequeue()
453 f->credit += q->quantum; in fq_dequeue()
455 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
463 fq_flow_set_throttled(q, f); in fq_dequeue()
471 if ((head == &q->new_flows) && q->old_flows.first) { in fq_dequeue()
472 fq_flow_add_tail(&q->old_flows, f); in fq_dequeue()
475 q->inactive_flows++; in fq_dequeue()
482 if (f->credit > 0 || !q->rate_enable) in fq_dequeue()
489 rate = q->flow_max_rate; in fq_dequeue()
494 u32 plen = max(qdisc_pkt_len(skb), q->quantum); in fq_dequeue()
505 q->stat_pkts_too_long++; in fq_dequeue()
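
The fq_dequeue() lines implement a deficit-round-robin step: new flows are served before old ones, a flow whose credit is exhausted receives one quantum and rotates to the tail of old_flows, and each transmitted packet is charged against the flow's credit. Below is a single-flow sketch of that credit accounting; names and packet sizes are illustrative.

#include <stdio.h>

struct toy_flow {
	int credit;		/* bytes this flow may still send this round */
	unsigned int qlen;	/* packets still queued */
};

/* returns bytes sent, or 0 when the flow had to be refilled and rotated */
static unsigned int toy_service(struct toy_flow *f, int quantum,
				unsigned int pkt_len)
{
	if (f->credit <= 0) {
		f->credit += quantum;	/* refill, then move to old_flows */
		return 0;
	}
	f->qlen--;
	f->credit -= pkt_len;		/* may go negative, as in the kernel */
	return pkt_len;
}

int main(void)
{
	struct toy_flow f = { .credit = 3028, .qlen = 4 };

	while (f.qlen) {
		unsigned int sent = toy_service(&f, 3028, 1500);

		if (!sent)
			printf("credit exhausted, refilled to %d, flow rotated\n",
			       f.credit);
		else
			printf("sent %u bytes, credit now %d\n", sent, f.credit);
	}
	return 0;
}
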
517 struct fq_sched_data *q = qdisc_priv(sch); in fq_reset() local
524 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL) in fq_reset()
527 if (!q->fq_root) in fq_reset()
530 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { in fq_reset()
531 root = &q->fq_root[idx]; in fq_reset()
542 q->new_flows.first = NULL; in fq_reset()
543 q->old_flows.first = NULL; in fq_reset()
544 q->delayed = RB_ROOT; in fq_reset()
545 q->flows = 0; in fq_reset()
546 q->inactive_flows = 0; in fq_reset()
547 q->throttled_flows = 0; in fq_reset()
550 static void fq_rehash(struct fq_sched_data *q, in fq_rehash() argument
590 q->flows -= fcnt; in fq_rehash()
591 q->inactive_flows -= fcnt; in fq_rehash()
592 q->stat_gc_flows += fcnt; in fq_rehash()
612 struct fq_sched_data *q = qdisc_priv(sch); in fq_resize() local
617 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
631 old_fq_root = q->fq_root; in fq_resize()
633 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); in fq_resize()
635 q->fq_root = array; in fq_resize()
636 q->fq_trees_log = log; in fq_resize()
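
fq_resize()/fq_rehash() move every flow from the old array of 2^fq_trees_log buckets into a freshly allocated array sized for the new log, using the same socket-pointer hash, and then switch q->fq_root over. A toy illustration with counters standing in for the kernel's per-bucket rbtrees; names and values are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define GOLDEN_RATIO_32 0x61C88647u	/* same constant hash_32() uses */

static uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	unsigned int old_log = 2, new_log = 4;	/* 4 buckets -> 16 buckets */
	unsigned int old_n = 1u << old_log, new_n = 1u << new_log;
	unsigned int *old_root = calloc(old_n, sizeof(*old_root));
	unsigned int *new_root = calloc(new_n, sizeof(*new_root));
	uint32_t socks[] = { 0x1234, 0xbeef, 0xc0de, 0xf00d, 0xabcd };
	unsigned int old_used = 0, new_used = 0;

	for (size_t i = 0; i < 5; i++) {
		old_root[toy_hash_32(socks[i], old_log)]++;	/* original placement */
		new_root[toy_hash_32(socks[i], new_log)]++;	/* after rehash */
	}
	for (unsigned int b = 0; b < old_n; b++)
		old_used += old_root[b] != 0;
	for (unsigned int b = 0; b < new_n; b++)
		new_used += new_root[b] != 0;

	printf("5 flows occupy %u of %u old buckets, %u of %u new buckets\n",
	       old_used, old_n, new_used, new_n);
	free(old_root);
	free(new_root);
	return 0;
}
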
659 struct fq_sched_data *q = qdisc_priv(sch); in fq_change() local
673 fq_log = q->fq_trees_log; in fq_change()
687 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); in fq_change()
693 q->quantum = quantum; in fq_change()
699 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); in fq_change()
706 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); in fq_change()
712 q->rate_enable = enable; in fq_change()
720 q->flow_refill_delay = usecs_to_jiffies(usecs_delay); in fq_change()
724 q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]); in fq_change()
731 while (sch->q.qlen > sch->limit) { in fq_change()
747 struct fq_sched_data *q = qdisc_priv(sch); in fq_destroy() local
750 fq_free(q->fq_root); in fq_destroy()
751 qdisc_watchdog_cancel(&q->watchdog); in fq_destroy()
756 struct fq_sched_data *q = qdisc_priv(sch); in fq_init() local
760 q->flow_plimit = 100; in fq_init()
761 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); in fq_init()
762 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); in fq_init()
763 q->flow_refill_delay = msecs_to_jiffies(40); in fq_init()
764 q->flow_max_rate = ~0U; in fq_init()
765 q->rate_enable = 1; in fq_init()
766 q->new_flows.first = NULL; in fq_init()
767 q->old_flows.first = NULL; in fq_init()
768 q->delayed = RB_ROOT; in fq_init()
769 q->fq_root = NULL; in fq_init()
770 q->fq_trees_log = ilog2(1024); in fq_init()
771 q->orphan_mask = 1024 - 1; in fq_init()
772 qdisc_watchdog_init(&q->watchdog, sch); in fq_init()
777 err = fq_resize(sch, q->fq_trees_log); in fq_init()
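
fq_init() establishes the qdisc defaults: 100 packets per flow, a quantum of two MTUs and an initial quantum of ten, a 40 ms refill delay, no per-flow rate cap, 1024 hash buckets and a matching orphan mask. A compact restatement as plain C, assuming an Ethernet device where psched_mtu() evaluates to 1514; the struct and its field names are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct toy_fq_defaults {
	uint32_t flow_plimit;		/* packets allowed per flow */
	uint32_t quantum;		/* bytes per DRR round */
	uint32_t initial_quantum;	/* bytes granted to a brand-new flow */
	uint32_t flow_refill_delay_ms;
	uint32_t flow_max_rate;		/* ~0 means no per-flow cap */
	uint32_t fq_trees_log;		/* 2^10 = 1024 hash buckets */
	uint32_t orphan_mask;
};

int main(void)
{
	const uint32_t mtu = 1514;	/* assumed Ethernet psched_mtu() */
	struct toy_fq_defaults d = {
		.flow_plimit = 100,
		.quantum = 2 * mtu,		/* 3028 */
		.initial_quantum = 10 * mtu,	/* 15140 */
		.flow_refill_delay_ms = 40,
		.flow_max_rate = ~0u,
		.fq_trees_log = 10,
		.orphan_mask = 1024 - 1,
	};

	printf("quantum %u, initial_quantum %u, buckets %u\n",
	       d.quantum, d.initial_quantum, 1u << d.fq_trees_log);
	return 0;
}
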
784 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump() local
794 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || in fq_dump()
795 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || in fq_dump()
796 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || in fq_dump()
797 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || in fq_dump()
798 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || in fq_dump()
800 jiffies_to_usecs(q->flow_refill_delay)) || in fq_dump()
801 nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) || in fq_dump()
802 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) in fq_dump()
813 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump_stats() local
816 .gc_flows = q->stat_gc_flows, in fq_dump_stats()
817 .highprio_packets = q->stat_internal_packets, in fq_dump_stats()
818 .tcp_retrans = q->stat_tcp_retrans, in fq_dump_stats()
819 .throttled = q->stat_throttled, in fq_dump_stats()
820 .flows_plimit = q->stat_flows_plimit, in fq_dump_stats()
821 .pkts_too_long = q->stat_pkts_too_long, in fq_dump_stats()
822 .allocation_errors = q->stat_allocation_errors, in fq_dump_stats()
823 .flows = q->flows, in fq_dump_stats()
824 .inactive_flows = q->inactive_flows, in fq_dump_stats()
825 .throttled_flows = q->throttled_flows, in fq_dump_stats()
826 .time_next_delayed_flow = q->time_next_delayed_flow - now, in fq_dump_stats()