References to q in net/sched/sch_htb.c (the Linux HTB packet scheduler). Each entry gives the source line number, the matching code, and the enclosing function; a trailing "local", "argument", or "member" notes how q is declared at that site.

131 			struct Qdisc	*q;  member (evidently the leaf's child qdisc, cl->un.leaf.q, given the references below)
183 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
186 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
209 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
228 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
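
The fallback on line 257 builds the default class's handle from the qdisc's own major number plus q->defcls as the minor. The TC_H_* macros are the standard handle helpers from include/uapi/linux/pkt_sched.h; below is a minimal userspace sketch of that arithmetic, with the handle and defcls values invented for illustration.

    #include <stdio.h>
    #include <stdint.h>

    #define TC_H_MAJ_MASK 0xFFFF0000U
    #define TC_H_MIN_MASK 0x0000FFFFU
    #define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
    #define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)
    #define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

    int main(void)
    {
        uint32_t sch_handle = 0x00010000;  /* "1:", a hypothetical qdisc handle */
        uint32_t defcls     = 0x30;        /* stand-in for q->defcls */

        /* same construction as line 257: major from the qdisc, minor from defcls */
        uint32_t def = TC_H_MAKE(TC_H_MAJ(sch_handle), defcls);
        printf("default class: %x:%x\n",
               (unsigned)(TC_H_MAJ(def) >> 16), (unsigned)TC_H_MIN(def));
        return 0;
    }
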
295 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
305 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
306 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
318 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
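
Lines 300-306 capture the event-queue discipline: a class that has exhausted its tokens is keyed in the per-level wait tree by the absolute time q->now + delay at which it should be looked at again, and q->near_ev_cache[] remembers the earliest key per level so htb_dequeue can skip the tree when nothing is due. A minimal userspace model of that bookkeeping follows; the rbtree is replaced by a flat array and the surrounding structure is invented, only the key and cache logic mirrors the kernel.

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    #define MAXDEPTH 8

    static s64 now;
    static s64 near_ev_cache[MAXDEPTH];
    static s64 wait_pq[MAXDEPTH][64];   /* stand-in for the per-level rbtree */
    static int wait_cnt[MAXDEPTH];

    static void add_to_wait_tree(int level, s64 delay)
    {
        s64 pq_key = now + delay;

        /* line 301: never key an event at exactly "now", or it would be
         * reprocessed in the very pass that queued it */
        if (pq_key == now)
            pq_key++;

        /* lines 305-306: keep the per-level nearest-event cache fresh */
        if (near_ev_cache[level] > pq_key)
            near_ev_cache[level] = pq_key;

        wait_pq[level][wait_cnt[level]++] = pq_key;  /* kernel: rb_insert_color() */
    }

    int main(void)
    {
        now = 1000;
        for (int i = 0; i < MAXDEPTH; i++)
            near_ev_cache[i] = INT64_MAX;

        add_to_wait_tree(0, 0);     /* degenerates to now + 1 */
        add_to_wait_tree(0, 500);
        printf("nearest level-0 event: %lld\n", (long long)near_ev_cache[0]);
        return 0;
    }
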
338 static inline void htb_add_class_to_row(struct htb_sched *q, in htb_add_class_to_row() argument
341 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
345 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
367 static inline void htb_remove_class_from_row(struct htb_sched *q, in htb_remove_class_from_row() argument
371 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
385 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
395 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
420 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
430 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
464 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
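
Lines 341 and 385, together with the activate/deactivate pair above, maintain one bitmask per level, one bit per priority, recording which priorities currently have an active class; the dequeue path visits only set bits. The sketch below models just the mask handling; the feed-chain propagation inside htb_activate_prios()/htb_deactivate_prios() is omitted and the names are invented.

    #include <stdio.h>

    static int row_mask[8];   /* one bit per priority, indexed by level */

    static void add_class_to_row(int level, int mask)
    {
        row_mask[level] |= mask;         /* line 341 */
    }

    static void remove_class_from_row(int level, int mask)
    {
        row_mask[level] &= ~mask;        /* line 385, once the prio's tree empties */
    }

    int main(void)
    {
        add_class_to_row(0, 1 << 2);     /* a prio-2 class becomes active at level 0 */
        add_class_to_row(0, 1 << 5);
        remove_class_from_row(0, 1 << 5);

        /* mirror of lines 918/923: visit only priorities whose bit is set;
         * the kernel's ffz(~mask) is equivalent to counting trailing zeros */
        for (int m = row_mask[0]; m; m &= m - 1)
            printf("dequeue candidates at prio %d\n", __builtin_ctz(m));
        return 0;
    }
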
521 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
530 htb_deactivate_prios(q, cl); in htb_change_class_mode()
533 htb_activate_prios(q, cl); in htb_change_class_mode()
545 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
547 WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen); in htb_activate()
551 htb_activate_prios(q, cl); in htb_activate()
553 q->drops + cl->prio); in htb_activate()
563 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
567 htb_deactivate_prios(q, cl); in htb_deactivate()
575 struct htb_sched *q = qdisc_priv(sch); in htb_enqueue() local
580 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
581 __skb_queue_tail(&q->direct_queue, skb); in htb_enqueue()
582 q->direct_pkts++; in htb_enqueue()
593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { in htb_enqueue()
600 htb_activate(q, cl); in htb_enqueue()
603 sch->q.qlen++; in htb_enqueue()
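
Lines 575-603 give the enqueue skeleton: classify the packet; if it maps to the direct queue and there is room, bypass shaping entirely (lines 580-582); otherwise hand it to the leaf's child qdisc and, on success, activate the class and bump the root qlen. A userspace model of that decision structure, with classify_to_direct() and leaf_enqueue() invented as stand-ins for htb_classify() and qdisc_enqueue():

    #include <stdio.h>

    enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };

    #define DIRECT_QLEN 32

    static int direct_queue_len, direct_pkts, qlen;

    static int classify_to_direct(int pkt) { return pkt % 2; }  /* stand-in */
    static int leaf_enqueue(int pkt)       { (void)pkt; return XMIT_SUCCESS; }

    static int enqueue(int pkt)
    {
        if (classify_to_direct(pkt)) {
            if (direct_queue_len >= DIRECT_QLEN)
                return XMIT_DROP;        /* direct queue full */
            direct_queue_len++;          /* line 581: __skb_queue_tail() */
            direct_pkts++;               /* line 582 */
        } else if (leaf_enqueue(pkt) != XMIT_SUCCESS) {
            return XMIT_DROP;            /* failure branch of line 593 */
        } else {
            /* line 600: htb_activate() marks the class backlogged */
        }
        qlen++;                          /* line 603 */
        return XMIT_SUCCESS;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            enqueue(i);
        printf("qlen=%d direct_pkts=%d\n", qlen, direct_pkts);
        return 0;
    }
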
644 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
652 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
662 cl->t_c = q->now; in htb_charge_class()
666 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
669 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
671 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
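
Line 652 is the heart of the token accounting: the time elapsed since the last checkpoint t_c is capped at mbuffer so a long-idle class cannot bank unlimited credit; the class mode is then recomputed and, if it still cannot send, it is re-keyed into the wait tree with the computed wait on line 671. A self-contained token-bucket model of that update, with all units and numbers invented:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    struct bucket {
        s64 tokens;    /* current credit */
        s64 buffer;    /* burst cap */
        s64 mbuffer;   /* max elapsed time credited, as on line 652 */
        s64 rate;      /* credit earned per time unit */
        s64 t_c;       /* checkpoint: time of last update (line 662) */
    };

    static void charge(struct bucket *b, s64 now, s64 pkt_cost)
    {
        s64 diff = now - b->t_c;
        if (diff > b->mbuffer)           /* the min_t(s64, ...) on line 652 */
            diff = b->mbuffer;

        b->tokens += diff * b->rate - pkt_cost;
        if (b->tokens > b->buffer)       /* never exceed the burst cap */
            b->tokens = b->buffer;
        b->t_c = now;                    /* line 662 */
    }

    int main(void)
    {
        struct bucket b = { .buffer = 100, .mbuffer = 50, .rate = 2 };
        charge(&b, 30, 40);              /* 30 idle units earn 60, packet costs 40 */
        printf("tokens=%lld\n", (long long)b.tokens);   /* 20 */
        return 0;
    }
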
689 static s64 htb_do_events(struct htb_sched *q, const int level, in htb_do_events() argument
697 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
708 if (cl->pq_key > q->now) in htb_do_events()
712 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
713 htb_change_class_mode(q, cl, &diff); in htb_do_events()
715 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
719 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
721 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
724 return q->now; in htb_do_events()
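
Lines 697-724 show the event loop: htb_do_events() walks the level's wait_pq in key order, returns the key of the first event that is still in the future (line 708), and otherwise recomputes the class mode and re-queues the class if it still cannot send. The loop is time-bounded; when it overruns, lines 719-724 warn once via HTB_WARN_TOOMANYEVENTS and return q->now so dequeue can make progress and retry later. A compact model, with the rbtree replaced by an array assumed sorted:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    /* events[] stands in for wait_pq, sorted by key;
     * return the first future key, or 0 when the queue drains */
    static s64 do_events(s64 now, const s64 *events, int n)
    {
        for (int i = 0; i < n; i++) {
            if (events[i] > now)
                return events[i];   /* line 708: nothing further is due */
            /* due event: the kernel recomputes the class mode here
             * (line 713) and may re-insert it with a later key (line 715) */
        }
        return 0;
    }

    int main(void)
    {
        s64 ev[] = { 100, 200, 300 };
        printf("next event after t=150: %lld\n",
               (long long)do_events(150, ev, 3));   /* 200 */
        return 0;
    }
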
812 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio, in htb_dequeue_tree() argument
817 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
833 if (unlikely(cl->un.leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
835 htb_deactivate(q, cl); in htb_dequeue_tree()
838 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
849 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); in htb_dequeue_tree()
853 qdisc_warn_nonwc("htb", cl->un.leaf.q); in htb_dequeue_tree()
855 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
866 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
871 if (!cl->un.leaf.q->q.qlen) in htb_dequeue_tree()
872 htb_deactivate(q, cl); in htb_dequeue_tree()
873 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
881 struct htb_sched *q = qdisc_priv(sch); in htb_dequeue() local
887 skb = __skb_dequeue(&q->direct_queue); in htb_dequeue()
892 sch->q.qlen--; in htb_dequeue()
896 if (!sch->q.qlen) in htb_dequeue()
898 q->now = ktime_get_ns(); in htb_dequeue()
901 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
906 s64 event = q->near_ev_cache[level]; in htb_dequeue()
908 if (q->now >= event) { in htb_dequeue()
909 event = htb_do_events(q, level, start_at); in htb_dequeue()
911 event = q->now + NSEC_PER_SEC; in htb_dequeue()
912 q->near_ev_cache[level] = event; in htb_dequeue()
918 m = ~q->row_mask[level]; in htb_dequeue()
923 skb = htb_dequeue_tree(q, prio, level); in htb_dequeue()
929 if (likely(next_event > q->now)) { in htb_dequeue()
931 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { in htb_dequeue()
933 qdisc_throttled(q->watchdog.qdisc); in htb_dequeue()
934 hrtimer_start(&q->watchdog.timer, time, in htb_dequeue()
938 schedule_work(&q->work); in htb_dequeue()
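
Lines 898-938 tie the scheduler to real time: q->now snapshots ktime_get_ns(); next_event starts five seconds out (line 901) and is pulled closer by each level's event processing, with an empty level re-checked after one second (line 911); if nothing could be dequeued and next_event is still in the future, the watchdog hrtimer is armed, otherwise the work item is scheduled so the too-many-events case retries from process context (line 938). A model of just the next_event selection, with invented inputs:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    #define NSEC_PER_SEC 1000000000LL

    /* returns the wakeup time, or 0 meaning "retry immediately"
     * (the schedule_work() branch on line 938) */
    static s64 pick_wakeup(s64 now, const s64 *level_event, int levels)
    {
        s64 next_event = now + 5LL * NSEC_PER_SEC;   /* line 901 */

        for (int i = 0; i < levels; i++) {
            s64 event = level_event[i];
            if (!event)                              /* line 911: empty level, */
                event = now + NSEC_PER_SEC;          /* look again in one second */
            if (next_event > event)
                next_event = event;
        }
        return next_event > now ? next_event : 0;    /* line 929's test */
    }

    int main(void)
    {
        s64 ev[] = { 0, 1500, 2000 };   /* per-level next events; 0 = none */
        printf("wake at %lld\n", (long long)pick_wakeup(1000, ev, 3));  /* 1500 */
        return 0;
    }
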
947 struct htb_sched *q = qdisc_priv(sch); in htb_drop() local
952 list_for_each(p, q->drops + prio) { in htb_drop()
956 if (cl->un.leaf.q->ops->drop && in htb_drop()
957 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { in htb_drop()
958 sch->q.qlen--; in htb_drop()
959 if (!cl->un.leaf.q->q.qlen) in htb_drop()
960 htb_deactivate(q, cl); in htb_drop()
972 struct htb_sched *q = qdisc_priv(sch); in htb_reset() local
976 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
977 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
981 if (cl->un.leaf.q) in htb_reset()
982 qdisc_reset(cl->un.leaf.q); in htb_reset()
990 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
991 __skb_queue_purge(&q->direct_queue); in htb_reset()
992 sch->q.qlen = 0; in htb_reset()
993 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
994 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
996 INIT_LIST_HEAD(q->drops + i); in htb_reset()
1011 struct htb_sched *q = container_of(work, struct htb_sched, work); in htb_work_func() local
1012 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1019 struct htb_sched *q = qdisc_priv(sch); in htb_init() local
1039 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1043 INIT_LIST_HEAD(q->drops + i); in htb_init()
1045 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1046 INIT_WORK(&q->work, htb_work_func); in htb_init()
1047 __skb_queue_head_init(&q->direct_queue); in htb_init()
1050 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1052 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1053 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ in htb_init()
1054 q->direct_qlen = 2; in htb_init()
1056 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1057 q->rate2quantum = 1; in htb_init()
1058 q->defcls = gopt->defcls; in htb_init()
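
Lines 1050-1054 pick the direct-queue length: the TCA_HTB_DIRECT_QLEN attribute wins when present, else the device's tx_queue_len is used with a floor of 2, since some virtual devices report 0. A small model of that fallback, where has_attr/attr_val stand in for the netlink attribute:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pick_direct_qlen(int has_attr, uint32_t attr_val,
                                     uint32_t tx_queue_len)
    {
        uint32_t qlen;
        if (has_attr) {
            qlen = attr_val;            /* line 1050 */
        } else {
            qlen = tx_queue_len;        /* line 1052 */
            if (qlen < 2)               /* line 1053: zero tx_queue_len devices */
                qlen = 2;
        }
        return qlen;
    }

    int main(void)
    {
        printf("%u\n", pick_direct_qlen(0, 0, 0));    /* 2: loopback-style device */
        printf("%u\n", pick_direct_qlen(1, 100, 0));  /* 100: explicit attribute */
        return 0;
    }
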
1065 struct htb_sched *q = qdisc_priv(sch); in htb_dump() local
1073 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1075 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1076 gopt.defcls = q->defcls; in htb_dump()
1083 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1105 if (!cl->level && cl->un.leaf.q) in htb_dump_class()
1106 tcm->tcm_info = cl->un.leaf.q->handle; in htb_dump_class()
1143 if (!cl->level && cl->un.leaf.q) in htb_dump_class_stats()
1144 qlen = cl->un.leaf.q->q.qlen; in htb_dump_class_stats()
1169 *old = cl->un.leaf.q; in htb_graft()
1170 cl->un.leaf.q = new; in htb_graft()
1172 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); in htb_graft()
1182 return !cl->level ? cl->un.leaf.q : NULL; in htb_leaf()
1189 if (cl->un.leaf.q->q.qlen == 0) in htb_qlen_notify()
1212 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, in htb_parent_to_leaf() argument
1217 WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1221 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1226 parent->un.leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
1236 WARN_ON(!cl->un.leaf.q); in htb_destroy_class()
1237 qdisc_destroy(cl->un.leaf.q); in htb_destroy_class()
1246 struct htb_sched *q = qdisc_priv(sch); in htb_destroy() local
1251 cancel_work_sync(&q->work); in htb_destroy()
1252 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1258 tcf_destroy_chain(&q->filter_list); in htb_destroy()
1260 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1261 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) in htb_destroy()
1264 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1265 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1269 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1270 __skb_queue_purge(&q->direct_queue); in htb_destroy()
1275 struct htb_sched *q = qdisc_priv(sch); in htb_delete() local
1297 qlen = cl->un.leaf.q->q.qlen; in htb_delete()
1298 qdisc_reset(cl->un.leaf.q); in htb_delete()
1299 qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen); in htb_delete()
1303 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1308 htb_deactivate(q, cl); in htb_delete()
1312 &q->hlevel[cl->level].wait_pq); in htb_delete()
1315 htb_parent_to_leaf(q, cl, new_q); in htb_delete()
1340 struct htb_sched *q = qdisc_priv(sch); in htb_change_class() local
1432 unsigned int qlen = parent->un.leaf.q->q.qlen; in htb_change_class()
1435 qdisc_reset(parent->un.leaf.q); in htb_change_class()
1436 qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen); in htb_change_class()
1437 qdisc_destroy(parent->un.leaf.q); in htb_change_class()
1439 htb_deactivate(q, parent); in htb_change_class()
1443 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1451 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1464 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1494 do_div(quantum, q->rate2quantum); in htb_change_class()
1518 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
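
Line 1494 derives the DRR quantum from the class rate: quantum = rate_bytes_ps / rate2quantum, using do_div() because this is 64-bit arithmetic that must also work on 32-bit hosts, with rate2quantum floored at 1 on lines 1056-1057. The real code also warns and adjusts when the result looks too small or too large; the sketch below keeps only the division and the floor, and its numbers are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t htb_quantum(uint64_t rate_bytes_ps, uint32_t rate2quantum)
    {
        if (rate2quantum < 1)                 /* lines 1056-1057 */
            rate2quantum = 1;
        return rate_bytes_ps / rate2quantum;  /* line 1494, kernel: do_div() */
    }

    int main(void)
    {
        /* 1 MB/s with the default rate2quantum of 10 -> 100000-byte quantum */
        printf("%llu\n", (unsigned long long)htb_quantum(1000000, 10));
        return 0;
    }
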
1530 struct htb_sched *q = qdisc_priv(sch); in htb_find_tcf() local
1532 struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; in htb_find_tcf()
1566 struct htb_sched *q = qdisc_priv(sch); in htb_walk() local
1573 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
1574 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()