Lines matching refs: cl — net/sched/sch_htb.c (HTB packet scheduler)

Each hit below shows the source line number, the matching code, and the enclosing function; 'local' and 'argument' mark the lines where cl is declared.

210 	struct htb_class *cl;  in htb_classify()  local
221 cl = htb_find(skb->priority, sch); in htb_classify()
222 if (cl) { in htb_classify()
223 if (cl->level == 0) in htb_classify()
224 return cl; in htb_classify()
226 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
242 cl = (void *)res.class; in htb_classify()
243 if (!cl) { in htb_classify()
246 cl = htb_find(res.classid, sch); in htb_classify()
247 if (!cl) in htb_classify()
250 if (!cl->level) in htb_classify()
251 return cl; /* we hit leaf; return it */ in htb_classify()
254 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
258 if (!cl || cl->level) in htb_classify()
260 return cl; in htb_classify()
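
The htb_classify() hits above trace the full decision ladder: skb->priority is tried directly as a classid, then the per-class filter chains are walked (and re-walked when a filter returns an inner class), and finally the qdisc's default class is used, with a non-leaf default falling through to the direct queue. A runnable user-space model of that control flow; the struct, table, and find() helper here are invented stand-ins for struct htb_class and htb_find(), not the kernel API:

    #include <stdio.h>

    /* Hypothetical stand-ins: only the control flow mirrors htb_classify(). */
    struct cls { unsigned id; int level; const struct cls *filter_hit; };

    static const struct cls leaf  = { 0x10011, 0, 0 };
    static const struct cls inner = { 0x10001, 1, &leaf };  /* filter -> leaf */
    static const struct cls *table[] = { &leaf, &inner };

    static const struct cls *find(unsigned id)           /* ~ htb_find() */
    {
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i]->id == id)
                return table[i];
        return 0;
    }

    static const struct cls *classify(unsigned skb_priority, unsigned defcls)
    {
        const struct cls *cl = find(skb_priority);  /* 221: priority as classid */
        if (cl && cl->level == 0)
            return cl;                              /* 223-224: already a leaf  */
        while (cl && cl->filter_hit) {              /* 226/254: filter chains   */
            cl = cl->filter_hit;                    /* 242/246: filter verdict  */
            if (cl->level == 0)
                return cl;                          /* 250-251: hit a leaf      */
        }
        cl = find(defcls);                          /* 257: default class       */
        return (cl && cl->level == 0) ? cl : 0;     /* 258: else direct queue   */
    }

    int main(void)
    {
        printf("%#x\n", classify(0x10001, 0x10011)->id);  /* prints 0x10011 */
        return 0;
    }

In the kernel the filter walk is RCU-protected (the rcu_dereference_bh() calls at 226/254); none of that locking is modeled here.
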
270 struct htb_class *cl, int prio) in htb_add_to_id_tree() argument
279 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
284 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
285 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
296 struct htb_class *cl, s64 delay) in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
302 cl->pq_key++; in htb_add_to_wait_tree()
305 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
306 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
312 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
317 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
318 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
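
Both insert helpers above are the same rbtree descent with different sort keys: the id/feed trees order siblings by classid (strict '>' at 279), while the wait tree orders pending classes by pq_key ('>=' at 312 so equal deadlines stay FIFO, after the +1 nudge at 301-302 guarantees a strictly future key). A sketch of the shared walk on a plain unbalanced BST; the kernel does the identical descent and then lets rb_insert_color() rebalance:

    #include <stdio.h>
    #include <stdint.h>

    struct node { uint64_t key; struct node *left, *right; };

    static void insert(struct node **root, struct node *n)
    {
        struct node **p = root;              /* mirrors rb_node **p */
        while (*p)
            /* ">=" matches the wait tree; the id tree uses strict ">" */
            p = (n->key >= (*p)->key) ? &(*p)->right : &(*p)->left;
        n->left = n->right = 0;
        *p = n;                              /* mirrors rb_link_node() */
    }

    static void walk(const struct node *n)
    {
        if (!n) return;
        walk(n->left);
        printf("%llu ", (unsigned long long)n->key);
        walk(n->right);
    }

    int main(void)
    {
        struct node n[4] = { { 30 }, { 10 }, { 20 }, { 20 } };
        struct node *root = 0;
        for (int i = 0; i < 4; i++)
            insert(&root, &n[i]);
        walk(root);                          /* prints: 10 20 20 30 */
        printf("\n");
        return 0;
    }
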
339 struct htb_class *cl, int mask) in htb_add_class_to_row() argument
341 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
345 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
368 struct htb_class *cl, int mask) in htb_remove_class_from_row() argument
371 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
378 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
381 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
385 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
395 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
397 struct htb_class *p = cl->parent; in htb_activate_prios()
398 long m, mask = cl->prio_activity; in htb_activate_prios()
400 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
412 htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
415 cl = p; in htb_activate_prios()
416 p = cl->parent; in htb_activate_prios()
419 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
420 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
430 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
432 struct htb_class *p = cl->parent; in htb_deactivate_prios()
433 long m, mask = cl->prio_activity; in htb_deactivate_prios()
435 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
442 if (p->un.inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
447 p->un.inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
451 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
459 cl = p; in htb_deactivate_prios()
460 p = cl->parent; in htb_deactivate_prios()
463 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
464 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
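
htb_activate_prios() pushes a newly active leaf's priority bits up the borrowing chain, linking the class into each ancestor's per-priority feed tree, until it reaches an ancestor in HTB_CAN_SEND, which joins the global row. htb_deactivate_prios() is the mirror image, with the extra last_ptr_id bookkeeping (447) so a round-robin pointer parked on the removed node can be revalidated later. A runnable sketch of the upward walk; the trimming of mask against bits the parent already has active happens in lines elided from the hits above:

    #include <stdio.h>

    enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

    struct klass {
        const char *name;
        enum cmode cmode;
        struct klass *parent;
        unsigned long prio_activity;   /* one bit per active priority */
    };

    static void activate_prios(struct klass *cl)
    {
        struct klass *p = cl->parent;
        unsigned long m, mask = cl->prio_activity;

        while (cl->cmode == MAY_BORROW && p && mask) {   /* 400 */
            m = mask;
            while (m) {                /* one inner feed tree per prio bit */
                int prio = __builtin_ffsl((long)m) - 1;
                m &= ~(1UL << prio);
                printf("feed %s -> %s prio %d\n", cl->name, p->name, prio);
            }
            cl = p;                    /* 415-416: climb */
            p = cl->parent;
        }
        if (cl->cmode == CAN_SEND && mask)               /* 419-420 */
            printf("add %s to row, mask %#lx\n", cl->name, mask);
    }

    int main(void)
    {
        struct klass root = { "root", CAN_SEND,   0,     0 };
        struct klass mid  = { "mid",  MAY_BORROW, &root, 0 };
        struct klass leaf = { "leaf", MAY_BORROW, &mid,  1UL << 2 };
        activate_prios(&leaf);   /* feeds leaf->mid, mid->root, row at root */
        return 0;
    }
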
467 static inline s64 htb_lowater(const struct htb_class *cl) in htb_lowater() argument
470 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
474 static inline s64 htb_hiwater(const struct htb_class *cl) in htb_hiwater() argument
477 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
495 htb_class_mode(struct htb_class *cl, s64 *diff) in htb_class_mode() argument
499 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
504 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
521 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
523 enum htb_cmode new_mode = htb_class_mode(cl, diff); in htb_change_class_mode()
525 if (new_mode == cl->cmode) in htb_change_class_mode()
528 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
529 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
530 htb_deactivate_prios(q, cl); in htb_change_class_mode()
531 cl->cmode = new_mode; in htb_change_class_mode()
533 htb_activate_prios(q, cl); in htb_change_class_mode()
535 cl->cmode = new_mode; in htb_change_class_mode()
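
htb_lowater()/htb_hiwater() set the thresholds (the hits show the hysteresis branch; with hysteresis compiled out both return 0), and htb_class_mode() turns the two token counters into the classic three colors: ctokens below the low-water mark means HTB_CANT_SEND, tokens at or above the high-water mark means HTB_CAN_SEND, anything between is HTB_MAY_BORROW. When the verdict is not "can send", *diff is rewritten to the nanoseconds until the deciding threshold, which htb_change_class_mode() callers feed to htb_add_to_wait_tree() as the delay. A runnable user-space model of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef int64_t s64;
    enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };  /* same order as the kernel */

    struct klass { s64 tokens, ctokens, buffer, cbuffer; enum cmode cmode; };

    static bool hysteresis = true;   /* the hits show this branch */

    static s64 lowater(const struct klass *cl)
    {
        return hysteresis && cl->cmode != CANT_SEND ? -cl->cbuffer : 0;  /* 470 */
    }

    static s64 hiwater(const struct klass *cl)
    {
        return hysteresis && cl->cmode == CAN_SEND ? -cl->buffer : 0;    /* 477 */
    }

    static enum cmode class_mode(const struct klass *cl, s64 *diff)
    {
        s64 toks;

        if ((toks = cl->ctokens + *diff) < lowater(cl)) {   /* 499 */
            *diff = -toks;               /* ns until the ceil refills */
            return CANT_SEND;
        }
        if ((toks = cl->tokens + *diff) >= hiwater(cl))     /* 504 */
            return CAN_SEND;
        *diff = -toks;                   /* ns until the rate refills */
        return MAY_BORROW;
    }

    int main(void)
    {
        struct klass cl = { .tokens = -12000, .ctokens = 7000,
                            .buffer = 10000, .cbuffer = 10000,
                            .cmode = CAN_SEND };
        s64 diff = 0;
        printf("mode=%d wait=%lldns\n", class_mode(&cl, &diff),
               (long long)diff);   /* mode=1 (MAY_BORROW), wait=12000ns */
        return 0;
    }

htb_change_class_mode() (521-535) then wraps the mode write in a deactivate/activate pair so the row and feed trees stay consistent with the new color.
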
545 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
547 WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen); in htb_activate()
549 if (!cl->prio_activity) { in htb_activate()
550 cl->prio_activity = 1 << cl->prio; in htb_activate()
551 htb_activate_prios(q, cl); in htb_activate()
552 list_add_tail(&cl->un.leaf.drop_list, in htb_activate()
553 q->drops + cl->prio); in htb_activate()
563 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
565 WARN_ON(!cl->prio_activity); in htb_deactivate()
567 htb_deactivate_prios(q, cl); in htb_deactivate()
568 cl->prio_activity = 0; in htb_deactivate()
569 list_del_init(&cl->un.leaf.drop_list); in htb_deactivate()
576 struct htb_class *cl = htb_classify(skb, sch, &ret); in htb_enqueue() local
578 if (cl == HTB_DIRECT) { in htb_enqueue()
587 } else if (!cl) { in htb_enqueue()
593 } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { in htb_enqueue()
596 cl->qstats.drops++; in htb_enqueue()
600 htb_activate(q, cl); in htb_enqueue()
608 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_tokens() argument
610 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
612 if (toks > cl->buffer) in htb_accnt_tokens()
613 toks = cl->buffer; in htb_accnt_tokens()
614 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
615 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
616 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
618 cl->tokens = toks; in htb_accnt_tokens()
621 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_ctokens() argument
623 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
625 if (toks > cl->cbuffer) in htb_accnt_ctokens()
626 toks = cl->cbuffer; in htb_accnt_ctokens()
627 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
628 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
629 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
631 cl->ctokens = toks; in htb_accnt_ctokens()
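
The two accounting helpers above are identical except for which bucket they touch: tokens/buffer against the configured rate versus ctokens/cbuffer against the ceil. Tokens are nanoseconds of accumulated credit: add the elapsed time, cap at the burst buffer, subtract the packet's transmit time, and floor near -mbuffer so the counter cannot run away in either direction. A runnable model; xmit_ns stands in for psched_l2t_ns(), which converts bytes to transmit time at the class rate:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    static s64 account(s64 tokens, s64 buffer, s64 mbuffer,
                       s64 elapsed_ns, s64 xmit_ns)
    {
        s64 toks = elapsed_ns + tokens;  /* 610: diff + cl->tokens    */
        if (toks > buffer)
            toks = buffer;               /* 612-613: cap at the burst */
        toks -= xmit_ns;                 /* 614: charge the packet    */
        if (toks <= -mbuffer)
            toks = 1 - mbuffer;          /* 615-616: floor            */
        return toks;                     /* 618: written back         */
    }

    int main(void)
    {
        /* 1 ms idle, then a 1500-byte packet at ~12 ns/byte (~667 Mbit/s) */
        printf("%lld\n", (long long)account(0, 2000000, 60000000000LL,
                                            1000000, 1500 * 12));
        return 0;
    }
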
645 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
652 while (cl) { in htb_charge_class()
653 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
654 if (cl->level >= level) { in htb_charge_class()
655 if (cl->level == level) in htb_charge_class()
656 cl->xstats.lends++; in htb_charge_class()
657 htb_accnt_tokens(cl, bytes, diff); in htb_charge_class()
659 cl->xstats.borrows++; in htb_charge_class()
660 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
662 htb_accnt_ctokens(cl, bytes, diff); in htb_charge_class()
663 cl->t_c = q->now; in htb_charge_class()
665 old_mode = cl->cmode; in htb_charge_class()
667 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
668 if (old_mode != cl->cmode) { in htb_charge_class()
670 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
671 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
672 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
676 if (cl->level) in htb_charge_class()
677 bstats_update(&cl->bstats, skb); in htb_charge_class()
679 cl = cl->parent; in htb_charge_class()
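
htb_charge_class() walks from the class that dequeued up to the root, checkpointing each ancestor's token state at q->now. The split at 654-660 is the heart of borrowing: ancestors at or above the lending level pay real rate tokens (and count a lend), lower classes only rode on borrowed bandwidth so their rate bucket is merely resynced, while everyone is charged against their ceil. A self-contained sketch of the walk, reusing the clamped accounting shown above; the mode recomputation and row/wait-tree requeueing from 665-672 are elided:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    struct klass {
        int level;
        struct klass *parent;
        s64 tokens, ctokens;             /* ns of credit vs. rate and ceil */
        s64 buffer, cbuffer, mbuffer;
        s64 t_c;                         /* last accounting checkpoint */
        s64 rate_ns_per_byte, ceil_ns_per_byte;
    };

    static s64 clamp_account(s64 tok, s64 buf, s64 mbuf, s64 diff, s64 xmit)
    {
        s64 t = diff + tok;
        if (t > buf) t = buf;
        t -= xmit;
        if (t <= -mbuf) t = 1 - mbuf;
        return t;
    }

    static void charge(struct klass *cl, int level, int bytes, s64 now)
    {
        while (cl) {
            s64 diff = now - cl->t_c;
            if (diff > cl->mbuffer)
                diff = cl->mbuffer;                       /* 653 */
            if (cl->level >= level)                       /* 654: lender   */
                cl->tokens = clamp_account(cl->tokens, cl->buffer,
                        cl->mbuffer, diff, bytes * cl->rate_ns_per_byte);
            else
                cl->tokens += diff;                       /* 660: borrower */
            cl->ctokens = clamp_account(cl->ctokens, cl->cbuffer,
                    cl->mbuffer, diff, bytes * cl->ceil_ns_per_byte);
            cl->t_c = now;                                /* 663 */
            cl = cl->parent;                              /* 679 */
        }
    }

    int main(void)
    {
        struct klass root = { .level = 2, .buffer = 2000000,
                .cbuffer = 2000000, .mbuffer = 60000000000LL,
                .rate_ns_per_byte = 8, .ceil_ns_per_byte = 8 };
        struct klass leaf = { .level = 0, .parent = &root,
                .buffer = 2000000, .cbuffer = 2000000,
                .mbuffer = 60000000000LL,
                .rate_ns_per_byte = 80, .ceil_ns_per_byte = 8 };
        charge(&leaf, 2, 1500, 1000000);  /* the level-2 ancestor lent */
        printf("leaf tokens=%lld root tokens=%lld\n",
               (long long)leaf.tokens, (long long)root.tokens);
        return 0;
    }
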
701 struct htb_class *cl; in htb_do_events() local
708 cl = rb_entry(p, struct htb_class, pq_node); in htb_do_events()
709 if (cl->pq_key > q->now) in htb_do_events()
710 return cl->pq_key; in htb_do_events()
713 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
714 htb_change_class_mode(q, cl, &diff); in htb_do_events()
715 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
716 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
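
htb_do_events() drains the level's wait queue: the first entry whose pq_key is still in the future ends the scan, and its key becomes the qdisc's next watchdog time; due entries get their mode recomputed and are requeued only if they still cannot send (715-716). A sketch with a sorted array standing in for the wait_pq rbtree:

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t s64;

    struct ev { s64 pq_key; const char *name; };

    static s64 do_events(struct ev *q, int *n, s64 now)
    {
        while (*n) {
            if (q[0].pq_key > now)
                return q[0].pq_key;          /* 709-710: nothing due yet */
            printf("wake %s\n", q[0].name);  /* 713-716: recompute mode  */
            for (int i = 1; i < *n; i++)     /* pop the head             */
                q[i - 1] = q[i];
            (*n)--;
        }
        return 0;                            /* queue drained */
    }

    int main(void)
    {
        struct ev q[] = { { 5, "a" }, { 9, "b" }, { 20, "c" } };
        int n = 3;
        printf("next: %lld\n", (long long)do_events(q, &n, 10));
        return 0;                            /* wakes a and b, next: 20 */
    }
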
736 struct htb_class *cl = in htb_id_find_next_upper() local
739 if (id > cl->common.classid) { in htb_id_find_next_upper()
741 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
794 struct htb_class *cl; in htb_lookup_leaf() local
797 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
798 if (!cl->level) in htb_lookup_leaf()
799 return cl; in htb_lookup_leaf()
800 clp = &cl->un.inner.clprio[prio]; in htb_lookup_leaf()
817 struct htb_class *cl, *start; in htb_dequeue_tree() local
822 start = cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
826 if (unlikely(!cl)) in htb_dequeue_tree()
834 if (unlikely(cl->un.leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
836 htb_deactivate(q, cl); in htb_dequeue_tree()
844 if (cl == start) /* fix start if we just deleted it */ in htb_dequeue_tree()
846 cl = next; in htb_dequeue_tree()
850 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); in htb_dequeue_tree()
854 qdisc_warn_nonwc("htb", cl->un.leaf.q); in htb_dequeue_tree()
855 htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr: in htb_dequeue_tree()
857 cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
859 } while (cl != start); in htb_dequeue_tree()
862 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
863 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
864 if (cl->un.leaf.deficit[level] < 0) { in htb_dequeue_tree()
865 cl->un.leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
866 htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr : in htb_dequeue_tree()
872 if (!cl->un.leaf.q->q.qlen) in htb_dequeue_tree()
873 htb_deactivate(q, cl); in htb_dequeue_tree()
874 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
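
Once a packet is dequeued, htb_dequeue_tree() applies deficit round robin at the current level: the packet length is charged against the leaf's per-level deficit, and when the deficit goes negative the leaf is topped up by its quantum and the round-robin pointer advances (htb_next_rb_node() at 866). A minimal model of that step:

    #include <stdio.h>

    struct leaf { const char *name; int deficit, quantum; };

    /* Returns 1 when the scheduler should advance to the next leaf. */
    static int charge_deficit(struct leaf *cl, int pkt_len)
    {
        cl->deficit -= pkt_len;          /* 863 */
        if (cl->deficit < 0) {
            cl->deficit += cl->quantum;  /* 865 */
            return 1;                    /* 866: htb_next_rb_node() */
        }
        return 0;
    }

    int main(void)
    {
        struct leaf a = { "a", 0, 1500 };
        for (int i = 0; i < 4; i++)
            printf("pkt %d from %s -> advance=%d deficit=%d\n",
                   i, a.name, charge_deficit(&a, 1000), a.deficit);
        return 0;
    }
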
955 struct htb_class *cl = list_entry(p, struct htb_class, in htb_drop() local
958 if (cl->un.leaf.q->ops->drop && in htb_drop()
959 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { in htb_drop()
962 if (!cl->un.leaf.q->q.qlen) in htb_drop()
963 htb_deactivate(q, cl); in htb_drop()
976 struct htb_class *cl; in htb_reset() local
980 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
981 if (cl->level) in htb_reset()
982 memset(&cl->un.inner, 0, sizeof(cl->un.inner)); in htb_reset()
984 if (cl->un.leaf.q) in htb_reset()
985 qdisc_reset(cl->un.leaf.q); in htb_reset()
986 INIT_LIST_HEAD(&cl->un.leaf.drop_list); in htb_reset()
988 cl->prio_activity = 0; in htb_reset()
989 cl->cmode = HTB_CAN_SEND; in htb_reset()
1097 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class() local
1104 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1105 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1106 if (!cl->level && cl->un.leaf.q) in htb_dump_class()
1107 tcm->tcm_info = cl->un.leaf.q->handle; in htb_dump_class()
1115 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1116 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1117 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1118 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1119 opt.quantum = cl->quantum; in htb_dump_class()
1120 opt.prio = cl->prio; in htb_dump_class()
1121 opt.level = cl->level; in htb_dump_class()
1124 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1125 nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps)) in htb_dump_class()
1127 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1128 nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps)) in htb_dump_class()
1141 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class_stats() local
1144 if (!cl->level && cl->un.leaf.q) in htb_dump_class_stats()
1145 qlen = cl->un.leaf.q->q.qlen; in htb_dump_class_stats()
1146 cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); in htb_dump_class_stats()
1147 cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); in htb_dump_class_stats()
1149 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || in htb_dump_class_stats()
1150 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || in htb_dump_class_stats()
1151 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) in htb_dump_class_stats()
1154 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1160 struct htb_class *cl = (struct htb_class *)arg; in htb_graft() local
1162 if (cl->level) in htb_graft()
1166 cl->common.classid)) == NULL) in htb_graft()
1169 *old = qdisc_replace(sch, new, &cl->un.leaf.q); in htb_graft()
1175 struct htb_class *cl = (struct htb_class *)arg; in htb_leaf() local
1176 return !cl->level ? cl->un.leaf.q : NULL; in htb_leaf()
1181 struct htb_class *cl = (struct htb_class *)arg; in htb_qlen_notify() local
1183 if (cl->un.leaf.q->q.qlen == 0) in htb_qlen_notify()
1184 htb_deactivate(qdisc_priv(sch), cl); in htb_qlen_notify()
1189 struct htb_class *cl = htb_find(classid, sch); in htb_get() local
1190 if (cl) in htb_get()
1191 cl->refcnt++; in htb_get()
1192 return (unsigned long)cl; in htb_get()
1195 static inline int htb_parent_last_child(struct htb_class *cl) in htb_parent_last_child() argument
1197 if (!cl->parent) in htb_parent_last_child()
1200 if (cl->parent->children > 1) in htb_parent_last_child()
1206 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, in htb_parent_to_leaf() argument
1209 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1211 WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1227 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) in htb_destroy_class() argument
1229 if (!cl->level) { in htb_destroy_class()
1230 WARN_ON(!cl->un.leaf.q); in htb_destroy_class()
1231 qdisc_destroy(cl->un.leaf.q); in htb_destroy_class()
1233 gen_kill_estimator(&cl->bstats, &cl->rate_est); in htb_destroy_class()
1234 tcf_destroy_chain(&cl->filter_list); in htb_destroy_class()
1235 kfree(cl); in htb_destroy_class()
1242 struct htb_class *cl; in htb_destroy() local
1255 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) in htb_destroy()
1256 tcf_destroy_chain(&cl->filter_list); in htb_destroy()
1259 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1261 htb_destroy_class(sch, cl); in htb_destroy()
1270 struct htb_class *cl = (struct htb_class *)arg; in htb_delete() local
1278 if (cl->children || cl->filter_cnt) in htb_delete()
1281 if (!cl->level && htb_parent_last_child(cl)) { in htb_delete()
1283 cl->parent->common.classid); in htb_delete()
1289 if (!cl->level) { in htb_delete()
1290 unsigned int qlen = cl->un.leaf.q->q.qlen; in htb_delete()
1291 unsigned int backlog = cl->un.leaf.q->qstats.backlog; in htb_delete()
1293 qdisc_reset(cl->un.leaf.q); in htb_delete()
1294 qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog); in htb_delete()
1298 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1299 if (cl->parent) in htb_delete()
1300 cl->parent->children--; in htb_delete()
1302 if (cl->prio_activity) in htb_delete()
1303 htb_deactivate(q, cl); in htb_delete()
1305 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1306 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1307 &q->hlevel[cl->level].wait_pq); in htb_delete()
1310 htb_parent_to_leaf(q, cl, new_q); in htb_delete()
1312 BUG_ON(--cl->refcnt == 0); in htb_delete()
1324 struct htb_class *cl = (struct htb_class *)arg; in htb_put() local
1326 if (--cl->refcnt == 0) in htb_put()
1327 htb_destroy_class(sch, cl); in htb_put()
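
htb_get()/htb_put() are the classic reference pair, and htb_delete() relies on the caller's reference: the decrement at 1312 can never hit zero because whoever resolved the class (ops->get) still holds one, which is exactly what the BUG_ON asserts; the final put is what actually destroys. A tiny user-space model of that lifetime:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct klass { int refcnt; };

    static struct klass *get(struct klass *cl) { cl->refcnt++; return cl; }

    static void put(struct klass *cl)
    {
        if (--cl->refcnt == 0) {         /* 1326-1327 */
            printf("destroy\n");
            free(cl);
        }
    }

    static void del(struct klass *cl)
    {
        /* unlink from hash, deactivate, requeue parent ... (elided) */
        cl->refcnt--;
        assert(cl->refcnt != 0);         /* 1312: caller still holds a ref */
    }

    int main(void)
    {
        struct klass *cl = calloc(1, sizeof(*cl));
        cl->refcnt = 1;                  /* 1411: creation reference  */
        get(cl);                         /* ops->get() before delete  */
        del(cl);                         /* drops the creation ref    */
        put(cl);                         /* last ref -> destroy       */
        return 0;
    }
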
1336 struct htb_class *cl = (struct htb_class *)*arg, *parent; in htb_change_class() local
1367 if (!cl) { /* new class */ in htb_change_class()
1396 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in htb_change_class()
1397 if (!cl) in htb_change_class()
1401 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1402 &cl->rate_est, in htb_change_class()
1406 kfree(cl); in htb_change_class()
1411 cl->refcnt = 1; in htb_change_class()
1412 cl->children = 0; in htb_change_class()
1413 INIT_LIST_HEAD(&cl->un.leaf.drop_list); in htb_change_class()
1414 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1417 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1447 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1449 cl->common.classid = classid; in htb_change_class()
1450 cl->parent = parent; in htb_change_class()
1453 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1454 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1455 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1456 cl->t_c = ktime_get_ns(); in htb_change_class()
1457 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1460 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1467 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
1468 &cl->rate_est, in htb_change_class()
1481 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
1482 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
1487 if (!cl->level) { in htb_change_class()
1488 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
1491 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
1493 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
1495 cl->common.classid); in htb_change_class()
1496 cl->quantum = 1000; in htb_change_class()
1498 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
1500 cl->common.classid); in htb_change_class()
1501 cl->quantum = 200000; in htb_change_class()
1504 cl->quantum = hopt->quantum; in htb_change_class()
1505 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
1506 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
1509 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1510 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
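
For leaves, htb_change_class() derives the DRR quantum from the class rate when the user didn't supply one. The division by the qdisc's r2q knob (rate2quantum) happens in lines not matched above, so the r2q parameter here is an assumption based on that elided code; the visible lines clamp the derived value into [1000, 200000] bytes with a warning. A sketch:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    static int derive_quantum(uint64_t rate_bytes_ps, uint64_t r2q,
                              int user_quantum)
    {
        uint64_t q;
        int quantum;

        if (user_quantum)
            return user_quantum;         /* 1504: explicit value wins */
        q = rate_bytes_ps / r2q;         /* assumption: elided do_div by r2q */
        if (q > INT_MAX)
            q = INT_MAX;                 /* 1491: min_t(u64, ..., INT_MAX) */
        quantum = (int)q;
        if (quantum < 1000) {            /* 1493-1496 */
            fprintf(stderr, "quantum too small, using 1000\n");
            quantum = 1000;
        }
        if (quantum > 200000) {          /* 1498-1501 */
            fprintf(stderr, "quantum too big, using 200000\n");
            quantum = 200000;
        }
        return quantum;
    }

    int main(void)
    {
        /* 10 Mbit/s leaf (1,250,000 bytes/s) with r2q = 10 */
        printf("%d\n", derive_quantum(1250000, 10, 0));  /* prints 125000 */
        return 0;
    }
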
1516 *arg = (unsigned long)cl; in htb_change_class()
1527 struct htb_class *cl = (struct htb_class *)arg; in htb_find_tcf() local
1528 struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; in htb_find_tcf()
1536 struct htb_class *cl = htb_find(classid, sch); in htb_bind_filter() local
1547 if (cl) in htb_bind_filter()
1548 cl->filter_cnt++; in htb_bind_filter()
1549 return (unsigned long)cl; in htb_bind_filter()
1554 struct htb_class *cl = (struct htb_class *)arg; in htb_unbind_filter() local
1556 if (cl) in htb_unbind_filter()
1557 cl->filter_cnt--; in htb_unbind_filter()
1563 struct htb_class *cl; in htb_walk() local
1570 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()
1575 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in htb_walk()