Lines matching refs: cl

98 	void			(*overlimit)(struct cbq_class *cl);
175 #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) argument
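These references come from the CBQ (Class Based Queueing) scheduler, net/sched/sch_cbq.c. The L2T() macro above (line 175) converts a packet length into its transmission time by indexing the class's precomputed rate table through qdisc_l2t(). A minimal userspace sketch of that kind of lookup, assuming a hypothetical 256-slot table indexed by (len >> cell_log); the struct and field names here are illustrative, not the kernel's exact qdisc_rate_table layout:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's rate table: 256 precomputed
 * transmission times, indexed by packet-size cell. */
struct rate_table {
        uint32_t data[256];
        unsigned int cell_log;          /* log2 of one cell's size in bytes */
};

/* Roughly what L2T()/qdisc_l2t() computes: packet length -> time. */
static uint32_t l2t(const struct rate_table *tab, unsigned int len)
{
        unsigned int slot = len >> tab->cell_log;

        if (slot > 255)
                slot = 255;             /* clamp; the real helper handles overflow */
        return tab->data[slot];
}

int main(void)
{
        struct rate_table tab = { .cell_log = 3 };

        /* Hypothetical 1 Mbit/s link: one byte costs 8 microseconds. */
        for (unsigned int i = 0; i < 256; i++)
                tab.data[i] = ((i + 1) << tab.cell_log) * 8;

        printf("1500 bytes -> %u us\n", l2t(&tab, 1500));
        return 0;
}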
193 struct cbq_class *cl; in cbq_reclassify() local
195 for (cl = this->tparent; cl; cl = cl->tparent) { in cbq_reclassify()
196 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; in cbq_reclassify()
222 struct cbq_class *cl = NULL; in cbq_classify() local
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
232 return cl; in cbq_classify()
247 cl = (void *)res.class; in cbq_classify()
248 if (!cl) { in cbq_classify()
250 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
251 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) in cbq_classify()
252 cl = defmap[TC_PRIO_BESTEFFORT]; in cbq_classify()
254 if (cl == NULL) in cbq_classify()
257 if (cl->level >= head->level) in cbq_classify()
267 return cbq_reclassify(skb, cl); in cbq_classify()
270 if (cl->level == 0) in cbq_classify()
271 return cl; in cbq_classify()
278 head = cl; in cbq_classify()
282 cl = head; in cbq_classify()
288 !(cl = head->defaults[prio & TC_PRIO_MAX]) && in cbq_classify()
289 !(cl = head->defaults[TC_PRIO_BESTEFFORT])) in cbq_classify()
292 return cl; in cbq_classify()
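The classifier above (lines 222-292) falls back to per-priority defaults whenever a filter result or the final leaf search does not name a concrete class. A small, self-contained sketch of that fallback using the real TC_PRIO_* constants but an invented class type:

#include <stdio.h>

#define TC_PRIO_MAX        15
#define TC_PRIO_BESTEFFORT  0

struct cls { const char *name; };

/* Mirrors the fallback around lines 251-252 and 288-289: try the default
 * for the packet's priority, then the best-effort default. */
static struct cls *pick_default(struct cls *defaults[TC_PRIO_MAX + 1],
                                unsigned int prio)
{
        struct cls *cl = defaults[prio & TC_PRIO_MAX];

        if (!cl)
                cl = defaults[TC_PRIO_BESTEFFORT];
        return cl;
}

int main(void)
{
        struct cls be = { "best-effort" };
        struct cls *defaults[TC_PRIO_MAX + 1] = { [TC_PRIO_BESTEFFORT] = &be };

        struct cls *cl = pick_default(defaults, 6);   /* prio 6 has no default */
        printf("fell back to %s\n", cl ? cl->name : "(none)");
        return 0;
}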
301 static inline void cbq_activate_class(struct cbq_class *cl) in cbq_activate_class() argument
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class()
304 int prio = cl->cpriority; in cbq_activate_class()
308 q->active[prio] = cl; in cbq_activate_class()
311 cl->next_alive = cl_tail->next_alive; in cbq_activate_class()
312 cl_tail->next_alive = cl; in cbq_activate_class()
314 cl->next_alive = cl; in cbq_activate_class()
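cbq_activate_class() (lines 301-314) links the class into a circular singly linked list of alive classes for its priority, with q->active[prio] kept pointing at the tail. A minimal sketch of that insertion pattern on a toy node type (names hypothetical):

#include <stdio.h>

struct node {
        int id;
        struct node *next_alive;        /* circular: the tail points back at the head */
};

/* Append n behind the current tail and make it the new tail, mirroring
 * the cl_tail/next_alive handling in cbq_activate_class(). */
static struct node *activate(struct node *tail, struct node *n)
{
        if (tail) {
                n->next_alive = tail->next_alive;   /* new node points at the head */
                tail->next_alive = n;
        } else {
                n->next_alive = n;                  /* one-element ring */
        }
        return n;                                   /* new tail */
}

int main(void)
{
        struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
        struct node *tail = NULL;

        tail = activate(tail, &a);
        tail = activate(tail, &b);
        tail = activate(tail, &c);

        struct node *p = tail->next_alive;          /* start at the head */
        do {
                printf("%d ", p->id);
                p = p->next_alive;
        } while (p != tail->next_alive);
        printf("\n");                               /* prints: 1 2 3 */
        return 0;
}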
329 struct cbq_class *cl; in cbq_deactivate_class() local
333 cl = cl_prev->next_alive; in cbq_deactivate_class()
334 if (cl == this) { in cbq_deactivate_class()
335 cl_prev->next_alive = cl->next_alive; in cbq_deactivate_class()
336 cl->next_alive = NULL; in cbq_deactivate_class()
338 if (cl == q->active[prio]) { in cbq_deactivate_class()
340 if (cl == q->active[prio]) { in cbq_deactivate_class()
348 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
352 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
356 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { in cbq_mark_toplevel()
360 if (cl->undertime < now) { in cbq_mark_toplevel()
361 q->toplevel = cl->level; in cbq_mark_toplevel()
364 } while ((cl = cl->borrow) != NULL && toplevel > cl->level); in cbq_mark_toplevel()
373 struct cbq_class *cl = cbq_classify(skb, sch, &ret); in cbq_enqueue() local
376 q->rx_class = cl; in cbq_enqueue()
378 if (cl == NULL) { in cbq_enqueue()
386 cl->q->__parent = sch; in cbq_enqueue()
388 ret = qdisc_enqueue(skb, cl->q); in cbq_enqueue()
391 cbq_mark_toplevel(q, cl); in cbq_enqueue()
392 if (!cl->next_alive) in cbq_enqueue()
393 cbq_activate_class(cl); in cbq_enqueue()
399 cbq_mark_toplevel(q, cl); in cbq_enqueue()
400 cl->qstats.drops++; in cbq_enqueue()
409 static void cbq_ovl_classic(struct cbq_class *cl) in cbq_ovl_classic() argument
411 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_classic()
412 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_classic()
414 if (!cl->delayed) { in cbq_ovl_classic()
415 delay += cl->offtime; in cbq_ovl_classic()
424 if (cl->avgidle < 0) in cbq_ovl_classic()
425 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_ovl_classic()
426 if (cl->avgidle < cl->minidle) in cbq_ovl_classic()
427 cl->avgidle = cl->minidle; in cbq_ovl_classic()
430 cl->undertime = q->now + delay; in cbq_ovl_classic()
432 cl->xstats.overactions++; in cbq_ovl_classic()
433 cl->delayed = 1; in cbq_ovl_classic()
446 for (b = cl->borrow; b; b = b->borrow) { in cbq_ovl_classic()
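In cbq_ovl_classic() (lines 409-446), the penalty applied to a freshly overlimit class is offtime reduced by the decayed negative average idle time: delay += offtime; delay -= (-avgidle) - ((-avgidle) >> ewma_log). A worked arithmetic sketch with invented numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values, all in scheduler ticks. */
        long undertime_minus_now = 500;   /* class becomes eligible 500 ticks from now */
        long offtime = 2000;              /* configured extra penalty when first delayed */
        long avgidle = -300;              /* negative: the class has exceeded its rate */
        int ewma_log = 5;

        long delay = undertime_minus_now;

        delay += offtime;
        if (avgidle < 0)
                delay -= (-avgidle) - ((-avgidle) >> ewma_log);

        printf("delay = %ld ticks\n", delay);   /* 500 + 2000 - (300 - 9) = 2209 */
        return 0;
}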
463 static void cbq_ovl_rclassic(struct cbq_class *cl) in cbq_ovl_rclassic() argument
465 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_rclassic()
466 struct cbq_class *this = cl; in cbq_ovl_rclassic()
469 if (cl->level > q->toplevel) { in cbq_ovl_rclassic()
470 cl = NULL; in cbq_ovl_rclassic()
473 } while ((cl = cl->borrow) != NULL); in cbq_ovl_rclassic()
475 if (cl == NULL) in cbq_ovl_rclassic()
476 cl = this; in cbq_ovl_rclassic()
477 cbq_ovl_classic(cl); in cbq_ovl_rclassic()
482 static void cbq_ovl_delay(struct cbq_class *cl) in cbq_ovl_delay() argument
484 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_delay()
485 psched_tdiff_t delay = cl->undertime - q->now; in cbq_ovl_delay()
488 &qdisc_root_sleeping(cl->qdisc)->state)) in cbq_ovl_delay()
491 if (!cl->delayed) { in cbq_ovl_delay()
495 delay += cl->offtime; in cbq_ovl_delay()
496 if (cl->avgidle < 0) in cbq_ovl_delay()
497 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_ovl_delay()
498 if (cl->avgidle < cl->minidle) in cbq_ovl_delay()
499 cl->avgidle = cl->minidle; in cbq_ovl_delay()
500 cl->undertime = q->now + delay; in cbq_ovl_delay()
503 sched += delay + cl->penalty; in cbq_ovl_delay()
504 cl->penalized = sched; in cbq_ovl_delay()
505 cl->cpriority = TC_CBQ_MAXPRIO; in cbq_ovl_delay()
515 cl->delayed = 1; in cbq_ovl_delay()
516 cl->xstats.overactions++; in cbq_ovl_delay()
527 static void cbq_ovl_lowprio(struct cbq_class *cl) in cbq_ovl_lowprio() argument
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_ovl_lowprio()
531 cl->penalized = q->now + cl->penalty; in cbq_ovl_lowprio()
533 if (cl->cpriority != cl->priority2) { in cbq_ovl_lowprio()
534 cl->cpriority = cl->priority2; in cbq_ovl_lowprio()
535 q->pmask |= (1<<cl->cpriority); in cbq_ovl_lowprio()
536 cl->xstats.overactions++; in cbq_ovl_lowprio()
538 cbq_ovl_classic(cl); in cbq_ovl_lowprio()
543 static void cbq_ovl_drop(struct cbq_class *cl) in cbq_ovl_drop() argument
545 if (cl->q->ops->drop) in cbq_ovl_drop()
546 if (cl->q->ops->drop(cl->q)) in cbq_ovl_drop()
547 cl->qdisc->q.qlen--; in cbq_ovl_drop()
548 cl->xstats.overactions++; in cbq_ovl_drop()
549 cbq_ovl_classic(cl); in cbq_ovl_drop()
555 struct cbq_class *cl; in cbq_undelay_prio() local
563 cl = cl_prev->next_alive; in cbq_undelay_prio()
564 if (now - cl->penalized > 0) { in cbq_undelay_prio()
565 cl_prev->next_alive = cl->next_alive; in cbq_undelay_prio()
566 cl->next_alive = NULL; in cbq_undelay_prio()
567 cl->cpriority = cl->priority; in cbq_undelay_prio()
568 cl->delayed = 0; in cbq_undelay_prio()
569 cbq_activate_class(cl); in cbq_undelay_prio()
571 if (cl == q->active[prio]) { in cbq_undelay_prio()
573 if (cl == q->active[prio]) { in cbq_undelay_prio()
579 cl = cl_prev->next_alive; in cbq_undelay_prio()
580 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
581 sched = cl->penalized; in cbq_undelay_prio()
582 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
633 struct cbq_class *cl = q->rx_class; in cbq_reshape_fail() local
637 if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) { in cbq_reshape_fail()
640 cbq_mark_toplevel(q, cl); in cbq_reshape_fail()
642 q->rx_class = cl; in cbq_reshape_fail()
643 cl->q->__parent = sch; in cbq_reshape_fail()
645 ret = qdisc_enqueue(skb, cl->q); in cbq_reshape_fail()
648 if (!cl->next_alive) in cbq_reshape_fail()
649 cbq_activate_class(cl); in cbq_reshape_fail()
672 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
675 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
676 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
697 struct cbq_class *cl = this; in cbq_update() local
707 for ( ; cl; cl = cl->share) { in cbq_update()
708 long avgidle = cl->avgidle; in cbq_update()
711 cl->bstats.packets++; in cbq_update()
712 cl->bstats.bytes += len; in cbq_update()
721 idle = now - cl->last; in cbq_update()
723 avgidle = cl->maxidle; in cbq_update()
725 idle -= L2T(cl, len); in cbq_update()
732 avgidle += idle - (avgidle>>cl->ewma_log); in cbq_update()
738 if (avgidle < cl->minidle) in cbq_update()
739 avgidle = cl->minidle; in cbq_update()
741 cl->avgidle = avgidle; in cbq_update()
751 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); in cbq_update()
763 idle += L2T(cl, len); in cbq_update()
765 cl->undertime = now + idle; in cbq_update()
769 cl->undertime = PSCHED_PASTPERFECT; in cbq_update()
770 if (avgidle > cl->maxidle) in cbq_update()
771 cl->avgidle = cl->maxidle; in cbq_update()
773 cl->avgidle = avgidle; in cbq_update()
775 if ((s64)(now - cl->last) > 0) in cbq_update()
776 cl->last = now; in cbq_update()
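cbq_update() (lines 697-776) keeps the per-class average idle time as an exponentially weighted moving average: idle = (now - last) - L2T(cl, len), then avgidle += idle - (avgidle >> ewma_log), clamped between minidle and maxidle. A compact, runnable sketch of just that estimator with invented traffic:

#include <stdio.h>

/* One EWMA step as in cbq_update(): positive avgidle means the class is
 * under its allotted rate, negative means it is running over. */
static long update_avgidle(long avgidle, long idle, int ewma_log,
                           long minidle, long maxidle)
{
        avgidle += idle - (avgidle >> ewma_log);
        if (avgidle < minidle)
                avgidle = minidle;
        if (avgidle > maxidle)
                avgidle = maxidle;
        return avgidle;
}

int main(void)
{
        long avgidle = 0, maxidle = 5000, minidle = -5000;
        int ewma_log = 5;

        /* Pretend every packet takes 100 ticks to transmit (its L2T value)
         * and packets leave with the gaps below between them. */
        long gaps[] = { 150, 80, 60, 200, 90 };

        for (int i = 0; i < 5; i++) {
                long idle = gaps[i] - 100;      /* gap minus transmission time */
                avgidle = update_avgidle(avgidle, idle, ewma_log,
                                         minidle, maxidle);
                printf("step %d: idle=%ld avgidle=%ld\n", i, idle, avgidle);
        }
        return 0;
}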
783 cbq_under_limit(struct cbq_class *cl) in cbq_under_limit() argument
785 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit()
786 struct cbq_class *this_cl = cl; in cbq_under_limit()
788 if (cl->tparent == NULL) in cbq_under_limit()
789 return cl; in cbq_under_limit()
791 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
792 cl->delayed = 0; in cbq_under_limit()
793 return cl; in cbq_under_limit()
807 cl = cl->borrow; in cbq_under_limit()
808 if (!cl) { in cbq_under_limit()
813 if (cl->level > q->toplevel) in cbq_under_limit()
815 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
817 cl->delayed = 0; in cbq_under_limit()
818 return cl; in cbq_under_limit()
825 struct cbq_class *cl_tail, *cl_prev, *cl; in cbq_dequeue_prio() local
830 cl = cl_prev->next_alive; in cbq_dequeue_prio()
837 struct cbq_class *borrow = cl; in cbq_dequeue_prio()
839 if (cl->q->q.qlen && in cbq_dequeue_prio()
840 (borrow = cbq_under_limit(cl)) == NULL) in cbq_dequeue_prio()
843 if (cl->deficit <= 0) { in cbq_dequeue_prio()
848 cl->deficit += cl->quantum; in cbq_dequeue_prio()
852 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
861 cl->deficit -= qdisc_pkt_len(skb); in cbq_dequeue_prio()
862 q->tx_class = cl; in cbq_dequeue_prio()
864 if (borrow != cl) { in cbq_dequeue_prio()
867 cl->xstats.borrows++; in cbq_dequeue_prio()
870 cl->xstats.borrows += qdisc_pkt_len(skb); in cbq_dequeue_prio()
875 if (cl->deficit <= 0) { in cbq_dequeue_prio()
876 q->active[prio] = cl; in cbq_dequeue_prio()
877 cl = cl->next_alive; in cbq_dequeue_prio()
878 cl->deficit += cl->quantum; in cbq_dequeue_prio()
883 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
887 cl_prev->next_alive = cl->next_alive; in cbq_dequeue_prio()
888 cl->next_alive = NULL; in cbq_dequeue_prio()
891 if (cl == cl_tail) { in cbq_dequeue_prio()
896 if (cl == cl_tail) { in cbq_dequeue_prio()
900 if (cl->q->q.qlen) in cbq_dequeue_prio()
901 cbq_activate_class(cl); in cbq_dequeue_prio()
907 if (cl->q->q.qlen) in cbq_dequeue_prio()
908 cbq_activate_class(cl); in cbq_dequeue_prio()
910 cl = cl_prev; in cbq_dequeue_prio()
914 cl_prev = cl; in cbq_dequeue_prio()
915 cl = cl->next_alive; in cbq_dequeue_prio()
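The dequeue loop above (lines 825-915) is weighted round robin with a deficit counter: a class spends deficit as it transmits (deficit -= qdisc_pkt_len(skb)) and is topped up with its quantum once the deficit runs out. A stripped-down sketch of that accounting, detached from any qdisc machinery:

#include <stdio.h>

int main(void)
{
        /* Two hypothetical classes with different quanta (bytes per round). */
        long quantum[2] = { 1500, 500 };
        long deficit[2] = { 1500, 500 };
        int pending[2] = { 4, 4 };              /* packets left per class */
        int pkt_len = 600;

        for (int round = 0; pending[0] || pending[1]; round++) {
                for (int c = 0; c < 2; c++) {
                        /* Send while this class has packets and deficit left. */
                        while (pending[c] && deficit[c] > 0) {
                                deficit[c] -= pkt_len;
                                pending[c]--;
                                printf("round %d: class %d sends %d bytes (deficit %ld)\n",
                                       round, c, pkt_len, deficit[c]);
                        }
                        if (pending[c])
                                deficit[c] += quantum[c];       /* replenish */
                }
        }
        return 0;
}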
1015 struct cbq_class *cl; in cbq_adjust_levels() local
1017 cl = this->children; in cbq_adjust_levels()
1018 if (cl) { in cbq_adjust_levels()
1020 if (cl->level > level) in cbq_adjust_levels()
1021 level = cl->level; in cbq_adjust_levels()
1022 } while ((cl = cl->sibling) != this->children); in cbq_adjust_levels()
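cbq_adjust_levels() (lines 1015-1022) scans the circular sibling list of a class's children and records the parent's level as one more than the deepest child; leaf classes sit at level 0. A small recursive sketch of the same idea on a toy tree (names hypothetical):

#include <stdio.h>

struct tnode {
        int level;
        struct tnode *child[2];         /* toy fan-out instead of the sibling ring */
};

/* Set every node's level to 0 for leaves, or 1 + deepest child otherwise. */
static int adjust_levels(struct tnode *n)
{
        int deepest = -1;

        for (int i = 0; i < 2; i++)
                if (n->child[i]) {
                        int l = adjust_levels(n->child[i]);
                        if (l > deepest)
                                deepest = l;
                }
        n->level = deepest + 1;         /* leaves end up at 0 */
        return n->level;
}

int main(void)
{
        struct tnode leaf1 = { 0, { NULL, NULL } };
        struct tnode leaf2 = { 0, { NULL, NULL } };
        struct tnode mid   = { 0, { &leaf1, NULL } };
        struct tnode root  = { 0, { &mid, &leaf2 } };

        adjust_levels(&root);
        printf("root level = %d\n", root.level);        /* 2 */
        return 0;
}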
1030 struct cbq_class *cl; in cbq_normalize_quanta() local
1037 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
1041 if (cl->priority == prio) { in cbq_normalize_quanta()
1042 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
1045 if (cl->quantum <= 0 || in cbq_normalize_quanta()
1046 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) { in cbq_normalize_quanta()
1048 cl->common.classid, cl->quantum); in cbq_normalize_quanta()
1049 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; in cbq_normalize_quanta()
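cbq_normalize_quanta() (lines 1030-1049) recomputes each class's WRR quantum as weight * allot * nclasses[prio] / quanta[prio], where quanta[prio] is the sum of the weights at that priority, and falls back to mtu/2 + 1 when the result is nonsensical. A worked example with invented numbers:

#include <stdio.h>

int main(void)
{
        /* Three hypothetical classes sharing one priority level. */
        long weight[3] = { 1000, 2000, 1000 };
        long allot[3]  = { 1514, 1514, 1514 };
        int nclasses = 3;
        int mtu = 1500;

        long quanta_sum = weight[0] + weight[1] + weight[2];   /* 4000 */

        for (int i = 0; i < 3; i++) {
                long quantum = weight[i] * allot[i] * nclasses / quanta_sum;

                /* Same sanity clamp as the kernel: reject <= 0 or > 32*MTU. */
                if (quantum <= 0 || quantum > 32 * mtu)
                        quantum = mtu / 2 + 1;
                printf("class %d: quantum = %ld bytes\n", i, quantum);
        }
        return 0;
}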
1055 static void cbq_sync_defmap(struct cbq_class *cl) in cbq_sync_defmap() argument
1057 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap()
1058 struct cbq_class *split = cl->split; in cbq_sync_defmap()
1066 if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) in cbq_sync_defmap()
1091 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask) in cbq_change_defmap() argument
1096 split = cl->split; in cbq_change_defmap()
1103 for (split = cl->tparent; split; split = split->tparent) in cbq_change_defmap()
1111 if (cl->split != split) { in cbq_change_defmap()
1112 cl->defmap = 0; in cbq_change_defmap()
1113 cbq_sync_defmap(cl); in cbq_change_defmap()
1114 cl->split = split; in cbq_change_defmap()
1115 cl->defmap = def & mask; in cbq_change_defmap()
1117 cl->defmap = (cl->defmap & ~mask) | (def & mask); in cbq_change_defmap()
1119 cbq_sync_defmap(cl); in cbq_change_defmap()
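cbq_change_defmap() (lines 1091-1119) merges the requested default map into the existing one bit by bit, defmap = (defmap & ~mask) | (def & mask), so only the priorities named in mask are touched. A tiny demonstration of that masked update:

#include <stdio.h>

int main(void)
{
        unsigned int defmap = 0x00f3;   /* currently covers priorities 0, 1 and 4-7 */
        unsigned int def    = 0x0004;   /* request: map priority 2 here */
        unsigned int mask   = 0x000c;   /* ...while only touching priorities 2-3 */

        defmap = (defmap & ~mask) | (def & mask);

        printf("defmap = 0x%04x\n", defmap);    /* 0x00f7: bit 2 set, bit 3 left clear */
        return 0;
}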
1124 struct cbq_class *cl, **clp; in cbq_unlink_class() local
1131 cl = *clp; in cbq_unlink_class()
1133 if (cl == this) { in cbq_unlink_class()
1134 *clp = cl->sibling; in cbq_unlink_class()
1137 clp = &cl->sibling; in cbq_unlink_class()
1138 } while ((cl = *clp) != this->sibling); in cbq_unlink_class()
1172 struct cbq_class *cl, *cl_head; in cbq_drop() local
1181 cl = cl_head; in cbq_drop()
1183 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) { in cbq_drop()
1185 if (!cl->q->q.qlen) in cbq_drop()
1186 cbq_deactivate_class(cl); in cbq_drop()
1189 } while ((cl = cl->next_alive) != cl_head); in cbq_drop()
1198 struct cbq_class *cl; in cbq_reset() local
1215 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1216 qdisc_reset(cl->q); in cbq_reset()
1218 cl->next_alive = NULL; in cbq_reset()
1219 cl->undertime = PSCHED_PASTPERFECT; in cbq_reset()
1220 cl->avgidle = cl->maxidle; in cbq_reset()
1221 cl->deficit = cl->quantum; in cbq_reset()
1222 cl->cpriority = cl->priority; in cbq_reset()
1229 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) in cbq_set_lss() argument
1232 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; in cbq_set_lss()
1233 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; in cbq_set_lss()
1236 cl->ewma_log = lss->ewma_log; in cbq_set_lss()
1238 cl->avpkt = lss->avpkt; in cbq_set_lss()
1240 cl->minidle = -(long)lss->minidle; in cbq_set_lss()
1242 cl->maxidle = lss->maxidle; in cbq_set_lss()
1243 cl->avgidle = lss->maxidle; in cbq_set_lss()
1246 cl->offtime = lss->offtime; in cbq_set_lss()
1250 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1252 q->nclasses[cl->priority]--; in cbq_rmprio()
1253 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1254 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1257 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1259 q->nclasses[cl->priority]++; in cbq_addprio()
1260 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1261 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1264 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) in cbq_set_wrr() argument
1266 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr()
1269 cl->allot = wrr->allot; in cbq_set_wrr()
1271 cl->weight = wrr->weight; in cbq_set_wrr()
1273 cl->priority = wrr->priority - 1; in cbq_set_wrr()
1274 cl->cpriority = cl->priority; in cbq_set_wrr()
1275 if (cl->priority >= cl->priority2) in cbq_set_wrr()
1276 cl->priority2 = TC_CBQ_MAXPRIO - 1; in cbq_set_wrr()
1279 cbq_addprio(q, cl); in cbq_set_wrr()
1283 static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl) in cbq_set_overlimit() argument
1287 cl->overlimit = cbq_ovl_classic; in cbq_set_overlimit()
1290 cl->overlimit = cbq_ovl_delay; in cbq_set_overlimit()
1294 ovl->priority2 - 1 <= cl->priority) in cbq_set_overlimit()
1296 cl->priority2 = ovl->priority2 - 1; in cbq_set_overlimit()
1297 cl->overlimit = cbq_ovl_lowprio; in cbq_set_overlimit()
1300 cl->overlimit = cbq_ovl_drop; in cbq_set_overlimit()
1303 cl->overlimit = cbq_ovl_rclassic; in cbq_set_overlimit()
1308 cl->penalty = ovl->penalty; in cbq_set_overlimit()
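cbq_set_overlimit() (lines 1283-1308) simply points the class's overlimit hook (the function pointer declared at line 98) at one of the strategies listed earlier: cbq_ovl_classic, cbq_ovl_delay, cbq_ovl_lowprio, cbq_ovl_drop or cbq_ovl_rclassic. A minimal sketch of that strategy dispatch with hypothetical names:

#include <stdio.h>

struct klass {
        void (*overlimit)(struct klass *cl);    /* mirrors the hook at line 98 */
        const char *name;
};

static void ovl_classic(struct klass *cl) { printf("%s: classic delay\n", cl->name); }
static void ovl_drop(struct klass *cl)    { printf("%s: drop a packet\n", cl->name); }

enum strategy { STRAT_CLASSIC, STRAT_DROP };

static void set_overlimit(struct klass *cl, enum strategy s)
{
        switch (s) {
        case STRAT_CLASSIC: cl->overlimit = ovl_classic; break;
        case STRAT_DROP:    cl->overlimit = ovl_drop;    break;
        }
}

int main(void)
{
        struct klass cl = { .name = "cl1" };

        set_overlimit(&cl, STRAT_DROP);
        cl.overlimit(&cl);      /* invoked when the class exceeds its rate */
        return 0;
}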
1313 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p) in cbq_set_police() argument
1315 cl->police = p->police; in cbq_set_police()
1317 if (cl->q->handle) { in cbq_set_police()
1319 cl->q->reshape_fail = cbq_reshape_fail; in cbq_set_police()
1321 cl->q->reshape_fail = NULL; in cbq_set_police()
1327 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) in cbq_set_fopt() argument
1329 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); in cbq_set_fopt()
1407 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_rate() argument
1411 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) in cbq_dump_rate()
1420 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_lss() argument
1426 if (cl->borrow == NULL) in cbq_dump_lss()
1428 if (cl->share == NULL) in cbq_dump_lss()
1430 opt.ewma_log = cl->ewma_log; in cbq_dump_lss()
1431 opt.level = cl->level; in cbq_dump_lss()
1432 opt.avpkt = cl->avpkt; in cbq_dump_lss()
1433 opt.maxidle = cl->maxidle; in cbq_dump_lss()
1434 opt.minidle = (u32)(-cl->minidle); in cbq_dump_lss()
1435 opt.offtime = cl->offtime; in cbq_dump_lss()
1446 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_wrr() argument
1453 opt.allot = cl->allot; in cbq_dump_wrr()
1454 opt.priority = cl->priority + 1; in cbq_dump_wrr()
1455 opt.cpriority = cl->cpriority + 1; in cbq_dump_wrr()
1456 opt.weight = cl->weight; in cbq_dump_wrr()
1466 static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_ovl() argument
1471 opt.strategy = cl->ovl_strategy; in cbq_dump_ovl()
1472 opt.priority2 = cl->priority2 + 1; in cbq_dump_ovl()
1474 opt.penalty = cl->penalty; in cbq_dump_ovl()
1484 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_fopt() argument
1489 if (cl->split || cl->defmap) { in cbq_dump_fopt()
1490 opt.split = cl->split ? cl->split->common.classid : 0; in cbq_dump_fopt()
1491 opt.defmap = cl->defmap; in cbq_dump_fopt()
1504 static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_police() argument
1509 if (cl->police) { in cbq_dump_police()
1510 opt.police = cl->police; in cbq_dump_police()
1524 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_attr() argument
1526 if (cbq_dump_lss(skb, cl) < 0 || in cbq_dump_attr()
1527 cbq_dump_rate(skb, cl) < 0 || in cbq_dump_attr()
1528 cbq_dump_wrr(skb, cl) < 0 || in cbq_dump_attr()
1529 cbq_dump_ovl(skb, cl) < 0 || in cbq_dump_attr()
1531 cbq_dump_police(skb, cl) < 0 || in cbq_dump_attr()
1533 cbq_dump_fopt(skb, cl) < 0) in cbq_dump_attr()
1568 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class() local
1571 if (cl->tparent) in cbq_dump_class()
1572 tcm->tcm_parent = cl->tparent->common.classid; in cbq_dump_class()
1575 tcm->tcm_handle = cl->common.classid; in cbq_dump_class()
1576 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1581 if (cbq_dump_attr(skb, cl) < 0) in cbq_dump_class()
1595 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class_stats() local
1597 cl->xstats.avgidle = cl->avgidle; in cbq_dump_class_stats()
1598 cl->xstats.undertime = 0; in cbq_dump_class_stats()
1600 if (cl->undertime != PSCHED_PASTPERFECT) in cbq_dump_class_stats()
1601 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1603 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || in cbq_dump_class_stats()
1604 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || in cbq_dump_class_stats()
1605 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) in cbq_dump_class_stats()
1608 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in cbq_dump_class_stats()
1614 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_graft() local
1618 &pfifo_qdisc_ops, cl->common.classid); in cbq_graft()
1623 if (cl->police == TC_POLICE_RECLASSIFY) in cbq_graft()
1628 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1634 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_leaf() local
1636 return cl->q; in cbq_leaf()
1641 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_qlen_notify() local
1643 if (cl->q->q.qlen == 0) in cbq_qlen_notify()
1644 cbq_deactivate_class(cl); in cbq_qlen_notify()
1650 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_get() local
1652 if (cl) { in cbq_get()
1653 cl->refcnt++; in cbq_get()
1654 return (unsigned long)cl; in cbq_get()
1659 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) in cbq_destroy_class() argument
1663 WARN_ON(cl->filters); in cbq_destroy_class()
1665 tcf_destroy_chain(&cl->filter_list); in cbq_destroy_class()
1666 qdisc_destroy(cl->q); in cbq_destroy_class()
1667 qdisc_put_rtab(cl->R_tab); in cbq_destroy_class()
1668 gen_kill_estimator(&cl->bstats, &cl->rate_est); in cbq_destroy_class()
1669 if (cl != &q->link) in cbq_destroy_class()
1670 kfree(cl); in cbq_destroy_class()
1677 struct cbq_class *cl; in cbq_destroy() local
1689 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) in cbq_destroy()
1690 tcf_destroy_chain(&cl->filter_list); in cbq_destroy()
1693 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1695 cbq_destroy_class(sch, cl); in cbq_destroy()
1702 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_put() local
1704 if (--cl->refcnt == 0) { in cbq_put()
1710 if (q->rx_class == cl) in cbq_put()
1715 cbq_destroy_class(sch, cl); in cbq_put()
1725 struct cbq_class *cl = (struct cbq_class *)*arg; in cbq_change_class() local
1738 if (cl) { in cbq_change_class()
1741 if (cl->tparent && in cbq_change_class()
1742 cl->tparent->common.classid != parentid) in cbq_change_class()
1744 if (!cl->tparent && parentid != TC_H_ROOT) in cbq_change_class()
1756 err = gen_replace_estimator(&cl->bstats, NULL, in cbq_change_class()
1757 &cl->rate_est, in cbq_change_class()
1769 if (cl->next_alive != NULL) in cbq_change_class()
1770 cbq_deactivate_class(cl); in cbq_change_class()
1773 qdisc_put_rtab(cl->R_tab); in cbq_change_class()
1774 cl->R_tab = rtab; in cbq_change_class()
1778 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1781 cbq_rmprio(q, cl); in cbq_change_class()
1782 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1786 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); in cbq_change_class()
1790 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); in cbq_change_class()
1794 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1796 if (cl->q->q.qlen) in cbq_change_class()
1797 cbq_activate_class(cl); in cbq_change_class()
1845 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in cbq_change_class()
1846 if (cl == NULL) in cbq_change_class()
1850 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, in cbq_change_class()
1854 kfree(cl); in cbq_change_class()
1859 cl->R_tab = rtab; in cbq_change_class()
1861 cl->refcnt = 1; in cbq_change_class()
1862 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); in cbq_change_class()
1863 if (!cl->q) in cbq_change_class()
1864 cl->q = &noop_qdisc; in cbq_change_class()
1865 cl->common.classid = classid; in cbq_change_class()
1866 cl->tparent = parent; in cbq_change_class()
1867 cl->qdisc = sch; in cbq_change_class()
1868 cl->allot = parent->allot; in cbq_change_class()
1869 cl->quantum = cl->allot; in cbq_change_class()
1870 cl->weight = cl->R_tab->rate.rate; in cbq_change_class()
1873 cbq_link_class(cl); in cbq_change_class()
1874 cl->borrow = cl->tparent; in cbq_change_class()
1875 if (cl->tparent != &q->link) in cbq_change_class()
1876 cl->share = cl->tparent; in cbq_change_class()
1878 cl->minidle = -0x7FFFFFFF; in cbq_change_class()
1879 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1880 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1881 if (cl->ewma_log == 0) in cbq_change_class()
1882 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1883 if (cl->maxidle == 0) in cbq_change_class()
1884 cl->maxidle = q->link.maxidle; in cbq_change_class()
1885 if (cl->avpkt == 0) in cbq_change_class()
1886 cl->avpkt = q->link.avpkt; in cbq_change_class()
1887 cl->overlimit = cbq_ovl_classic; in cbq_change_class()
1889 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); in cbq_change_class()
1892 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); in cbq_change_class()
1895 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1900 *arg = (unsigned long)cl; in cbq_change_class()
1911 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_delete() local
1914 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1919 qlen = cl->q->q.qlen; in cbq_delete()
1920 backlog = cl->q->qstats.backlog; in cbq_delete()
1921 qdisc_reset(cl->q); in cbq_delete()
1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog); in cbq_delete()
1924 if (cl->next_alive) in cbq_delete()
1925 cbq_deactivate_class(cl); in cbq_delete()
1927 if (q->tx_borrowed == cl) in cbq_delete()
1929 if (q->tx_class == cl) { in cbq_delete()
1934 if (q->rx_class == cl) in cbq_delete()
1938 cbq_unlink_class(cl); in cbq_delete()
1939 cbq_adjust_levels(cl->tparent); in cbq_delete()
1940 cl->defmap = 0; in cbq_delete()
1941 cbq_sync_defmap(cl); in cbq_delete()
1943 cbq_rmprio(q, cl); in cbq_delete()
1946 BUG_ON(--cl->refcnt == 0); in cbq_delete()
1959 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_find_tcf() local
1961 if (cl == NULL) in cbq_find_tcf()
1962 cl = &q->link; in cbq_find_tcf()
1964 return &cl->filter_list; in cbq_find_tcf()
1972 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter() local
1974 if (cl) { in cbq_bind_filter()
1975 if (p && p->level <= cl->level) in cbq_bind_filter()
1977 cl->filters++; in cbq_bind_filter()
1978 return (unsigned long)cl; in cbq_bind_filter()
1985 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_unbind_filter() local
1987 cl->filters--; in cbq_unbind_filter()
1993 struct cbq_class *cl; in cbq_walk() local
2000 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
2005 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in cbq_walk()