/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
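/*
 * e.g. with PSCHED_SHIFT 10 one tick is 1.024us, so a rate of 10 Mbit/s
 * (m = 1250000 bytes/s) advances by 1.28 bytes per tick; scaled by
 * SM_SHIFT 20 it is stored as sm = 1.28 * 2^20 ~= 1342177, and its
 * inverse slope as ism = 0.78125 * 2^18 ~= 204800, so both keep several
 * significant decimal digits in integer form (see the table in front of
 * the SM_SHIFT/ISM_SHIFT definitions below).
 */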

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto __rcu *filter_list; /* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};
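
/*
 * summary of how the configured curves drive the runtime state: cl_rsc
 * feeds cl_deadline and cl_eligible (real-time criterion, work charged
 * to cl_cumul), cl_fsc feeds cl_virtual (link-sharing criterion, work
 * charged to cl_total), and cl_usc feeds cl_ulimit, which bounds
 * link-sharing through cl_myf/cl_f.
 */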

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
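
/*
 * note: the rb-tree insertions here (eltree above, vttree and cftree
 * below) all send equal keys to the right, so among classes with equal
 * keys rb_first() returns the one that was inserted first.
 */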

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 *  sm and ism are scaled in order to keep effective digits.
 *  SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 *  digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125      7.8125    0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
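
/*
 * seg_x2y() computes y = (x * sm) >> SM_SHIFT by splitting x into its
 * high and low SM_SHIFT bits: since x = ((x >> SM_SHIFT) << SM_SHIFT)
 * + (x & SM_MASK), and the high part has all-zero low bits,
 *
 *	(x * sm) >> SM_SHIFT
 *	    == (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT)
 *
 * holds exactly, while each partial product stays within 64 bits for
 * realistic x and sm. seg_y2x() uses the same trick with ISM_SHIFT.
 */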

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection (amount of work) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
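
/*
 * in the concave case of rtsc_min() below, the intersection offset dx
 * follows from equating the new curve's first segment (slope sm1 from
 * (x, y)) with the current curve, which (as used here, rtsc and isc
 * share the same slopes) runs parallel at slope sm2 but (y1 - y)
 * higher at x:
 *
 *	dx * sm1 == dx * sm2 + (y1 - y)
 *	dx == (y1 - y) / (sm1 - sm2)
 *
 * the extra << SM_SHIFT compensates for the scaling of the slopes.
 */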
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
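
/*
 * activate a class for link-sharing: walk up the hierarchy, and for
 * every ancestor that was passive pick a fresh vt (the average of the
 * parent's cvtmin and the maximum sibling vt, or a new backlog period
 * when no sibling is active), update the virtual and upperlimit
 * curves, and insert the class into its parent's vttree and cftree.
 */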
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}
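
/*
 * charge len bytes of work to the class and all of its ancestors and
 * recompute their vt and f.  when the leaf has just gone empty
 * (go_passive), each class whose last active child went away is
 * removed from its parent's vttree/cftree, recording cvtmax for the
 * next backlog period.
 */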
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
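
/*
 * create a new class or change an existing one.  a curve whose m1 and
 * m2 are both zero counts as absent, and at least one of rsc and fsc
 * must be given for a new class.  when a leaf gains its first child,
 * its queue is purged, since packets may no longer sit on inner
 * classes.
 */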
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			spinlock_t *lock = qdisc_root_sleeping_lock(sch);

			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    lock,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
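
/*
 * classify a packet: a direct match of skb->priority against a leaf
 * classid wins; otherwise the filter chains are walked from the root
 * downwards (a filter may only point further down the hierarchy) until
 * a leaf is hit, falling back to the default class when classification
 * fails.
 */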
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto __rcu **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}
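
/*
 * dump one service curve, converting the internal representation back
 * to the external one; the m2sm()/sm2m() and d2dx()/dx2d() round trip
 * can lose a little precision to rounding in the conversions.
 */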
static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
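
/*
 * arm the watchdog for the earliest moment something can become
 * sendable again: the minimum of the next eligible time and the
 * root's cfmin (the earliest fit-time of an upper-limited class).
 */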
static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;
	struct hfsc_class *cl;
	unsigned int i;

	sch->qstats.backlog = 0;
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			sch->qstats.backlog += cl->qdisc->qstats.backlog;
	}

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
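
/*
 * dequeue: real-time traffic first (the eligible class with the
 * smallest deadline), link-sharing otherwise (the fitting leaf with
 * the smallest virtual time).  only work done under the real-time
 * criterion is charged to cl_cumul.
 */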
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_unthrottled(sch);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);