/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via the qdisc root lock
 * - ingress filtering is also serialized via the qdisc root lock
 * - updates to the tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* Note that dequeue_skb can possibly return an skb list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be an skb list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	*packets = 1;
	*validate = true;
	if (unlikely(skb)) {
		/* check the reason for requeuing without the tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
		/* skbs in gso_skb were already validated */
		*validate = false;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
		    !netif_xmit_frozen_or_stopped(txq)) {
			skb = q->dequeue(q);
			if (skb && qdisc_may_bulk(q))
				try_bulk_dequeue_skb(q, skb, txq, packets);
		}
	}
	return skb;
}

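/* A worked example of the bulk-dequeue byte budget above (numbers are
 * illustrative only): if qdisc_avail_bulklimit(txq) reports 10000 bytes and
 * the first dequeued skb is 1500 bytes, try_bulk_dequeue_skb() starts with
 * bytelimit = 8500 and keeps chaining further 1500-byte skbs while the
 * budget stays positive (8500, 7000, ..., 1000), so up to six more skbs
 * join the list; the budget is checked before each dequeue, so the last
 * skb may overshoot it. The whole list is then handed to the driver in a
 * single sch_direct_xmit() call.
 */
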
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * The same CPU is holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking the xmit owner and drop the packet
		 * when a dead loop is detected. Return OK to try the next skb.
		 */
		kfree_skb_list(skb);
		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
				     dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another CPU is holding the lock; requeue and delay xmits
		 * for some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC___STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

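/* A sketch of one qdisc_restart() cycle, in the order the locks are taken
 * and released by the code above (not an additional code path):
 *
 *   root_lock held    - dequeue_skb() pulls one skb (or a bulk list)
 *   root_lock dropped - the skb list is validated (GSO segmentation,
 *                       checksumming) outside of any lock
 *   HARD_TX_LOCK      - dev_hard_start_xmit() hands the list to the
 *                       driver, unless the txq is frozen or stopped
 *   root_lock retaken - the driver's return code is interpreted:
 *                       completed, driver-lock collision, or requeue
 */
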
void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: postpone processing if
		 * 1. we've exceeded the packet quota, or
		 * 2. another process needs the CPU.
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = dev->trans_start;
	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected that carrier is present.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

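/* For reference, a minimal sketch of the driver side of the watchdog above:
 * dev_watchdog() invokes the driver's ndo_tx_timeout() once a stopped queue
 * has made no progress for watchdog_timeo jiffies. The handler below is
 * hypothetical (no such driver lives in this file); a real one would do a
 * device-specific reset between the two calls.
 */
static void __maybe_unused example_tx_timeout(struct net_device *dev)
{
	netif_tx_disable(dev);		/* stop all tx queues */
	/* ... device-specific reset would go here ... */
	netif_tx_wake_all_queues(dev);	/* let the stack transmit again */
}
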
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};


static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
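/* Worked example: an skb with skb->priority == TC_PRIO_BESTEFFORT (0) maps
 * via prio2band[] to band 1, which sets bit 1 of the bitmap. With bands 1
 * and 2 non-empty the bitmap is 6 (binary 110), and bitmap2band[6] == 1
 * below: the lowest-numbered (highest-priority) non-empty band is always
 * served first.
 */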
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__skb_queue_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

static struct lock_class_key qdisc_tx_busylock;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev = dev_queue->dev;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment
	 * ourselves
	 */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner))
		goto errout;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb_list(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb_list(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait an RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

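/* Typical lifecycle of a non-builtin qdisc, tying the helpers above
 * together: qdisc_create_dflt() allocates it with refcnt == 1,
 * dev_graft_qdisc() below makes it the sleeping qdisc of a tx queue, and
 * qdisc_destroy() tears it down only once the last reference is dropped,
 * deferring the actual kfree() to an RCU grace period via qdisc_rcu_free().
 */
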
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc = &noqueue_qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  default_qdisc_ops, TC_H_ROOT);
		if (!qdisc) {
			netdev_info(dev, "activation failed\n");
			return;
		}
		if (!netif_is_multiqueue(dev))
			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device, create a
	 * default one for devices that need queueing, and the noqueue
	 * qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

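/* some_qdisc_is_busy() below is the other half of the deactivation
 * protocol: dev_deactivate_queue() above marks a qdisc
 * __QDISC_STATE_DEACTIVATED and swaps the default (noop) qdisc into
 * dev_queue->qdisc, after which dev_deactivate_many() spins until no
 * queue's qdisc is still running or scheduled for softirq processing.
 */
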
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in the fast path (a reciprocal divide is a multiply and a shift).
	 *
	 * The normal formula would be:
	 *	time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead:
	 *	time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
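	/* Worked example (illustrative numbers): at 1 Gbit/s,
	 * rate_bytes_ps == 125000000 and the loop below starts with
	 * factor == 10^9 and mult == 8, then doubles factor (bumping
	 * shift) until mult has bit 31 set: after 28 doublings
	 * mult == 2^31 and shift == 28, so time_in_ns becomes
	 * (len * 2^31) >> 28 == len * 8, i.e. 12000 ns for a 1500-byte
	 * packet, as expected at 1 Gbit/s.
	 */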
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);