drivers/net/eql.c


DEFINITIONS

This source file includes the following definitions.
  1. eql_timer
  2. eql_setup
  3. eql_open
  4. eql_kill_one_slave
  5. eql_kill_slave_queue
  6. eql_close
  7. eql_ioctl
  8. __eql_schedule_slaves
  9. eql_slave_xmit
  10. __eql_find_slave_dev
  11. eql_is_full
  12. __eql_insert_slave
  13. eql_enslave
  14. eql_emancipate
  15. eql_g_slave_cfg
  16. eql_s_slave_cfg
  17. eql_g_master_cfg
  18. eql_s_master_cfg
  19. eql_init_module
  20. eql_cleanup_module

/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *      This software may be used and distributed according to the terms
 *      of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21  15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22  21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19  23:14:31  guru
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19  23:07:53  guru
 * back to
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19  22:38:20  guru
 *                    slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19  22:30:55  guru
 *       slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                      (priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19  21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19  21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18  22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18  21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17  22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15  16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15  14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15  14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15  02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15  02:36:31  guru
 * big changes
 *
 *      scheduler was torn out and replaced with something smarter
 *
 *      global names not prefixed with eql_ were renamed to protect
 *      against namespace collisions
 *
 *      a few more abstract interfaces were added to facilitate any
 *      potential change of datastructure.  the driver is still using
 *      a linked list of slaves.  going to a heap would be a bit of
 *      an overkill.
 *
 *      this compiles fine with no warnings.
 *
 *      the locking mechanism and timer stuff must be written however,
 *      this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <linux/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)       ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)      ((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

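/*
 * Periodic housekeeping: drain each live slave's bytes_queued estimate
 * by priority_Bps, clamping at zero, and reap any slave whose device is
 * no longer up.  Together with the charging done in eql_slave_xmit(),
 * this is in effect a leaky-bucket estimate of each slave's backlog.
 */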
static void eql_timer(struct timer_list *t)
{
        equalizer_t *eql = from_timer(eql, t, timer);
        struct list_head *this, *tmp, *head;

        spin_lock(&eql->queue.lock);
        head = &eql->queue.all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *slave = list_entry(this, slave_t, list);

                if ((slave->dev->flags & IFF_UP) == IFF_UP) {
                        slave->bytes_queued -= slave->priority_Bps;
                        if (slave->bytes_queued < 0)
                                slave->bytes_queued = 0;
                } else {
                        eql_kill_one_slave(&eql->queue, slave);
                }
        }
        spin_unlock(&eql->queue.lock);

        eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
        add_timer(&eql->timer);
}

static const char version[] __initconst =
        "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
        .ndo_open       = eql_open,
        .ndo_stop       = eql_close,
        .ndo_do_ioctl   = eql_ioctl,
        .ndo_start_xmit = eql_slave_xmit,
};

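/*
 * Setup callback passed to alloc_netdev() in eql_init_module().  It
 * runs only once, at module load, which is why it can live in __init
 * text.
 */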
static void __init eql_setup(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        timer_setup(&eql->timer, eql_timer, 0);
        eql->timer.expires      = jiffies + EQL_DEFAULT_RESCHED_IVAL;

        spin_lock_init(&eql->queue.lock);
        INIT_LIST_HEAD(&eql->queue.all_slaves);
        eql->queue.master_dev   = dev;

        dev->netdev_ops         = &eql_netdev_ops;

        /*
         *      Now we undo some of the things that eth_setup does
         *      that we don't like
         */

        dev->mtu                = EQL_DEFAULT_MTU;      /* set to 576 in if_eql.h */
        dev->flags              = IFF_MASTER;

        dev->type               = ARPHRD_SLIP;
        dev->tx_queue_len       = 5;            /* Hands them off fast */
        netif_keep_dst(dev);
}

static int eql_open(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        /* XXX We should force this off automatically for the user. */
        netdev_info(dev,
                    "remember to turn off Van-Jacobson compression on your slave devices\n");

        BUG_ON(!list_empty(&eql->queue.all_slaves));

        eql->min_slaves = 1;
        eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

        add_timer(&eql->timer);

        return 0;
}

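/*
 * Unlink a slave and drop the device reference taken by
 * __eql_insert_slave().  The caller must hold queue->lock.
 */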
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
        list_del(&slave->list);
        queue->num_slaves--;
        slave->dev->flags &= ~IFF_SLAVE;
        dev_put(slave->dev);
        kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
        struct list_head *head, *tmp, *this;

        spin_lock_bh(&queue->lock);

        head = &queue->all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *s = list_entry(this, slave_t, list);

                eql_kill_one_slave(queue, s);
        }

        spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);

        /*
         *      The timer has to be stopped first before we start hacking away
         *      at the data structure it scans every so often...
         */

        del_timer_sync(&eql->timer);

        eql_kill_slave_queue(&eql->queue);

        return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
            !capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
                case EQL_ENSLAVE:
                        return eql_enslave(dev, ifr->ifr_data);
                case EQL_EMANCIPATE:
                        return eql_emancipate(dev, ifr->ifr_data);
                case EQL_GETSLAVECFG:
                        return eql_g_slave_cfg(dev, ifr->ifr_data);
                case EQL_SETSLAVECFG:
                        return eql_s_slave_cfg(dev, ifr->ifr_data);
                case EQL_GETMASTRCFG:
                        return eql_g_master_cfg(dev, ifr->ifr_data);
                case EQL_SETMASTRCFG:
                        return eql_s_master_cfg(dev, ifr->ifr_data);
                default:
                        return -EOPNOTSUPP;
        }
}
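
/*
 * A minimal userspace sketch (illustrative and assumed, not part of
 * this driver): enslaving a device under the "eql" master might look
 * roughly like this, using the slaving_request_t layout from
 * <linux/if_eql.h>.  srq.priority is the slave's line rate in bits per
 * second (the driver stores it divided by 8 as priority_Bps), and
 * EQL_ENSLAVE requires CAP_NET_ADMIN.
 *
 *      struct ifreq ifr;
 *      slaving_request_t srq;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strncpy(srq.slave_name, "ppp0", IFNAMSIZ);
 *      srq.priority = 115200;
 *      strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *      ifr.ifr_data = (char *)&srq;
 *      ioctl(fd, EQL_ENSLAVE, &ifr);
 */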

/* queue->lock must be held.  Scheduling heuristic: pick the slave with
 * the smallest load, computed (as the changelog above records) as
 *
 *      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *              (priority_Bps) + bytes_queued * 8;
 *
 * so faster links and emptier queues win; the large constant bias keeps
 * the unsigned arithmetic from wrapping.  Dead slaves found during the
 * scan are reaped.
 */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
        unsigned long best_load = ~0UL;
        struct list_head *this, *tmp, *head;
        slave_t *best_slave;

        best_slave = NULL;

        /* Make a pass to set the best slave. */
        head = &queue->all_slaves;
        list_for_each_safe(this, tmp, head) {
                slave_t *slave = list_entry(this, slave_t, list);
                unsigned long slave_load, bytes_queued, priority_Bps;

                /* Go through the slave list once, updating best_slave
                 * whenever a new best_load is found.
                 */
                bytes_queued = slave->bytes_queued;
                priority_Bps = slave->priority_Bps;
                if ((slave->dev->flags & IFF_UP) == IFF_UP) {
                        slave_load = (~0UL - (~0UL / 2)) -
                                (priority_Bps) + bytes_queued * 8;

                        if (slave_load < best_load) {
                                best_load = slave_load;
                                best_slave = slave;
                        }
                } else {
                        /* We found a dead slave, kill it. */
                        eql_kill_one_slave(queue, slave);
                }
        }
        return best_slave;
}

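/*
 * Transmit path: pick the least-loaded live slave, charge the packet's
 * length to its bytes_queued estimate (the timer drains this again),
 * and hand the skb to the slave device.  With no live slave the packet
 * is dropped and counted as such.
 */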
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
        equalizer_t *eql = netdev_priv(dev);
        slave_t *slave;

        spin_lock(&eql->queue.lock);

        slave = __eql_schedule_slaves(&eql->queue);
        if (slave) {
                struct net_device *slave_dev = slave->dev;

                skb->dev = slave_dev;
                skb->priority = TC_PRIO_FILLER;
                slave->bytes_queued += skb->len;
                dev_queue_xmit(skb);
                dev->stats.tx_packets++;
        } else {
                dev->stats.tx_dropped++;
                dev_kfree_skb(skb);
        }

        spin_unlock(&eql->queue.lock);

        return NETDEV_TX_OK;
}

/*
 *      Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
        struct list_head *this, *head;

        head = &queue->all_slaves;
        list_for_each(this, head) {
                slave_t *slave = list_entry(this, slave_t, list);

                if (slave->dev == dev)
                        return slave;
        }

        return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
        equalizer_t *eql = netdev_priv(queue->master_dev);

        if (queue->num_slaves >= eql->max_slaves)
                return 1;
        return 0;
}

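/*
 * Add a slave to the queue unless it is already full.  An existing
 * entry for the same device is replaced, and a reference is taken on
 * the device via dev_hold(), released again in eql_kill_one_slave().
 */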
/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
        if (!eql_is_full(queue)) {
                slave_t *duplicate_slave = NULL;

                duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
                if (duplicate_slave)
                        eql_kill_one_slave(queue, duplicate_slave);

                dev_hold(slave->dev);
                list_add(&slave->list, &queue->all_slaves);
                queue->num_slaves++;
                slave->dev->flags |= IFF_SLAVE;

                return 0;
        }

        return -ENOSPC;
}

static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
        struct net_device *slave_dev;
        slaving_request_t srq;

        if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
                return -EFAULT;

        slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
        if (!slave_dev)
                return -ENODEV;

        if ((master_dev->flags & IFF_UP) == IFF_UP) {
                /* slave is not a master & not already a slave: */
                if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
                        slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
                        equalizer_t *eql = netdev_priv(master_dev);
                        int ret;

                        if (!s)
                                return -ENOMEM;

                        memset(s, 0, sizeof(*s));
                        s->dev = slave_dev;
                        s->priority = srq.priority;
                        s->priority_bps = srq.priority;
                        s->priority_Bps = srq.priority / 8;

                        spin_lock_bh(&eql->queue.lock);
                        ret = __eql_insert_slave(&eql->queue, s);
                        if (ret)
                                kfree(s);

                        spin_unlock_bh(&eql->queue.lock);

                        return ret;
                }
        }

        return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
        equalizer_t *eql = netdev_priv(master_dev);
        struct net_device *slave_dev;
        slaving_request_t srq;
        int ret;

        if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
                return -EFAULT;

        slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
        if (!slave_dev)
                return -ENODEV;

        ret = -EINVAL;
        spin_lock_bh(&eql->queue.lock);
        if (eql_is_slave(slave_dev)) {
                slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
                if (slave) {
                        eql_kill_one_slave(&eql->queue, slave);
                        ret = 0;
                }
        }
        spin_unlock_bh(&eql->queue.lock);

        return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
        equalizer_t *eql = netdev_priv(dev);
        slave_t *slave;
        struct net_device *slave_dev;
        slave_config_t sc;
        int ret;

        if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
                return -EFAULT;

        slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
        if (!slave_dev)
                return -ENODEV;

        ret = -EINVAL;

        spin_lock_bh(&eql->queue.lock);
        if (eql_is_slave(slave_dev)) {
                slave = __eql_find_slave_dev(&eql->queue, slave_dev);
                if (slave) {
                        sc.priority = slave->priority;
                        ret = 0;
                }
        }
        spin_unlock_bh(&eql->queue.lock);

        if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
                ret = -EFAULT;

        return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
        slave_t *slave;
        equalizer_t *eql;
        struct net_device *slave_dev;
        slave_config_t sc;
        int ret;

        if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
                return -EFAULT;

        slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
        if (!slave_dev)
                return -ENODEV;

        ret = -EINVAL;

        eql = netdev_priv(dev);
        spin_lock_bh(&eql->queue.lock);
        if (eql_is_slave(slave_dev)) {
                slave = __eql_find_slave_dev(&eql->queue, slave_dev);
                if (slave) {
                        slave->priority = sc.priority;
                        slave->priority_bps = sc.priority;
                        slave->priority_Bps = sc.priority / 8;
                        ret = 0;
                }
        }
        spin_unlock_bh(&eql->queue.lock);

        return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
        equalizer_t *eql;
        master_config_t mc;

        memset(&mc, 0, sizeof(master_config_t));

        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                mc.max_slaves = eql->max_slaves;
                mc.min_slaves = eql->min_slaves;
                if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
                        return -EFAULT;
                return 0;
        }
        return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
        equalizer_t *eql;
        master_config_t mc;

        if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
                return -EFAULT;

        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                eql->max_slaves = mc.max_slaves;
                eql->min_slaves = mc.min_slaves;
                return 0;
        }
        return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
        int err;

        pr_info("%s\n", version);

        dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
                               eql_setup);
        if (!dev_eql)
                return -ENOMEM;

        err = register_netdev(dev_eql);
        if (err)
                free_netdev(dev_eql);
        return err;
}

static void __exit eql_cleanup_module(void)
{
        unregister_netdev(dev_eql);
        free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");
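
/*
 * Usage sketch (assumed, not part of this file): load the module with
 * "modprobe eql", bring the "eql" master up (e.g. "ip link set eql up"),
 * then attach slave devices through the EQL_ENSLAVE ioctl above, for
 * instance with the user-space eql_enslave tool described in the
 * kernel's eql documentation.
 */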
