root/drivers/net/plip/plip.c


DEFINITIONS

This source file includes the following definitions.
  1. enable_parport_interrupts
  2. disable_parport_interrupts
  3. write_data
  4. read_status
  5. plip_init_netdev
  6. plip_kick_bh
  7. plip_bh
  8. plip_timer_bh
  9. plip_bh_timeout_error
  10. plip_none
  11. plip_receive
  12. plip_type_trans
  13. plip_receive_packet
  14. plip_send
  15. plip_send_packet
  16. plip_connection_close
  17. plip_error
  18. plip_interrupt
  19. plip_tx_packet
  20. plip_rewrite_address
  21. plip_hard_header
  22. plip_hard_header_cache
  23. plip_open
  24. plip_close
  25. plip_preempt
  26. plip_wakeup
  27. plip_ioctl
  28. plip_searchfor
  29. plip_attach
  30. plip_detach
  31. plip_probe
  32. plip_cleanup_module
  33. plip_setup
  34. plip_init

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
   3 /* PLIP: A parallel port "network" driver for Linux. */
   4 /* This driver is for parallel ports with a 5-bit cable (LapLink (R) cable). */
   5 /*
   6  * Authors:     Donald Becker <becker@scyld.com>
   7  *              Tommy Thorn <thorn@daimi.aau.dk>
   8  *              Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
   9  *              Alan Cox <gw4pts@gw4pts.ampr.org>
  10  *              Peter Bauer <100136.3530@compuserve.com>
  11  *              Niibe Yutaka <gniibe@mri.co.jp>
  12  *              Nimrod Zimerman <zimerman@mailandnews.com>
  13  *
  14  * Enhancements:
  15  *              Modularization and ifreq/ifmap support by Alan Cox.
  16  *              Rewritten by Niibe Yutaka.
  17  *              parport-sharing awareness code by Philip Blundell.
  18  *              SMP locking by Niibe Yutaka.
  19  *              Support for parallel ports with no IRQ (poll mode),
  20  *              Modifications to use the parallel port API
  21  *              by Nimrod Zimerman.
  22  *
  23  * Fixes:
  24  *              Niibe Yutaka
  25  *                - Module initialization.
  26  *                - MTU fix.
  27  *                - Make sure the other end is OK before sending a packet.
  28  *                - Fix immediate timer problem.
  29  *
  30  *              Al Viro
  31  *                - Changed {enable,disable}_irq handling to make it work
  32  *                  with new ("stack") semantics.
  33  */
  34 
  35 /*
  36  * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
  37  * inspired by Russ Nelson's parallel port packet driver.
  38  *
  39  * NOTE:
  40  *     Tanabe Hiroyasu changed the protocol, and that version shipped in
  41  *     Linux v1.0.  Because of the need to communicate with DOS machines
  42  *     running the Crynwr packet driver, Peter Bauer changed the protocol
  43  *     back to the original one.
  44  *
  45  *     This version follows the original PLIP protocol, so it cannot
  46  *     communicate with the PLIP of Linux v1.0.
  47  */
  48 
  49 /*
  50  *     To use with a DOS box, turn on the ARP switch:
  51  *      # ifconfig plip[0-2] arp
  52  */
  53 static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
  54 
  55 /*
  56   Sources:
  57         Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
  58         "parallel.asm" parallel port packet driver.
  59 
  60   The "Crynwr" parallel port standard specifies the following protocol:
  61     Trigger by sending nibble '0x8' (this causes interrupt on other end)
  62     count-low octet
  63     count-high octet
  64     ... data octets
  65     checksum octet
  66   Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
  67                         <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
  68 
  69   The packet is encapsulated as if it were ethernet.
  70 
  71   The cable used is a de facto standard parallel null cable -- sold as
  72   a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  73   make one yourself.  The wiring is:
  74     SLCTIN      17 - 17
  75     GROUND      25 - 25
  76     D0->ERROR   2 - 15          15 - 2
  77     D1->SLCT    3 - 13          13 - 3
  78     D2->PAPOUT  4 - 12          12 - 4
  79     D3->ACK     5 - 10          10 - 5
  80     D4->BUSY    6 - 11          11 - 6
  81   Do not connect the other pins.  They are
  82     D5,D6,D7 are 7,8,9
  83     STROBE is 1, FEED is 14, INIT is 16
  84     extra grounds are 18,19,20,21,22,23,24
  85 */
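
/*
 * Illustrative sketch (editor's addition, not part of the driver, kept out
 * of the build): the Crynwr framing above, written out as the sequence of
 * 5-bit values placed on D0-D4.  The helper name and the flat output buffer
 * are hypothetical; the real per-nibble handshake and timing live in
 * plip_send()/plip_receive() below.
 */
#if 0
static unsigned int crynwr_serialize(const unsigned char *data,
				     unsigned short len, unsigned char *out)
{
	unsigned int n = 0;
	unsigned char checksum = 0;
	unsigned char count[2];
	unsigned short i;

	out[n++] = 0x08;			/* trigger: interrupts the peer */

	count[0] = len & 0xff;			/* count-low octet  */
	count[1] = (len >> 8) & 0xff;		/* count-high octet */
	for (i = 0; i < 2; i++) {
		out[n++] = 0x10 | (count[i] & 0x0f);	/* low nibble, D4 raised  */
		out[n++] = (count[i] >> 4) & 0x0f;	/* high nibble, D4 lowered */
	}
	for (i = 0; i < len; i++) {		/* data octets */
		checksum += data[i];		/* checksum = 8-bit sum of data */
		out[n++] = 0x10 | (data[i] & 0x0f);
		out[n++] = (data[i] >> 4) & 0x0f;
	}
	out[n++] = 0x10 | (checksum & 0x0f);	/* checksum octet */
	out[n++] = (checksum >> 4) & 0x0f;

	return n;
}
#endif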
  86 
  87 #include <linux/module.h>
  88 #include <linux/kernel.h>
  89 #include <linux/types.h>
  90 #include <linux/fcntl.h>
  91 #include <linux/interrupt.h>
  92 #include <linux/string.h>
  93 #include <linux/slab.h>
  94 #include <linux/if_ether.h>
  95 #include <linux/in.h>
  96 #include <linux/errno.h>
  97 #include <linux/delay.h>
  98 #include <linux/init.h>
  99 #include <linux/netdevice.h>
 100 #include <linux/etherdevice.h>
 101 #include <linux/inetdevice.h>
 102 #include <linux/skbuff.h>
 103 #include <linux/if_plip.h>
 104 #include <linux/workqueue.h>
 105 #include <linux/spinlock.h>
 106 #include <linux/completion.h>
 107 #include <linux/parport.h>
 108 #include <linux/bitops.h>
 109 
 110 #include <net/neighbour.h>
 111 
 112 #include <asm/irq.h>
 113 #include <asm/byteorder.h>
 114 
 115 /* Maximum number of devices to support. */
 116 #define PLIP_MAX  8
 117 
 118 /* Use 0 for production, 1 for verification, 2 or more for debugging */
 119 #ifndef NET_DEBUG
 120 #define NET_DEBUG 1
 121 #endif
 122 static const unsigned int net_debug = NET_DEBUG;
 123 
 124 #define ENABLE(irq)  if (irq != -1) enable_irq(irq)
 125 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
 126 
 127 /* In microseconds */
 128 #define PLIP_DELAY_UNIT            1
 129 
 130 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
 131 #define PLIP_TRIGGER_WAIT        500
 132 
 133 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
 134 #define PLIP_NIBBLE_WAIT        3000
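
/* With PLIP_DELAY_UNIT of 1 usec the defaults above work out to a 500 usec
 * connection (trigger) timeout and a 3 ms per-nibble timeout.  Both values
 * are copied into nl->trigger / nl->nibble in plip_init_netdev() and can be
 * changed at run time via the SIOCDEVPLIP ioctl (PLIP_GET_TIMEOUT /
 * PLIP_SET_TIMEOUT). */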
 135 
 136 /* Bottom halves */
 137 static void plip_kick_bh(struct work_struct *work);
 138 static void plip_bh(struct work_struct *work);
 139 static void plip_timer_bh(struct work_struct *work);
 140 
 141 /* Interrupt handler */
 142 static void plip_interrupt(void *dev_id);
 143 
 144 /* Functions for DEV methods */
 145 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
 146 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
 147                             unsigned short type, const void *daddr,
 148                             const void *saddr, unsigned len);
 149 static int plip_hard_header_cache(const struct neighbour *neigh,
 150                                   struct hh_cache *hh, __be16 type);
 151 static int plip_open(struct net_device *dev);
 152 static int plip_close(struct net_device *dev);
 153 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 154 static int plip_preempt(void *handle);
 155 static void plip_wakeup(void *handle);
 156 
 157 enum plip_connection_state {
 158         PLIP_CN_NONE=0,
 159         PLIP_CN_RECEIVE,
 160         PLIP_CN_SEND,
 161         PLIP_CN_CLOSING,
 162         PLIP_CN_ERROR
 163 };
 164 
 165 enum plip_packet_state {
 166         PLIP_PK_DONE=0,
 167         PLIP_PK_TRIGGER,
 168         PLIP_PK_LENGTH_LSB,
 169         PLIP_PK_LENGTH_MSB,
 170         PLIP_PK_DATA,
 171         PLIP_PK_CHECKSUM
 172 };
 173 
 174 enum plip_nibble_state {
 175         PLIP_NB_BEGIN,
 176         PLIP_NB_1,
 177         PLIP_NB_2,
 178 };
 179 
 180 struct plip_local {
 181         enum plip_packet_state state;
 182         enum plip_nibble_state nibble;
 183         union {
 184                 struct {
 185 #if defined(__LITTLE_ENDIAN)
 186                         unsigned char lsb;
 187                         unsigned char msb;
 188 #elif defined(__BIG_ENDIAN)
 189                         unsigned char msb;
 190                         unsigned char lsb;
 191 #else
 192 #error  "Please fix the endianness defines in <asm/byteorder.h>"
 193 #endif
 194                 } b;
 195                 unsigned short h;
 196         } length;
 197         unsigned short byte;
 198         unsigned char  checksum;
 199         unsigned char  data;
 200         struct sk_buff *skb;
 201 };
 202 
 203 struct net_local {
 204         struct net_device *dev;
 205         struct work_struct immediate;
 206         struct delayed_work deferred;
 207         struct delayed_work timer;
 208         struct plip_local snd_data;
 209         struct plip_local rcv_data;
 210         struct pardevice *pardev;
 211         unsigned long  trigger;
 212         unsigned long  nibble;
 213         enum plip_connection_state connection;
 214         unsigned short timeout_count;
 215         int is_deferred;
 216         int port_owner;
 217         int should_relinquish;
 218         spinlock_t lock;
 219         atomic_t kill_timer;
 220         struct completion killed_timer_cmp;
 221 };
 222 
 223 static inline void enable_parport_interrupts (struct net_device *dev)
 224 {
 225         if (dev->irq != -1)
 226         {
 227                 struct parport *port =
 228                    ((struct net_local *)netdev_priv(dev))->pardev->port;
 229                 port->ops->enable_irq (port);
 230         }
 231 }
 232 
 233 static inline void disable_parport_interrupts (struct net_device *dev)
 234 {
 235         if (dev->irq != -1)
 236         {
 237                 struct parport *port =
 238                    ((struct net_local *)netdev_priv(dev))->pardev->port;
 239                 port->ops->disable_irq (port);
 240         }
 241 }
 242 
 243 static inline void write_data (struct net_device *dev, unsigned char data)
 244 {
 245         struct parport *port =
 246            ((struct net_local *)netdev_priv(dev))->pardev->port;
 247 
 248         port->ops->write_data (port, data);
 249 }
 250 
 251 static inline unsigned char read_status (struct net_device *dev)
 252 {
 253         struct parport *port =
 254            ((struct net_local *)netdev_priv(dev))->pardev->port;
 255 
 256         return port->ops->read_status (port);
 257 }
 258 
 259 static const struct header_ops plip_header_ops = {
 260         .create = plip_hard_header,
 261         .cache  = plip_hard_header_cache,
 262 };
 263 
 264 static const struct net_device_ops plip_netdev_ops = {
 265         .ndo_open                = plip_open,
 266         .ndo_stop                = plip_close,
 267         .ndo_start_xmit          = plip_tx_packet,
 268         .ndo_do_ioctl            = plip_ioctl,
 269         .ndo_set_mac_address     = eth_mac_addr,
 270         .ndo_validate_addr       = eth_validate_addr,
 271 };
 272 
 273 /* Entry point of PLIP driver.
 274    Probe the hardware, and register/initialize the driver.
 275 
 276    PLIP is rather weird, because of the way it interacts with the parport
  277    system.  It is _not_ initialised from Space.c.  Instead, plip_init()
  278    registers a driver with the parport layer, and plip_attach() makes up
  279    a "struct net_device" for each port and then calls us here.
 280 
 281    */
 282 static void
 283 plip_init_netdev(struct net_device *dev)
 284 {
 285         struct net_local *nl = netdev_priv(dev);
 286 
 287         /* Then, override parts of it */
 288         dev->tx_queue_len        = 10;
 289         dev->flags               = IFF_POINTOPOINT|IFF_NOARP;
 290         memset(dev->dev_addr, 0xfc, ETH_ALEN);
 291 
 292         dev->netdev_ops          = &plip_netdev_ops;
 293         dev->header_ops          = &plip_header_ops;
 294 
 295 
 296         nl->port_owner = 0;
 297 
 298         /* Initialize constants */
 299         nl->trigger     = PLIP_TRIGGER_WAIT;
 300         nl->nibble      = PLIP_NIBBLE_WAIT;
 301 
 302         /* Initialize task queue structures */
 303         INIT_WORK(&nl->immediate, plip_bh);
 304         INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 305 
 306         if (dev->irq == -1)
 307                 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 308 
 309         spin_lock_init(&nl->lock);
 310 }
 311 
 312 /* Bottom half handler for the delayed request.
  313    This routine is run as the `deferred' delayed work.
 314    Request `plip_bh' to be invoked. */
 315 static void
 316 plip_kick_bh(struct work_struct *work)
 317 {
 318         struct net_local *nl =
 319                 container_of(work, struct net_local, deferred.work);
 320 
 321         if (nl->is_deferred)
 322                 schedule_work(&nl->immediate);
 323 }
 324 
 325 /* Forward declarations of internal routines */
 326 static int plip_none(struct net_device *, struct net_local *,
 327                      struct plip_local *, struct plip_local *);
 328 static int plip_receive_packet(struct net_device *, struct net_local *,
 329                                struct plip_local *, struct plip_local *);
 330 static int plip_send_packet(struct net_device *, struct net_local *,
 331                             struct plip_local *, struct plip_local *);
 332 static int plip_connection_close(struct net_device *, struct net_local *,
 333                                  struct plip_local *, struct plip_local *);
 334 static int plip_error(struct net_device *, struct net_local *,
 335                       struct plip_local *, struct plip_local *);
 336 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 337                                  struct plip_local *snd,
 338                                  struct plip_local *rcv,
 339                                  int error);
 340 
 341 #define OK        0
 342 #define TIMEOUT   1
 343 #define ERROR     2
 344 #define HS_TIMEOUT      3
 345 
 346 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
 347                          struct plip_local *snd, struct plip_local *rcv);
 348 
 349 static const plip_func connection_state_table[] =
 350 {
 351         plip_none,
 352         plip_receive_packet,
 353         plip_send_packet,
 354         plip_connection_close,
 355         plip_error
 356 };
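
/* The table above is indexed by enum plip_connection_state: PLIP_CN_NONE
 * maps to plip_none, PLIP_CN_RECEIVE to plip_receive_packet, PLIP_CN_SEND
 * to plip_send_packet, PLIP_CN_CLOSING to plip_connection_close and
 * PLIP_CN_ERROR to plip_error; plip_bh() simply dispatches through it. */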
 357 
 358 /* Bottom half handler of PLIP. */
 359 static void
 360 plip_bh(struct work_struct *work)
 361 {
 362         struct net_local *nl = container_of(work, struct net_local, immediate);
 363         struct plip_local *snd = &nl->snd_data;
 364         struct plip_local *rcv = &nl->rcv_data;
 365         plip_func f;
 366         int r;
 367 
 368         nl->is_deferred = 0;
 369         f = connection_state_table[nl->connection];
 370         if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
 371             (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 372                 nl->is_deferred = 1;
 373                 schedule_delayed_work(&nl->deferred, 1);
 374         }
 375 }
 376 
 377 static void
 378 plip_timer_bh(struct work_struct *work)
 379 {
 380         struct net_local *nl =
 381                 container_of(work, struct net_local, timer.work);
 382 
 383         if (!(atomic_read (&nl->kill_timer))) {
 384                 plip_interrupt (nl->dev);
 385 
 386                 schedule_delayed_work(&nl->timer, 1);
 387         }
 388         else {
 389                 complete(&nl->killed_timer_cmp);
 390         }
 391 }
 392 
 393 static int
 394 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 395                       struct plip_local *snd, struct plip_local *rcv,
 396                       int error)
 397 {
 398         unsigned char c0;
 399         /*
 400          * This is tricky. If we got here from the beginning of send (either
 401          * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
 402          * already disabled. With the old variant of {enable,disable}_irq()
 403          * extra disable_irq() was a no-op. Now it became mortal - it's
 404          * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
 405          * that is). So we have to treat HS_TIMEOUT and ERROR from send
 406          * in a special way.
 407          */
 408 
 409         spin_lock_irq(&nl->lock);
 410         if (nl->connection == PLIP_CN_SEND) {
 411 
 412                 if (error != ERROR) { /* Timeout */
 413                         nl->timeout_count++;
 414                         if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
 415                             nl->timeout_count <= 3) {
 416                                 spin_unlock_irq(&nl->lock);
 417                                 /* Try again later */
 418                                 return TIMEOUT;
 419                         }
 420                         c0 = read_status(dev);
 421                         printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
 422                                dev->name, snd->state, c0);
 423                 } else
 424                         error = HS_TIMEOUT;
 425                 dev->stats.tx_errors++;
 426                 dev->stats.tx_aborted_errors++;
 427         } else if (nl->connection == PLIP_CN_RECEIVE) {
 428                 if (rcv->state == PLIP_PK_TRIGGER) {
 429                         /* Transmission was interrupted. */
 430                         spin_unlock_irq(&nl->lock);
 431                         return OK;
 432                 }
 433                 if (error != ERROR) { /* Timeout */
 434                         if (++nl->timeout_count <= 3) {
 435                                 spin_unlock_irq(&nl->lock);
 436                                 /* Try again later */
 437                                 return TIMEOUT;
 438                         }
 439                         c0 = read_status(dev);
 440                         printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
 441                                dev->name, rcv->state, c0);
 442                 }
 443                 dev->stats.rx_dropped++;
 444         }
 445         rcv->state = PLIP_PK_DONE;
 446         if (rcv->skb) {
 447                 kfree_skb(rcv->skb);
 448                 rcv->skb = NULL;
 449         }
 450         snd->state = PLIP_PK_DONE;
 451         if (snd->skb) {
 452                 dev_kfree_skb(snd->skb);
 453                 snd->skb = NULL;
 454         }
 455         spin_unlock_irq(&nl->lock);
 456         if (error == HS_TIMEOUT) {
 457                 DISABLE(dev->irq);
 458                 synchronize_irq(dev->irq);
 459         }
 460         disable_parport_interrupts (dev);
 461         netif_stop_queue (dev);
 462         nl->connection = PLIP_CN_ERROR;
 463         write_data (dev, 0x00);
 464 
 465         return TIMEOUT;
 466 }
 467 
 468 static int
 469 plip_none(struct net_device *dev, struct net_local *nl,
 470           struct plip_local *snd, struct plip_local *rcv)
 471 {
 472         return OK;
 473 }
 474 
 475 /* PLIP_RECEIVE --- receive a byte (two nibbles)
 476    Returns OK on success, TIMEOUT on timeout */
 477 static inline int
 478 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
 479              enum plip_nibble_state *ns_p, unsigned char *data_p)
 480 {
 481         unsigned char c0, c1;
 482         unsigned int cx;
 483 
 484         switch (*ns_p) {
 485         case PLIP_NB_BEGIN:
 486                 cx = nibble_timeout;
 487                 while (1) {
 488                         c0 = read_status(dev);
 489                         udelay(PLIP_DELAY_UNIT);
 490                         if ((c0 & 0x80) == 0) {
 491                                 c1 = read_status(dev);
 492                                 if (c0 == c1)
 493                                         break;
 494                         }
 495                         if (--cx == 0)
 496                                 return TIMEOUT;
 497                 }
 498                 *data_p = (c0 >> 3) & 0x0f;
 499                 write_data (dev, 0x10); /* send ACK */
 500                 *ns_p = PLIP_NB_1;
 501                 /* fall through */
 502 
 503         case PLIP_NB_1:
 504                 cx = nibble_timeout;
 505                 while (1) {
 506                         c0 = read_status(dev);
 507                         udelay(PLIP_DELAY_UNIT);
 508                         if (c0 & 0x80) {
 509                                 c1 = read_status(dev);
 510                                 if (c0 == c1)
 511                                         break;
 512                         }
 513                         if (--cx == 0)
 514                                 return TIMEOUT;
 515                 }
 516                 *data_p |= (c0 << 1) & 0xf0;
 517                 write_data (dev, 0x00); /* send ACK */
 518                 *ns_p = PLIP_NB_BEGIN;
 519         case PLIP_NB_2:
 520                 break;
 521         }
 522         return OK;
 523 }
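
/* Editor's note on the shifts above: with the LapLink wiring, the peer's
 * D0-D3 appear on this side's status bits 3-6, so the low nibble is read
 * as (status >> 3) & 0x0f and the high nibble as (status << 1) & 0xf0.
 * The peer's D4 arrives on BUSY (bit 7), which the port inverts, so
 * "(c0 & 0x80) == 0" means the other end has raised D4. */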
 524 
 525 /*
 526  *      Determine the packet's protocol ID. The rule here is that we
 527  *      assume 802.3 if the type field is short enough to be a length.
 528  *      This is normal practice and works for any 'now in use' protocol.
 529  *
  530  *      PLIP is ethernet-ish but the daddr might not be valid if unicast.
  531  *      PLIP fortunately has no bus architecture (it's point-to-point).
 532  *
 533  *      We can't fix the daddr thing as that quirk (more bug) is embedded
 534  *      in far too many old systems not all even running Linux.
 535  */
 536 
 537 static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
 538 {
 539         struct ethhdr *eth;
 540         unsigned char *rawp;
 541 
 542         skb_reset_mac_header(skb);
 543         skb_pull(skb,dev->hard_header_len);
 544         eth = eth_hdr(skb);
 545 
 546         if(is_multicast_ether_addr(eth->h_dest))
 547         {
 548                 if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 549                         skb->pkt_type=PACKET_BROADCAST;
 550                 else
 551                         skb->pkt_type=PACKET_MULTICAST;
 552         }
 553 
 554         /*
 555          *      This ALLMULTI check should be redundant by 1.4
 556          *      so don't forget to remove it.
 557          */
 558 
 559         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
 560                 return eth->h_proto;
 561 
 562         rawp = skb->data;
 563 
 564         /*
 565          *      This is a magic hack to spot IPX packets. Older Novell breaks
 566          *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
 567          *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
 568          *      won't work for fault tolerant netware but does for the rest.
 569          */
 570         if (*(unsigned short *)rawp == 0xFFFF)
 571                 return htons(ETH_P_802_3);
 572 
 573         /*
 574          *      Real 802.2 LLC
 575          */
 576         return htons(ETH_P_802_2);
 577 }
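
/* For example (editor's illustration): an ARP frame arrives with h_proto
 * 0x0806, which is >= ETH_P_802_3_MIN, so it is passed up unchanged; a raw
 * 802.3 IPX frame starts its payload with the 0xFFFF checksum field and is
 * tagged ETH_P_802_3; anything else is treated as 802.2 LLC. */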
 578 
 579 /* PLIP_RECEIVE_PACKET --- receive a packet */
 580 static int
 581 plip_receive_packet(struct net_device *dev, struct net_local *nl,
 582                     struct plip_local *snd, struct plip_local *rcv)
 583 {
 584         unsigned short nibble_timeout = nl->nibble;
 585         unsigned char *lbuf;
 586 
 587         switch (rcv->state) {
 588         case PLIP_PK_TRIGGER:
 589                 DISABLE(dev->irq);
 590                 /* Don't need to synchronize irq, as we can safely ignore it */
 591                 disable_parport_interrupts (dev);
 592                 write_data (dev, 0x01); /* send ACK */
 593                 if (net_debug > 2)
 594                         printk(KERN_DEBUG "%s: receive start\n", dev->name);
 595                 rcv->state = PLIP_PK_LENGTH_LSB;
 596                 rcv->nibble = PLIP_NB_BEGIN;
 597                 /* fall through */
 598 
 599         case PLIP_PK_LENGTH_LSB:
 600                 if (snd->state != PLIP_PK_DONE) {
 601                         if (plip_receive(nl->trigger, dev,
 602                                          &rcv->nibble, &rcv->length.b.lsb)) {
 603                                 /* collision, here dev->tbusy == 1 */
 604                                 rcv->state = PLIP_PK_DONE;
 605                                 nl->is_deferred = 1;
 606                                 nl->connection = PLIP_CN_SEND;
 607                                 schedule_delayed_work(&nl->deferred, 1);
 608                                 enable_parport_interrupts (dev);
 609                                 ENABLE(dev->irq);
 610                                 return OK;
 611                         }
 612                 } else {
 613                         if (plip_receive(nibble_timeout, dev,
 614                                          &rcv->nibble, &rcv->length.b.lsb))
 615                                 return TIMEOUT;
 616                 }
 617                 rcv->state = PLIP_PK_LENGTH_MSB;
 618                 /* fall through */
 619 
 620         case PLIP_PK_LENGTH_MSB:
 621                 if (plip_receive(nibble_timeout, dev,
 622                                  &rcv->nibble, &rcv->length.b.msb))
 623                         return TIMEOUT;
 624                 if (rcv->length.h > dev->mtu + dev->hard_header_len ||
 625                     rcv->length.h < 8) {
 626                         printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
 627                         return ERROR;
 628                 }
 629                 /* Malloc up new buffer. */
 630                 rcv->skb = dev_alloc_skb(rcv->length.h + 2);
 631                 if (rcv->skb == NULL) {
 632                         printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
 633                         return ERROR;
 634                 }
 635                 skb_reserve(rcv->skb, 2);       /* Align IP on 16 byte boundaries */
 636                 skb_put(rcv->skb,rcv->length.h);
 637                 rcv->skb->dev = dev;
 638                 rcv->state = PLIP_PK_DATA;
 639                 rcv->byte = 0;
 640                 rcv->checksum = 0;
 641                 /* fall through */
 642 
 643         case PLIP_PK_DATA:
 644                 lbuf = rcv->skb->data;
 645                 do {
 646                         if (plip_receive(nibble_timeout, dev,
 647                                          &rcv->nibble, &lbuf[rcv->byte]))
 648                                 return TIMEOUT;
 649                 } while (++rcv->byte < rcv->length.h);
 650                 do {
 651                         rcv->checksum += lbuf[--rcv->byte];
 652                 } while (rcv->byte);
 653                 rcv->state = PLIP_PK_CHECKSUM;
 654                 /* fall through */
 655 
 656         case PLIP_PK_CHECKSUM:
 657                 if (plip_receive(nibble_timeout, dev,
 658                                  &rcv->nibble, &rcv->data))
 659                         return TIMEOUT;
 660                 if (rcv->data != rcv->checksum) {
 661                         dev->stats.rx_crc_errors++;
 662                         if (net_debug)
 663                                 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
 664                         return ERROR;
 665                 }
 666                 rcv->state = PLIP_PK_DONE;
 667                 /* fall through */
 668 
 669         case PLIP_PK_DONE:
  670                 /* Inform the upper layer of the arrival of a packet. */
 671                 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
 672                 netif_rx_ni(rcv->skb);
 673                 dev->stats.rx_bytes += rcv->length.h;
 674                 dev->stats.rx_packets++;
 675                 rcv->skb = NULL;
 676                 if (net_debug > 2)
 677                         printk(KERN_DEBUG "%s: receive end\n", dev->name);
 678 
 679                 /* Close the connection. */
 680                 write_data (dev, 0x00);
 681                 spin_lock_irq(&nl->lock);
 682                 if (snd->state != PLIP_PK_DONE) {
 683                         nl->connection = PLIP_CN_SEND;
 684                         spin_unlock_irq(&nl->lock);
 685                         schedule_work(&nl->immediate);
 686                         enable_parport_interrupts (dev);
 687                         ENABLE(dev->irq);
 688                         return OK;
 689                 } else {
 690                         nl->connection = PLIP_CN_NONE;
 691                         spin_unlock_irq(&nl->lock);
 692                         enable_parport_interrupts (dev);
 693                         ENABLE(dev->irq);
 694                         return OK;
 695                 }
 696         }
 697         return OK;
 698 }
 699 
 700 /* PLIP_SEND --- send a byte (two nibbles)
  701    Returns OK on success, TIMEOUT on timeout    */
 702 static inline int
 703 plip_send(unsigned short nibble_timeout, struct net_device *dev,
 704           enum plip_nibble_state *ns_p, unsigned char data)
 705 {
 706         unsigned char c0;
 707         unsigned int cx;
 708 
 709         switch (*ns_p) {
 710         case PLIP_NB_BEGIN:
 711                 write_data (dev, data & 0x0f);
 712                 *ns_p = PLIP_NB_1;
 713                 /* fall through */
 714 
 715         case PLIP_NB_1:
 716                 write_data (dev, 0x10 | (data & 0x0f));
 717                 cx = nibble_timeout;
 718                 while (1) {
 719                         c0 = read_status(dev);
 720                         if ((c0 & 0x80) == 0)
 721                                 break;
 722                         if (--cx == 0)
 723                                 return TIMEOUT;
 724                         udelay(PLIP_DELAY_UNIT);
 725                 }
 726                 write_data (dev, 0x10 | (data >> 4));
 727                 *ns_p = PLIP_NB_2;
 728                 /* fall through */
 729 
 730         case PLIP_NB_2:
 731                 write_data (dev, (data >> 4));
 732                 cx = nibble_timeout;
 733                 while (1) {
 734                         c0 = read_status(dev);
 735                         if (c0 & 0x80)
 736                                 break;
 737                         if (--cx == 0)
 738                                 return TIMEOUT;
 739                         udelay(PLIP_DELAY_UNIT);
 740                 }
 741                 *ns_p = PLIP_NB_BEGIN;
 742                 return OK;
 743         }
 744         return OK;
 745 }
 746 
 747 /* PLIP_SEND_PACKET --- send a packet */
 748 static int
 749 plip_send_packet(struct net_device *dev, struct net_local *nl,
 750                  struct plip_local *snd, struct plip_local *rcv)
 751 {
 752         unsigned short nibble_timeout = nl->nibble;
 753         unsigned char *lbuf;
 754         unsigned char c0;
 755         unsigned int cx;
 756 
 757         if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
 758                 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
 759                 snd->state = PLIP_PK_DONE;
 760                 snd->skb = NULL;
 761                 return ERROR;
 762         }
 763 
 764         switch (snd->state) {
 765         case PLIP_PK_TRIGGER:
 766                 if ((read_status(dev) & 0xf8) != 0x80)
 767                         return HS_TIMEOUT;
 768 
 769                 /* Trigger remote rx interrupt. */
 770                 write_data (dev, 0x08);
 771                 cx = nl->trigger;
 772                 while (1) {
 773                         udelay(PLIP_DELAY_UNIT);
 774                         spin_lock_irq(&nl->lock);
 775                         if (nl->connection == PLIP_CN_RECEIVE) {
 776                                 spin_unlock_irq(&nl->lock);
 777                                 /* Interrupted. */
 778                                 dev->stats.collisions++;
 779                                 return OK;
 780                         }
 781                         c0 = read_status(dev);
 782                         if (c0 & 0x08) {
 783                                 spin_unlock_irq(&nl->lock);
 784                                 DISABLE(dev->irq);
 785                                 synchronize_irq(dev->irq);
 786                                 if (nl->connection == PLIP_CN_RECEIVE) {
 787                                         /* Interrupted.
 788                                            We don't need to enable irq,
 789                                            as it is soon disabled.    */
 790                                         /* Yes, we do. New variant of
 791                                            {enable,disable}_irq *counts*
 792                                            them.  -- AV  */
 793                                         ENABLE(dev->irq);
 794                                         dev->stats.collisions++;
 795                                         return OK;
 796                                 }
 797                                 disable_parport_interrupts (dev);
 798                                 if (net_debug > 2)
 799                                         printk(KERN_DEBUG "%s: send start\n", dev->name);
 800                                 snd->state = PLIP_PK_LENGTH_LSB;
 801                                 snd->nibble = PLIP_NB_BEGIN;
 802                                 nl->timeout_count = 0;
 803                                 break;
 804                         }
 805                         spin_unlock_irq(&nl->lock);
 806                         if (--cx == 0) {
 807                                 write_data (dev, 0x00);
 808                                 return HS_TIMEOUT;
 809                         }
 810                 }
 811 
 812         case PLIP_PK_LENGTH_LSB:
 813                 if (plip_send(nibble_timeout, dev,
 814                               &snd->nibble, snd->length.b.lsb))
 815                         return TIMEOUT;
 816                 snd->state = PLIP_PK_LENGTH_MSB;
 817                 /* fall through */
 818 
 819         case PLIP_PK_LENGTH_MSB:
 820                 if (plip_send(nibble_timeout, dev,
 821                               &snd->nibble, snd->length.b.msb))
 822                         return TIMEOUT;
 823                 snd->state = PLIP_PK_DATA;
 824                 snd->byte = 0;
 825                 snd->checksum = 0;
 826                 /* fall through */
 827 
 828         case PLIP_PK_DATA:
 829                 do {
 830                         if (plip_send(nibble_timeout, dev,
 831                                       &snd->nibble, lbuf[snd->byte]))
 832                                 return TIMEOUT;
 833                 } while (++snd->byte < snd->length.h);
 834                 do {
 835                         snd->checksum += lbuf[--snd->byte];
 836                 } while (snd->byte);
 837                 snd->state = PLIP_PK_CHECKSUM;
 838                 /* fall through */
 839 
 840         case PLIP_PK_CHECKSUM:
 841                 if (plip_send(nibble_timeout, dev,
 842                               &snd->nibble, snd->checksum))
 843                         return TIMEOUT;
 844 
 845                 dev->stats.tx_bytes += snd->skb->len;
 846                 dev_kfree_skb(snd->skb);
 847                 dev->stats.tx_packets++;
 848                 snd->state = PLIP_PK_DONE;
 849                 /* fall through */
 850 
 851         case PLIP_PK_DONE:
 852                 /* Close the connection */
 853                 write_data (dev, 0x00);
 854                 snd->skb = NULL;
 855                 if (net_debug > 2)
 856                         printk(KERN_DEBUG "%s: send end\n", dev->name);
 857                 nl->connection = PLIP_CN_CLOSING;
 858                 nl->is_deferred = 1;
 859                 schedule_delayed_work(&nl->deferred, 1);
 860                 enable_parport_interrupts (dev);
 861                 ENABLE(dev->irq);
 862                 return OK;
 863         }
 864         return OK;
 865 }
 866 
 867 static int
 868 plip_connection_close(struct net_device *dev, struct net_local *nl,
 869                       struct plip_local *snd, struct plip_local *rcv)
 870 {
 871         spin_lock_irq(&nl->lock);
 872         if (nl->connection == PLIP_CN_CLOSING) {
 873                 nl->connection = PLIP_CN_NONE;
 874                 netif_wake_queue (dev);
 875         }
 876         spin_unlock_irq(&nl->lock);
 877         if (nl->should_relinquish) {
 878                 nl->should_relinquish = nl->port_owner = 0;
 879                 parport_release(nl->pardev);
 880         }
 881         return OK;
 882 }
 883 
 884 /* PLIP_ERROR --- wait till other end settled */
 885 static int
 886 plip_error(struct net_device *dev, struct net_local *nl,
 887            struct plip_local *snd, struct plip_local *rcv)
 888 {
 889         unsigned char status;
 890 
 891         status = read_status(dev);
 892         if ((status & 0xf8) == 0x80) {
 893                 if (net_debug > 2)
 894                         printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
 895                 nl->connection = PLIP_CN_NONE;
 896                 nl->should_relinquish = 0;
 897                 netif_start_queue (dev);
 898                 enable_parport_interrupts (dev);
 899                 ENABLE(dev->irq);
 900                 netif_wake_queue (dev);
 901         } else {
 902                 nl->is_deferred = 1;
 903                 schedule_delayed_work(&nl->deferred, 1);
 904         }
 905 
 906         return OK;
 907 }
 908 
 909 /* Handle the parallel port interrupts. */
 910 static void
 911 plip_interrupt(void *dev_id)
 912 {
 913         struct net_device *dev = dev_id;
 914         struct net_local *nl;
 915         struct plip_local *rcv;
 916         unsigned char c0;
 917         unsigned long flags;
 918 
 919         nl = netdev_priv(dev);
 920         rcv = &nl->rcv_data;
 921 
 922         spin_lock_irqsave (&nl->lock, flags);
 923 
 924         c0 = read_status(dev);
 925         if ((c0 & 0xf8) != 0xc0) {
 926                 if ((dev->irq != -1) && (net_debug > 1))
 927                         printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
 928                 spin_unlock_irqrestore (&nl->lock, flags);
 929                 return;
 930         }
 931 
 932         if (net_debug > 3)
 933                 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
 934 
 935         switch (nl->connection) {
 936         case PLIP_CN_CLOSING:
 937                 netif_wake_queue (dev);
 938                 /* fall through */
 939         case PLIP_CN_NONE:
 940         case PLIP_CN_SEND:
 941                 rcv->state = PLIP_PK_TRIGGER;
 942                 nl->connection = PLIP_CN_RECEIVE;
 943                 nl->timeout_count = 0;
 944                 schedule_work(&nl->immediate);
 945                 break;
 946 
 947         case PLIP_CN_RECEIVE:
  948                 /* May occur because there is a race condition
 949                    around test and set of dev->interrupt.
 950                    Ignore this interrupt. */
 951                 break;
 952 
 953         case PLIP_CN_ERROR:
 954                 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
 955                 break;
 956         }
 957 
 958         spin_unlock_irqrestore(&nl->lock, flags);
 959 }
 960 
 961 static int
 962 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
 963 {
 964         struct net_local *nl = netdev_priv(dev);
 965         struct plip_local *snd = &nl->snd_data;
 966 
 967         if (netif_queue_stopped(dev))
 968                 return NETDEV_TX_BUSY;
 969 
 970         /* We may need to grab the bus */
 971         if (!nl->port_owner) {
 972                 if (parport_claim(nl->pardev))
 973                         return NETDEV_TX_BUSY;
 974                 nl->port_owner = 1;
 975         }
 976 
 977         netif_stop_queue (dev);
 978 
 979         if (skb->len > dev->mtu + dev->hard_header_len) {
 980                 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
 981                 netif_start_queue (dev);
 982                 return NETDEV_TX_BUSY;
 983         }
 984 
 985         if (net_debug > 2)
 986                 printk(KERN_DEBUG "%s: send request\n", dev->name);
 987 
 988         spin_lock_irq(&nl->lock);
 989         snd->skb = skb;
 990         snd->length.h = skb->len;
 991         snd->state = PLIP_PK_TRIGGER;
 992         if (nl->connection == PLIP_CN_NONE) {
 993                 nl->connection = PLIP_CN_SEND;
 994                 nl->timeout_count = 0;
 995         }
 996         schedule_work(&nl->immediate);
 997         spin_unlock_irq(&nl->lock);
 998 
 999         return NETDEV_TX_OK;
1000 }
1001 
1002 static void
1003 plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1004 {
1005         const struct in_device *in_dev;
1006 
1007         rcu_read_lock();
1008         in_dev = __in_dev_get_rcu(dev);
1009         if (in_dev) {
1010                 /* Any address will do - we take the first */
1011                 const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1012                 if (ifa) {
1013                         memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
1014                         memset(eth->h_dest, 0xfc, 2);
1015                         memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1016                 }
1017         }
1018         rcu_read_unlock();
1019 }
1020 
1021 static int
1022 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1023                  unsigned short type, const void *daddr,
1024                  const void *saddr, unsigned len)
1025 {
1026         int ret;
1027 
1028         ret = eth_header(skb, dev, type, daddr, saddr, len);
1029         if (ret >= 0)
1030                 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1031 
1032         return ret;
1033 }
1034 
1035 static int plip_hard_header_cache(const struct neighbour *neigh,
1036                                   struct hh_cache *hh, __be16 type)
1037 {
1038         int ret;
1039 
1040         ret = eth_header_cache(neigh, hh, type);
1041         if (ret == 0) {
1042                 struct ethhdr *eth;
1043 
1044                 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1045                                        HH_DATA_OFF(sizeof(*eth)));
1046                 plip_rewrite_address (neigh->dev, eth);
1047         }
1048 
1049         return ret;
1050 }
1051 
1052 /* Open/initialize the board.  This is called (in the current kernel)
1053    sometime after booting when the 'ifconfig' program is run.
1054 
1055    This routine gets exclusive access to the parallel port by allocating
1056    its IRQ line.
1057  */
1058 static int
1059 plip_open(struct net_device *dev)
1060 {
1061         struct net_local *nl = netdev_priv(dev);
1062         struct in_device *in_dev;
1063 
1064         /* Grab the port */
1065         if (!nl->port_owner) {
1066                 if (parport_claim(nl->pardev)) return -EAGAIN;
1067                 nl->port_owner = 1;
1068         }
1069 
1070         nl->should_relinquish = 0;
1071 
1072         /* Clear the data port. */
1073         write_data (dev, 0x00);
1074 
1075         /* Enable rx interrupt. */
1076         enable_parport_interrupts (dev);
1077         if (dev->irq == -1)
1078         {
1079                 atomic_set (&nl->kill_timer, 0);
1080                 schedule_delayed_work(&nl->timer, 1);
1081         }
1082 
1083         /* Initialize the state machine. */
1084         nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1085         nl->rcv_data.skb = nl->snd_data.skb = NULL;
1086         nl->connection = PLIP_CN_NONE;
1087         nl->is_deferred = 0;
1088 
1089         /* Fill in the MAC-level header.
1090            We used to abuse dev->broadcast to store the point-to-point
1091            MAC address, but we no longer do it. Instead, we fetch the
1092            interface address whenever it is needed, which is cheap enough
1093            because we use the hh_cache. Actually, abusing dev->broadcast
1094            didn't work, because when using plip_open the point-to-point
1095            address isn't yet known.
1096            PLIP doesn't have a real MAC address, but we need it to be
1097            DOS compatible, and to properly support taps (otherwise,
1098            when the device address isn't identical to the address of a
1099            received frame, the kernel incorrectly drops it).             */
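        /* Editor's note, for illustration: plip_init_netdev() presets the
           whole dev_addr to 0xfc, so after the copy below an interface with
           local address 192.168.1.1 ends up with the MAC fc:fc:c0:a8:01:01
           (the IP in network byte order).  plip_rewrite_address() rebuilds
           the destination address the same way from the peer's address. */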
1100 
1101         in_dev=__in_dev_get_rtnl(dev);
1102         if (in_dev) {
1103                 /* Any address will do - we take the first. We already
1104                    have the first two bytes filled with 0xfc, from
 1105                    plip_init_netdev(). */
1106                 const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
1107                 if (ifa != NULL) {
1108                         memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1109                 }
1110         }
1111 
1112         netif_start_queue (dev);
1113 
1114         return 0;
1115 }
1116 
1117 /* The inverse routine to plip_open (). */
1118 static int
1119 plip_close(struct net_device *dev)
1120 {
1121         struct net_local *nl = netdev_priv(dev);
1122         struct plip_local *snd = &nl->snd_data;
1123         struct plip_local *rcv = &nl->rcv_data;
1124 
1125         netif_stop_queue (dev);
1126         DISABLE(dev->irq);
1127         synchronize_irq(dev->irq);
1128 
1129         if (dev->irq == -1)
1130         {
1131                 init_completion(&nl->killed_timer_cmp);
1132                 atomic_set (&nl->kill_timer, 1);
1133                 wait_for_completion(&nl->killed_timer_cmp);
1134         }
1135 
1136 #ifdef NOTDEF
1137         outb(0x00, PAR_DATA(dev));
1138 #endif
1139         nl->is_deferred = 0;
1140         nl->connection = PLIP_CN_NONE;
1141         if (nl->port_owner) {
1142                 parport_release(nl->pardev);
1143                 nl->port_owner = 0;
1144         }
1145 
1146         snd->state = PLIP_PK_DONE;
1147         if (snd->skb) {
1148                 dev_kfree_skb(snd->skb);
1149                 snd->skb = NULL;
1150         }
1151         rcv->state = PLIP_PK_DONE;
1152         if (rcv->skb) {
1153                 kfree_skb(rcv->skb);
1154                 rcv->skb = NULL;
1155         }
1156 
1157 #ifdef NOTDEF
1158         /* Reset. */
1159         outb(0x00, PAR_CONTROL(dev));
1160 #endif
1161         return 0;
1162 }
1163 
1164 static int
1165 plip_preempt(void *handle)
1166 {
1167         struct net_device *dev = (struct net_device *)handle;
1168         struct net_local *nl = netdev_priv(dev);
1169 
1170         /* Stand our ground if a datagram is on the wire */
1171         if (nl->connection != PLIP_CN_NONE) {
1172                 nl->should_relinquish = 1;
1173                 return 1;
1174         }
1175 
1176         nl->port_owner = 0;     /* Remember that we released the bus */
1177         return 0;
1178 }
1179 
1180 static void
1181 plip_wakeup(void *handle)
1182 {
1183         struct net_device *dev = (struct net_device *)handle;
1184         struct net_local *nl = netdev_priv(dev);
1185 
1186         if (nl->port_owner) {
1187                 /* Why are we being woken up? */
1188                 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1189                 if (!parport_claim(nl->pardev))
1190                         /* bus_owner is already set (but why?) */
1191                         printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1192                 else
1193                         return;
1194         }
1195 
1196         if (!(dev->flags & IFF_UP))
1197                 /* Don't need the port when the interface is down */
1198                 return;
1199 
1200         if (!parport_claim(nl->pardev)) {
1201                 nl->port_owner = 1;
1202                 /* Clear the data port. */
1203                 write_data (dev, 0x00);
1204         }
1205 }
1206 
1207 static int
1208 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1209 {
1210         struct net_local *nl = netdev_priv(dev);
1211         struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1212 
1213         if (cmd != SIOCDEVPLIP)
1214                 return -EOPNOTSUPP;
1215 
1216         switch(pc->pcmd) {
1217         case PLIP_GET_TIMEOUT:
1218                 pc->trigger = nl->trigger;
1219                 pc->nibble  = nl->nibble;
1220                 break;
1221         case PLIP_SET_TIMEOUT:
1222                 if(!capable(CAP_NET_ADMIN))
1223                         return -EPERM;
1224                 nl->trigger = pc->trigger;
1225                 nl->nibble  = pc->nibble;
1226                 break;
1227         default:
1228                 return -EOPNOTSUPP;
1229         }
1230         return 0;
1231 }
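
/*
 * Minimal user-space sketch (editor's addition, guarded out of the build):
 * how a program could change the timeouts through SIOCDEVPLIP.  The socket
 * and ioctl calls and struct plipconf from <linux/if_plip.h> are the usual
 * user-space interfaces; the helper itself is hypothetical.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_plip.h>

static int plip_set_timeouts(const char *ifname,
			     unsigned long trigger, unsigned long nibble)
{
	struct ifreq ifr;
	struct plipconf *pc = (struct plipconf *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret;

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	pc->pcmd = PLIP_SET_TIMEOUT;	/* or PLIP_GET_TIMEOUT to read back */
	pc->trigger = trigger;		/* both in PLIP_DELAY_UNIT (usec) steps */
	pc->nibble = nibble;
	ret = ioctl(fd, SIOCDEVPLIP, &ifr);
	close(fd);
	return ret;
}
#endif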
1232 
1233 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1234 static int timid;
1235 
1236 module_param_array(parport, int, NULL, 0);
1237 module_param(timid, int, 0);
 1238 MODULE_PARM_DESC(parport, "List of parport device numbers to be used by plip");
1239 
1240 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1241 
1242 static inline int
1243 plip_searchfor(int list[], int a)
1244 {
1245         int i;
1246         for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1247                 if (list[i] == a) return 1;
1248         }
1249         return 0;
1250 }
1251 
1252 /* plip_attach() is called (by the parport code) when a port is
1253  * available to use. */
1254 static void plip_attach (struct parport *port)
1255 {
1256         static int unit;
1257         struct net_device *dev;
1258         struct net_local *nl;
1259         char name[IFNAMSIZ];
1260         struct pardev_cb plip_cb;
1261 
1262         if ((parport[0] == -1 && (!timid || !port->devices)) ||
1263             plip_searchfor(parport, port->number)) {
1264                 if (unit == PLIP_MAX) {
1265                         printk(KERN_ERR "plip: too many devices\n");
1266                         return;
1267                 }
1268 
1269                 sprintf(name, "plip%d", unit);
1270                 dev = alloc_etherdev(sizeof(struct net_local));
1271                 if (!dev)
1272                         return;
1273 
1274                 strcpy(dev->name, name);
1275 
1276                 dev->irq = port->irq;
1277                 dev->base_addr = port->base;
1278                 if (port->irq == -1) {
 1279                         printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
1280                                  "which is fairly inefficient!\n", port->name);
1281                 }
1282 
1283                 nl = netdev_priv(dev);
1284                 nl->dev = dev;
1285 
1286                 memset(&plip_cb, 0, sizeof(plip_cb));
1287                 plip_cb.private = dev;
1288                 plip_cb.preempt = plip_preempt;
1289                 plip_cb.wakeup = plip_wakeup;
1290                 plip_cb.irq_func = plip_interrupt;
1291 
1292                 nl->pardev = parport_register_dev_model(port, dev->name,
1293                                                         &plip_cb, unit);
1294 
1295                 if (!nl->pardev) {
1296                         printk(KERN_ERR "%s: parport_register failed\n", name);
1297                         goto err_free_dev;
1298                 }
1299 
1300                 plip_init_netdev(dev);
1301 
1302                 if (register_netdev(dev)) {
1303                         printk(KERN_ERR "%s: network register failed\n", name);
1304                         goto err_parport_unregister;
1305                 }
1306 
1307                 printk(KERN_INFO "%s", version);
1308                 if (dev->irq != -1)
1309                         printk(KERN_INFO "%s: Parallel port at %#3lx, "
1310                                          "using IRQ %d.\n",
1311                                          dev->name, dev->base_addr, dev->irq);
1312                 else
1313                         printk(KERN_INFO "%s: Parallel port at %#3lx, "
1314                                          "not using IRQ.\n",
1315                                          dev->name, dev->base_addr);
1316                 dev_plip[unit++] = dev;
1317         }
1318         return;
1319 
1320 err_parport_unregister:
1321         parport_unregister_device(nl->pardev);
1322 err_free_dev:
1323         free_netdev(dev);
1324 }
1325 
1326 /* plip_detach() is called (by the parport code) when a port is
1327  * no longer available to use. */
1328 static void plip_detach (struct parport *port)
1329 {
1330         /* Nothing to do */
1331 }
1332 
1333 static int plip_probe(struct pardevice *par_dev)
1334 {
1335         struct device_driver *drv = par_dev->dev.driver;
1336         int len = strlen(drv->name);
1337 
1338         if (strncmp(par_dev->name, drv->name, len))
1339                 return -ENODEV;
1340 
1341         return 0;
1342 }
1343 
1344 static struct parport_driver plip_driver = {
1345         .name           = "plip",
1346         .probe          = plip_probe,
1347         .match_port     = plip_attach,
1348         .detach         = plip_detach,
1349         .devmodel       = true,
1350 };
1351 
1352 static void __exit plip_cleanup_module (void)
1353 {
1354         struct net_device *dev;
1355         int i;
1356 
1357         for (i=0; i < PLIP_MAX; i++) {
1358                 if ((dev = dev_plip[i])) {
1359                         struct net_local *nl = netdev_priv(dev);
1360                         unregister_netdev(dev);
1361                         if (nl->port_owner)
1362                                 parport_release(nl->pardev);
1363                         parport_unregister_device(nl->pardev);
1364                         free_netdev(dev);
1365                         dev_plip[i] = NULL;
1366                 }
1367         }
1368 
1369         parport_unregister_driver(&plip_driver);
1370 }
1371 
1372 #ifndef MODULE
1373 
1374 static int parport_ptr;
1375 
1376 static int __init plip_setup(char *str)
1377 {
1378         int ints[4];
1379 
1380         str = get_options(str, ARRAY_SIZE(ints), ints);
1381 
1382         /* Ugh. */
1383         if (!strncmp(str, "parport", 7)) {
1384                 int n = simple_strtoul(str+7, NULL, 10);
1385                 if (parport_ptr < PLIP_MAX)
1386                         parport[parport_ptr++] = n;
1387                 else
1388                         printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1389                                str);
1390         } else if (!strcmp(str, "timid")) {
1391                 timid = 1;
1392         } else {
1393                 if (ints[0] == 0 || ints[1] == 0) {
1394                         /* disable driver on "plip=" or "plip=0" */
1395                         parport[0] = -2;
1396                 } else {
1397                         printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1398                                ints[1]);
1399                 }
1400         }
1401         return 1;
1402 }
1403 
1404 __setup("plip=", plip_setup);
1405 
1406 #endif /* !MODULE */
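
/*
 * Usage examples (editor's note; syntax follows the parsing above):
 *   built-in:  boot with "plip=parport1" to bind a device to parport1,
 *              "plip=timid" to claim only otherwise unused ports, or
 *              "plip=0" to disable the driver entirely.
 *   module:    modprobe plip parport=0,1 timid=1
 * With no options the driver attaches to every parallel port it finds.
 */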
1407 
1408 static int __init plip_init (void)
1409 {
1410         if (parport[0] == -2)
1411                 return 0;
1412 
1413         if (parport[0] != -1 && timid) {
1414                 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1415                 timid = 0;
1416         }
1417 
1418         if (parport_register_driver (&plip_driver)) {
1419                 printk (KERN_WARNING "plip: couldn't register driver\n");
1420                 return 1;
1421         }
1422 
1423         return 0;
1424 }
1425 
1426 module_init(plip_init);
1427 module_exit(plip_cleanup_module);
1428 MODULE_LICENSE("GPL");
