This source file includes the following definitions:
- dev_to_port
- get_status
- pci_map_single_debug
- wanxl_cable_intr
- wanxl_tx_intr
- wanxl_rx_intr
- wanxl_intr
- wanxl_xmit
- wanxl_attach
- wanxl_ioctl
- wanxl_open
- wanxl_close
- wanxl_get_stats
- wanxl_puts_command
- wanxl_reset
- wanxl_pci_remove_one
- wanxl_pci_init_one
- wanxl_init_module
- wanxl_cleanup_module

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/io.h>

#include "wanxl.h"

static const char *version = "wanXL serial card driver version: 0.48";

#define PLX_CTL_RESET 0x40000000

#undef DEBUG_PKT
#undef DEBUG_PCI


#define MBX1_CMD_ABORTJ 0x85000000
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP 0x8C000001
#else
#define MBX1_CMD_BSWAP 0x8C000000
#endif


#define MBX2_MEMSZ_MASK 0xFFFF0000


struct port {
        struct net_device *dev;
        struct card *card;
        spinlock_t lock;
        int node;
        unsigned int clock_type;
        int tx_in, tx_out;
        struct sk_buff *tx_skbs[TX_BUFFERS];
};


struct card_status {
        desc_t rx_descs[RX_QUEUE_LENGTH];
        port_status_t port_status[4];
};


struct card {
        int n_ports;
        u8 irq;

        u8 __iomem *plx;
        struct pci_dev *pdev;
        int rx_in;
        struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
        struct card_status *status;
        dma_addr_t status_address;
        struct port ports[];
};



static inline struct port *dev_to_port(struct net_device *dev)
{
        return (struct port *)dev_to_hdlc(dev)->priv;
}


static inline port_status_t *get_status(struct port *port)
{
        return &port->card->status->port_status[port->node];
}


#ifdef DEBUG_PCI
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
                                              size_t size, int direction)
{
        dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
        if (addr + size > 0x100000000LL)
                pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
                        pci_name(pdev), (unsigned long long)addr);
        return addr;
}

#undef pci_map_single
#define pci_map_single pci_map_single_debug
#endif


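/* Cable and/or personality module change interrupt service */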
static inline void wanxl_cable_intr(struct port *port)
{
        u32 value = get_status(port)->cable;
        int valid = 1;
        const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

        switch (value & 0x7) {
        case STATUS_CABLE_V35: cable = "V.35"; break;
        case STATUS_CABLE_X21: cable = "X.21"; break;
        case STATUS_CABLE_V24: cable = "V.24"; break;
        case STATUS_CABLE_EIA530: cable = "EIA530"; break;
        case STATUS_CABLE_NONE: cable = "no"; break;
        default: cable = "invalid";
        }

        switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
        case STATUS_CABLE_V35: pm = "V.35"; break;
        case STATUS_CABLE_X21: pm = "X.21"; break;
        case STATUS_CABLE_V24: pm = "V.24"; break;
        case STATUS_CABLE_EIA530: pm = "EIA530"; break;
        case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
        default: pm = "invalid personality"; valid = 0;
        }

        if (valid) {
                if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
                        dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
                                                           ", DSR off";
                        dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
                                                           ", carrier off";
                }
                dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
        }
        netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
                    pm, dte, cable, dsr, dcd);

        if (value & STATUS_CABLE_DCD)
                netif_carrier_on(port->dev);
        else
                netif_carrier_off(port->dev);
}


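/* Transmit complete interrupt service */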
static inline void wanxl_tx_intr(struct port *port)
{
        struct net_device *dev = port->dev;

        while (1) {
                desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
                struct sk_buff *skb = port->tx_skbs[port->tx_in];

                switch (desc->stat) {
                case PACKET_FULL:
                case PACKET_EMPTY:
                        netif_wake_queue(dev);
                        return;

                case PACKET_UNDERRUN:
                        dev->stats.tx_errors++;
                        dev->stats.tx_fifo_errors++;
                        break;

                default:
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                }
                desc->stat = PACKET_EMPTY;
                pci_unmap_single(port->card->pdev, desc->address, skb->len,
                                 PCI_DMA_TODEVICE);
                dev_consume_skb_irq(skb);
                port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
        }
}


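/* Receive complete interrupt service */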
static inline void wanxl_rx_intr(struct card *card)
{
        desc_t *desc;

        while (desc = &card->status->rx_descs[card->rx_in],
               desc->stat != PACKET_EMPTY) {
                if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
                        pr_crit("%s: received packet for nonexistent port\n",
                                pci_name(card->pdev));
                else {
                        struct sk_buff *skb = card->rx_skbs[card->rx_in];
                        struct port *port = &card->ports[desc->stat &
                                                         PACKET_PORT_MASK];
                        struct net_device *dev = port->dev;

                        if (!skb)
                                dev->stats.rx_dropped++;
                        else {
                                pci_unmap_single(card->pdev, desc->address,
                                                 BUFFER_LENGTH,
                                                 PCI_DMA_FROMDEVICE);
                                skb_put(skb, desc->length);

#ifdef DEBUG_PKT
                                printk(KERN_DEBUG "%s RX(%i):", dev->name,
                                       skb->len);
                                debug_frame(skb);
#endif
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += skb->len;
                                skb->protocol = hdlc_type_trans(skb, dev);
                                netif_rx(skb);
                                skb = NULL;
                        }

                        if (!skb) {
                                skb = dev_alloc_skb(BUFFER_LENGTH);
                                desc->address = skb ?
                                        pci_map_single(card->pdev, skb->data,
                                                       BUFFER_LENGTH,
                                                       PCI_DMA_FROMDEVICE) : 0;
                                card->rx_skbs[card->rx_in] = skb;
                        }
                }
                desc->stat = PACKET_EMPTY;
                card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
        }
}


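/* Shared interrupt handler: acknowledge the doorbell bits raised by the card
 * and dispatch them to the per-port TX/cable handlers and the RX handler.
 */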
static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
        struct card *card = dev_id;
        int i;
        u32 stat;
        int handled = 0;

        while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
                handled = 1;
                writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);

                for (i = 0; i < card->n_ports; i++) {
                        if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
                                wanxl_tx_intr(&card->ports[i]);
                        if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
                                wanxl_cable_intr(&card->ports[i]);
                }
                if (stat & (1 << DOORBELL_FROM_CARD_RX))
                        wanxl_rx_intr(card);
        }

        return IRQ_RETVAL(handled);
}


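/* Queue a frame for transmission: map the skb, fill the next TX descriptor
 * and ring the card's doorbell. The queue is stopped while no descriptor
 * is free.
 */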
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        desc_t *desc;

        spin_lock(&port->lock);

        desc = &get_status(port)->tx_descs[port->tx_out];
        if (desc->stat != PACKET_EMPTY) {
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
                netif_stop_queue(dev);
                spin_unlock(&port->lock);
                return NETDEV_TX_BUSY;
        }

#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif

        port->tx_skbs[port->tx_out] = skb;
        desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
        desc->length = skb->len;
        desc->stat = PACKET_FULL;
        writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
               port->card->plx + PLX_DOORBELL_TO_CARD);

        port->tx_out = (port->tx_out + 1) % TX_BUFFERS;

        if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
                netif_stop_queue(dev);
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
        }

        spin_unlock(&port->lock);
        return NETDEV_TX_OK;
}


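/* hdlc attach callback: accept only the NRZ/NRZI encodings and CRC settings
 * listed below and pass them to the firmware via the port status block.
 */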
static int wanxl_attach(struct net_device *dev, unsigned short encoding,
                        unsigned short parity)
{
        struct port *port = dev_to_port(dev);

        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC32_PR1_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT &&
            parity != PARITY_CRC32_PR0_CCITT &&
            parity != PARITY_CRC16_PR0_CCITT)
                return -EINVAL;

        get_status(port)->encoding = encoding;
        get_status(port)->parity = parity;
        return 0;
}


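/* SIOCWANDEV handler: report the current sync-serial settings or set the
 * clocking mode (external or TX-from-RX); anything else goes to hdlc_ioctl().
 */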
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        const size_t size = sizeof(sync_serial_settings);
        sync_serial_settings line;
        struct port *port = dev_to_port(dev);

        if (cmd != SIOCWANDEV)
                return hdlc_ioctl(dev, ifr, cmd);

        switch (ifr->ifr_settings.type) {
        case IF_GET_IFACE:
                ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size;
                        return -ENOBUFS;
                }
                memset(&line, 0, sizeof(line));
                line.clock_type = get_status(port)->clocking;
                line.clock_rate = 0;
                line.loopback = 0;

                if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
                        return -EFAULT;
                return 0;

        case IF_IFACE_SYNC_SERIAL:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (dev->flags & IFF_UP)
                        return -EBUSY;

                if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
                                   size))
                        return -EFAULT;

                if (line.clock_type != CLOCK_EXT &&
                    line.clock_type != CLOCK_TXFROMRX)
                        return -EINVAL;

                if (line.loopback != 0)
                        return -EINVAL;

                get_status(port)->clocking = line.clock_type;
                return 0;

        default:
                return hdlc_ioctl(dev, ifr, cmd);
        }
}


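/* Open the port: reset the TX ring, ask the card to open the port via the
 * doorbell register and wait up to one second for the firmware to confirm.
 */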
static int wanxl_open(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
        unsigned long timeout;
        int i;

        if (get_status(port)->open) {
                netdev_err(dev, "port already open\n");
                return -EIO;
        }

        i = hdlc_open(dev);
        if (i)
                return i;

        port->tx_in = port->tx_out = 0;
        for (i = 0; i < TX_BUFFERS; i++)
                get_status(port)->tx_descs[i].stat = PACKET_EMPTY;

        writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);

        timeout = jiffies + HZ;
        do {
                if (get_status(port)->open) {
                        netif_start_queue(dev);
                        return 0;
                }
        } while (time_after(timeout, jiffies));

        netdev_err(dev, "unable to open port\n");

        writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
        return -EFAULT;
}


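/* Close the port: ask the card to close it, wait for confirmation and free
 * any skbs still held by TX descriptors.
 */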
static int wanxl_close(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        unsigned long timeout;
        int i;

        hdlc_close(dev);

        writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
               port->card->plx + PLX_DOORBELL_TO_CARD);

        timeout = jiffies + HZ;
        do {
                if (!get_status(port)->open)
                        break;
        } while (time_after(timeout, jiffies));

        if (get_status(port)->open)
                netdev_err(dev, "unable to close port\n");

        netif_stop_queue(dev);

        for (i = 0; i < TX_BUFFERS; i++) {
                desc_t *desc = &get_status(port)->tx_descs[i];

                if (desc->stat != PACKET_EMPTY) {
                        desc->stat = PACKET_EMPTY;
                        pci_unmap_single(port->card->pdev, desc->address,
                                         port->tx_skbs[i]->len,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb(port->tx_skbs[i]);
                }
        }
        return 0;
}


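/* Merge the error counters kept by the firmware into the netdev statistics */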
static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);

        dev->stats.rx_over_errors = get_status(port)->rx_overruns;
        dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
        dev->stats.rx_errors = dev->stats.rx_over_errors +
                               dev->stats.rx_frame_errors;
        return &dev->stats;
}


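/* Pass a command to the card's firmware monitor (PUTS) through mailbox 1 and
 * wait up to five seconds for it to be accepted (mailbox reads back as zero).
 */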
static int wanxl_puts_command(struct card *card, u32 cmd)
{
        unsigned long timeout = jiffies + 5 * HZ;

        writel(cmd, card->plx + PLX_MAILBOX_1);
        do {
                if (readl(card->plx + PLX_MAILBOX_1) == 0)
                        return 0;

                schedule();
        } while (time_after(timeout, jiffies));

        return -1;
}


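/* Reset the on-board processor through the PLX control register; the extra
 * readl() calls force the posted writes to complete before and after the
 * short delay.
 */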
static void wanxl_reset(struct card *card)
{
        u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

        writel(0x80, card->plx + PLX_MAILBOX_0);
        writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
        readl(card->plx + PLX_CONTROL);
        udelay(1);
        writel(old_value, card->plx + PLX_CONTROL);
        readl(card->plx + PLX_CONTROL);
}


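/* Tear down a card: unregister the HDLC devices, free the IRQ, reset the
 * board, unmap the RX buffers and release all PCI resources.
 */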
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
        struct card *card = pci_get_drvdata(pdev);
        int i;

        for (i = 0; i < card->n_ports; i++) {
                unregister_hdlc_device(card->ports[i].dev);
                free_netdev(card->ports[i].dev);
        }

        if (card->irq)
                free_irq(card->irq, card);

        wanxl_reset(card);

        for (i = 0; i < RX_QUEUE_LENGTH; i++)
                if (card->rx_skbs[i]) {
                        pci_unmap_single(card->pdev,
                                         card->status->rx_descs[i].address,
                                         BUFFER_LENGTH, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(card->rx_skbs[i]);
                }

        if (card->plx)
                iounmap(card->plx);

        if (card->status)
                pci_free_consistent(pdev, sizeof(struct card_status),
                                    card->status, card->status_address);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        kfree(card);
}


#include "wanxlfw.inc"

static const struct net_device_ops wanxl_ops = {
        .ndo_open       = wanxl_open,
        .ndo_stop       = wanxl_close,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_do_ioctl   = wanxl_ioctl,
        .ndo_get_stats  = wanxl_get_stats,
};

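/* Probe one wanXL card: allocate the shared status block, download and start
 * the firmware, set up the receive buffers and register one HDLC device per
 * port.
 */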
static int wanxl_pci_init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        struct card *card;
        u32 ramsize, stat;
        unsigned long timeout;
        u32 plx_phy;
        u32 mem_phy;
        u8 __iomem *mem;
        int i, ports;

#ifndef MODULE
        pr_info_once("%s\n", version);
#endif

        i = pci_enable_device(pdev);
        if (i)
                return i;

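        /* Allocate the shared status block with a 28-bit DMA mask so that it
         * lands low in bus address space, apparently a limitation of the
         * on-board CPU; the mask is widened back to 32 bits further down for
         * the packet buffers.
         */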
        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
            pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
                pr_err("No usable DMA configuration\n");
                pci_disable_device(pdev);
                return -EIO;
        }

        i = pci_request_regions(pdev, "wanXL");
        if (i) {
                pci_disable_device(pdev);
                return i;
        }

        switch (pdev->device) {
        case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
        case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
        default: ports = 4;
        }

        card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
        if (card == NULL) {
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                return -ENOBUFS;
        }

        pci_set_drvdata(pdev, card);
        card->pdev = pdev;

        card->status = pci_alloc_consistent(pdev,
                                            sizeof(struct card_status),
                                            &card->status_address);
        if (card->status == NULL) {
                wanxl_pci_remove_one(pdev);
                return -ENOBUFS;
        }

#ifdef DEBUG_PCI
        printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
               " at 0x%LX\n", pci_name(pdev),
               (unsigned long long)card->status_address);
#endif

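        /* The consistent status block is allocated, so packet buffers can use
         * the full 32-bit DMA mask from here on.
         */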
        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) ||
            pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pr_err("No usable DMA configuration\n");
                wanxl_pci_remove_one(pdev);
                return -EIO;
        }

        plx_phy = pci_resource_start(pdev, 0);

        card->plx = ioremap_nocache(plx_phy, 0x70);
        if (!card->plx) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
                return -EFAULT;
        }

#if RESET_WHILE_LOADING
        wanxl_reset(card);
#endif

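        /* Wait for the card's power-up self test (PUTS) to finish; mailbox 0
         * reads as zero once it has completed.
         */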
        timeout = jiffies + 20 * HZ;
        while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
                if (time_before(timeout, jiffies)) {
                        pr_warn("%s: timeout waiting for PUTS to complete\n",
                                pci_name(pdev));
                        wanxl_pci_remove_one(pdev);
                        return -ENODEV;
                }

                switch (stat & 0xC0) {
                case 0x00:
                case 0x80:
                        break;

                default:
                        pr_warn("%s: PUTS test 0x%X failed\n",
                                pci_name(pdev), stat & 0x30);
                        wanxl_pci_remove_one(pdev);
                        return -ENODEV;
                }

                schedule();
        }

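        /* Mailbox 2 reports the amount of on-board RAM; make sure it is large
         * enough for the per-port TX and RX buffers.
         */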
        ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;

        mem_phy = pci_resource_start(pdev, 2);

        if (ramsize < BUFFERS_ADDR +
            (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
                pr_warn("%s: not enough on-board RAM (%u bytes detected, %u bytes required)\n",
                        pci_name(pdev), ramsize,
                        BUFFERS_ADDR +
                        (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

        if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
                pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

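        /* Pre-allocate and DMA-map the receive buffers shared by all ports */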
        for (i = 0; i < RX_QUEUE_LENGTH; i++) {
                struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);

                card->rx_skbs[i] = skb;
                if (skb)
                        card->status->rx_descs[i].address =
                                pci_map_single(card->pdev, skb->data,
                                               BUFFER_LENGTH,
                                               PCI_DMA_FROMDEVICE);
        }

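        /* Map the on-board RAM, copy the firmware into it and pass it the bus
         * addresses of the shared status structures.
         */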
        mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
        if (!mem) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
                return -EFAULT;
        }

        for (i = 0; i < sizeof(firmware); i += 4)
                writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);

        for (i = 0; i < ports; i++)
                writel(card->status_address +
                       (void *)&card->status->port_status[i] -
                       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
        writel(card->status_address, mem + PDM_OFFSET + 20);
        writel(PDM_OFFSET, mem);
        iounmap(mem);

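        /* Start the firmware with the Abort and Jump command and wait up to
         * five seconds for it to report back through mailbox 5.
         */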
        writel(0, card->plx + PLX_MAILBOX_5);

        if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
                pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

        timeout = jiffies + 5 * HZ;
        do {
                stat = readl(card->plx + PLX_MAILBOX_5);
                if (stat)
                        break;
                schedule();
        } while (time_after(timeout, jiffies));

        if (!stat) {
                pr_warn("%s: timeout while initializing card firmware\n",
                        pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

#if DETECT_RAM
        ramsize = stat;
#endif

        pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
                pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);

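        /* Register the (shared) interrupt handler before creating the ports */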
        if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
                pr_warn("%s: could not allocate IRQ%i\n",
                        pci_name(pdev), pdev->irq);
                wanxl_pci_remove_one(pdev);
                return -EBUSY;
        }
        card->irq = pdev->irq;

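        /* Create and register one HDLC network device per physical port */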
        for (i = 0; i < ports; i++) {
                hdlc_device *hdlc;
                struct port *port = &card->ports[i];
                struct net_device *dev = alloc_hdlcdev(port);

                if (!dev) {
                        pr_err("%s: unable to allocate memory\n",
                               pci_name(pdev));
                        wanxl_pci_remove_one(pdev);
                        return -ENOMEM;
                }

                port->dev = dev;
                hdlc = dev_to_hdlc(dev);
                spin_lock_init(&port->lock);
                dev->tx_queue_len = 50;
                dev->netdev_ops = &wanxl_ops;
                hdlc->attach = wanxl_attach;
                hdlc->xmit = wanxl_xmit;
                port->card = card;
                port->node = i;
                get_status(port)->clocking = CLOCK_EXT;
                if (register_hdlc_device(dev)) {
                        pr_err("%s: unable to register hdlc device\n",
                               pci_name(pdev));
                        free_netdev(dev);
                        wanxl_pci_remove_one(pdev);
                        return -ENOBUFS;
                }
                card->n_ports++;
        }

        pr_info("%s: port", pci_name(pdev));
        for (i = 0; i < ports; i++)
                pr_cont("%s #%i: %s",
                        i ? "," : "", i, card->ports[i].dev->name);
        pr_cont("\n");

        for (i = 0; i < ports; i++)
                wanxl_cable_intr(&card->ports[i]);

        return 0;
}


static const struct pci_device_id wanxl_pci_tbl[] = {
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { 0, }
};


static struct pci_driver wanxl_pci_driver = {
        .name     = "wanXL",
        .id_table = wanxl_pci_tbl,
        .probe    = wanxl_pci_init_one,
        .remove   = wanxl_pci_remove_one,
};


static int __init wanxl_init_module(void)
{
#ifdef MODULE
        pr_info("%s\n", version);
#endif
        return pci_register_driver(&wanxl_pci_driver);
}

static void __exit wanxl_cleanup_module(void)
{
        pci_unregister_driver(&wanxl_pci_driver);
}


MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);

module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);