This source file includes the following definitions:
- PRIV
- axnet_probe
- axnet_detach
- get_prom
- try_io_port
- axnet_configcheck
- axnet_config
- axnet_release
- axnet_suspend
- axnet_resume
- mdio_sync
- mdio_read
- mdio_write
- axnet_open
- axnet_close
- axnet_reset_8390
- ei_irq_wrapper
- ei_watchdog
- axnet_ioctl
- get_8390_hdr
- block_input
- block_output
- ax_open
- ax_close
- axnet_tx_timeout
- axnet_start_xmit
- ax_interrupt
- ei_tx_err
- ei_tx_intr
- ei_receive
- ei_rx_overrun
- get_stats
- make_mc_bits
- do_set_multicast_list
- set_multicast_list
- AX88190_init
- NS8390_trigger_send
/*
 * axnet_cs: A PCMCIA ethernet driver for Asix AX88190-based cards.
 *
 * Author: David Hinds <dahinds@users.sourceforge.net>
 */
27 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/ptrace.h>
32 #include <linux/string.h>
33 #include <linux/timer.h>
34 #include <linux/delay.h>
35 #include <linux/spinlock.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/crc32.h>
39 #include <linux/mii.h>
40 #include "8390.h"
41
42 #include <pcmcia/cistpl.h>
43 #include <pcmcia/ciscode.h>
44 #include <pcmcia/ds.h>
45 #include <pcmcia/cisreg.h>
46
47 #include <asm/io.h>
48 #include <asm/byteorder.h>
49 #include <linux/uaccess.h>
50
51 #define AXNET_CMD 0x00
52 #define AXNET_DATAPORT 0x10
53 #define AXNET_RESET 0x1f
54 #define AXNET_MII_EEP 0x14
55 #define AXNET_TEST 0x15
56 #define AXNET_GPIO 0x17
57
58 #define AXNET_START_PG 0x40
59 #define AXNET_STOP_PG 0x80
60
61 #define AXNET_RDC_TIMEOUT 0x02
62
63 #define IS_AX88190 0x0001
64 #define IS_AX88790 0x0002
65
66
67
68
69
70 MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
71 MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
72 MODULE_LICENSE("GPL");
73
74
75
76
77 static int axnet_config(struct pcmcia_device *link);
78 static void axnet_release(struct pcmcia_device *link);
79 static int axnet_open(struct net_device *dev);
80 static int axnet_close(struct net_device *dev);
81 static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
82 static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
83 struct net_device *dev);
84 static struct net_device_stats *get_stats(struct net_device *dev);
85 static void set_multicast_list(struct net_device *dev);
86 static void axnet_tx_timeout(struct net_device *dev);
87 static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
88 static void ei_watchdog(struct timer_list *t);
89 static void axnet_reset_8390(struct net_device *dev);
90
91 static int mdio_read(unsigned int addr, int phy_id, int loc);
92 static void mdio_write(unsigned int addr, int phy_id, int loc, int value);
93
94 static void get_8390_hdr(struct net_device *,
95 struct e8390_pkt_hdr *, int);
96 static void block_input(struct net_device *dev, int count,
97 struct sk_buff *skb, int ring_offset);
98 static void block_output(struct net_device *dev, int count,
99 const u_char *buf, const int start_page);
100
101 static void axnet_detach(struct pcmcia_device *p_dev);
102
103 static void AX88190_init(struct net_device *dev, int startp);
104 static int ax_open(struct net_device *dev);
105 static int ax_close(struct net_device *dev);
106 static irqreturn_t ax_interrupt(int irq, void *dev_id);
107
108
109
110 struct axnet_dev {
111 struct pcmcia_device *p_dev;
112 caddr_t base;
113 struct timer_list watchdog;
114 int stale, fast_poll;
115 u_short link_status;
116 u_char duplex_flag;
117 int phy_id;
118 int flags;
119 int active_low;
120 };
121
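/*
 * alloc_etherdev() in axnet_probe() reserves room for a struct ei_device
 * followed by a struct axnet_dev, so PRIV() returns the axnet-specific
 * state stored directly behind the 8390 state in netdev_priv().
 */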
122 static inline struct axnet_dev *PRIV(struct net_device *dev)
123 {
124 void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device);
125 return p;
126 }
127
128 static const struct net_device_ops axnet_netdev_ops = {
129 .ndo_open = axnet_open,
130 .ndo_stop = axnet_close,
131 .ndo_do_ioctl = axnet_ioctl,
132 .ndo_start_xmit = axnet_start_xmit,
133 .ndo_tx_timeout = axnet_tx_timeout,
134 .ndo_get_stats = get_stats,
135 .ndo_set_rx_mode = set_multicast_list,
136 .ndo_set_mac_address = eth_mac_addr,
137 .ndo_validate_addr = eth_validate_addr,
138 };
139
140 static int axnet_probe(struct pcmcia_device *link)
141 {
142 struct axnet_dev *info;
143 struct net_device *dev;
144 struct ei_device *ei_local;
145
146 dev_dbg(&link->dev, "axnet_attach()\n");
147
148 dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(struct axnet_dev));
149 if (!dev)
150 return -ENOMEM;
151
152 ei_local = netdev_priv(dev);
153 spin_lock_init(&ei_local->page_lock);
154
155 info = PRIV(dev);
156 info->p_dev = link;
157 link->priv = dev;
158 link->config_flags |= CONF_ENABLE_IRQ;
159
160 dev->netdev_ops = &axnet_netdev_ops;
161
162 dev->watchdog_timeo = TX_TIMEOUT;
163
164 return axnet_config(link);
165 }
166
167 static void axnet_detach(struct pcmcia_device *link)
168 {
169 struct net_device *dev = link->priv;
170
171 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
172
173 unregister_netdev(dev);
174
175 axnet_release(link);
176
177 free_netdev(dev);
178 }
179
185
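/*
 * Read the station address from the card using the 8390 remote-DMA
 * sequence below. Only cards with config base 0x03c0 are treated as
 * AX88190 parts; anything else is left to pcnet_cs (see the notice
 * printed in axnet_config()).
 */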
186 static int get_prom(struct pcmcia_device *link)
187 {
188 struct net_device *dev = link->priv;
189 unsigned int ioaddr = dev->base_addr;
190 int i, j;
191
192
193 struct {
194 u_char value, offset;
195 } program_seq[] = {
196 {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD},
197 {0x01, EN0_DCFG},
198 {0x00, EN0_RCNTLO},
199 {0x00, EN0_RCNTHI},
200 {0x00, EN0_IMR},
201 {0xFF, EN0_ISR},
202 {E8390_RXOFF|0x40, EN0_RXCR},
203 {E8390_TXOFF, EN0_TXCR},
204 {0x10, EN0_RCNTLO},
205 {0x00, EN0_RCNTHI},
206 {0x00, EN0_RSARLO},
207 {0x04, EN0_RSARHI},
208 {E8390_RREAD+E8390_START, E8390_CMD},
209 };
210
211
212 if (link->config_base != 0x03c0)
213 return 0;
214
215 axnet_reset_8390(dev);
216 mdelay(10);
217
218 for (i = 0; i < ARRAY_SIZE(program_seq); i++)
219 outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
220
221 for (i = 0; i < 6; i += 2) {
222 j = inw(ioaddr + AXNET_DATAPORT);
223 dev->dev_addr[i] = j & 0xff;
224 dev->dev_addr[i+1] = j >> 8;
225 }
226 return 1;
227 }
228
229 static int try_io_port(struct pcmcia_device *link)
230 {
231 int j, ret;
232 link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
233 link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
234 if (link->resource[0]->end == 32) {
235 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
236
237 if (link->resource[1]->end > 0)
238 link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
239 } else {
240
241 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
242 link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
243 }
244 if (link->resource[0]->start == 0) {
245 for (j = 0; j < 0x400; j += 0x20) {
246 link->resource[0]->start = j ^ 0x300;
247 link->resource[1]->start = (j ^ 0x300) + 0x10;
248 link->io_lines = 16;
249 ret = pcmcia_request_io(link);
250 if (ret == 0)
251 return ret;
252 }
253 return ret;
254 } else {
255 return pcmcia_request_io(link);
256 }
257 }
258
259 static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data)
260 {
261 if (p_dev->config_index == 0)
262 return -EINVAL;
263
264 p_dev->config_index = 0x05;
265 if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32)
266 return -ENODEV;
267
268 return try_io_port(p_dev);
269 }
270
271 static int axnet_config(struct pcmcia_device *link)
272 {
273 struct net_device *dev = link->priv;
274 struct axnet_dev *info = PRIV(dev);
275 int i, j, j2, ret;
276
277 dev_dbg(&link->dev, "axnet_config(0x%p)\n", link);
278
279
280 link->config_regs = 0x63;
281 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
282 ret = pcmcia_loop_config(link, axnet_configcheck, NULL);
283 if (ret != 0)
284 goto failed;
285
286 if (!link->irq)
287 goto failed;
288
289 if (resource_size(link->resource[1]) == 8)
290 link->config_flags |= CONF_ENABLE_SPKR;
291
292 ret = pcmcia_enable_device(link);
293 if (ret)
294 goto failed;
295
296 dev->irq = link->irq;
297 dev->base_addr = link->resource[0]->start;
298
299 if (!get_prom(link)) {
300 pr_notice("this is not an AX88190 card!\n");
301 pr_notice("use pcnet_cs instead.\n");
302 goto failed;
303 }
304
305 ei_status.name = "AX88190";
306 ei_status.word16 = 1;
307 ei_status.tx_start_page = AXNET_START_PG;
308 ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
309 ei_status.stop_page = AXNET_STOP_PG;
310 ei_status.reset_8390 = axnet_reset_8390;
311 ei_status.get_8390_hdr = get_8390_hdr;
312 ei_status.block_input = block_input;
313 ei_status.block_output = block_output;
314
315 if (inb(dev->base_addr + AXNET_TEST) != 0)
316 info->flags |= IS_AX88790;
317 else
318 info->flags |= IS_AX88190;
319
320 if (info->flags & IS_AX88790)
321 outb(0x10, dev->base_addr + AXNET_GPIO);
322
323 info->active_low = 0;
324
325 for (i = 0; i < 32; i++) {
326 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
327 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
328 if (j == j2) continue;
329 if ((j != 0) && (j != 0xffff)) break;
330 }
331
332 if (i == 32) {
333
334
335 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
336 for (i = 0; i < 32; i++) {
337 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
338 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
339 if (j == j2) continue;
340 if ((j != 0) && (j != 0xffff)) {
341 info->active_low = 1;
342 break;
343 }
344 }
345 }
346
347 info->phy_id = (i < 32) ? i : -1;
348 SET_NETDEV_DEV(dev, &link->dev);
349
350 if (register_netdev(dev) != 0) {
351 pr_notice("register_netdev() failed\n");
352 goto failed;
353 }
354
355 netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
356 ((info->flags & IS_AX88790) ? 7 : 1),
357 dev->base_addr, dev->irq, dev->dev_addr);
358 if (info->phy_id != -1) {
359 netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
360 info->phy_id, j);
361 } else {
362 netdev_notice(dev, " No MII transceivers found!\n");
363 }
364 return 0;
365
366 failed:
367 axnet_release(link);
368 return -ENODEV;
369 }
370
371 static void axnet_release(struct pcmcia_device *link)
372 {
373 pcmcia_disable_device(link);
374 }
375
376 static int axnet_suspend(struct pcmcia_device *link)
377 {
378 struct net_device *dev = link->priv;
379
380 if (link->open)
381 netif_device_detach(dev);
382
383 return 0;
384 }
385
386 static int axnet_resume(struct pcmcia_device *link)
387 {
388 struct net_device *dev = link->priv;
389 struct axnet_dev *info = PRIV(dev);
390
391 if (link->open) {
392 if (info->active_low == 1)
393 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
394
395 axnet_reset_8390(dev);
396 AX88190_init(dev, 1);
397 netif_device_attach(dev);
398 }
399
400 return 0;
401 }
402
409
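/*
 * MII management interface, bit-banged through the AXNET_MII_EEP port.
 */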
410 #define MDIO_SHIFT_CLK 0x01
411 #define MDIO_DATA_WRITE0 0x00
412 #define MDIO_DATA_WRITE1 0x08
413 #define MDIO_DATA_READ 0x04
414 #define MDIO_MASK 0x0f
415 #define MDIO_ENB_IN 0x02
416
417 static void mdio_sync(unsigned int addr)
418 {
419 int bits;
420 for (bits = 0; bits < 32; bits++) {
421 outb_p(MDIO_DATA_WRITE1, addr);
422 outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
423 }
424 }
425
426 static int mdio_read(unsigned int addr, int phy_id, int loc)
427 {
428 u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
429 int i, retval = 0;
430
431 mdio_sync(addr);
432 for (i = 14; i >= 0; i--) {
433 int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
434 outb_p(dat, addr);
435 outb_p(dat | MDIO_SHIFT_CLK, addr);
436 }
437 for (i = 19; i > 0; i--) {
438 outb_p(MDIO_ENB_IN, addr);
439 retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
440 outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
441 }
442 return (retval>>1) & 0xffff;
443 }
444
445 static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
446 {
447 u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
448 int i;
449
450 mdio_sync(addr);
451 for (i = 31; i >= 0; i--) {
452 int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
453 outb_p(dat, addr);
454 outb_p(dat | MDIO_SHIFT_CLK, addr);
455 }
456 for (i = 1; i >= 0; i--) {
457 outb_p(MDIO_ENB_IN, addr);
458 outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
459 }
460 }
461
462
463
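/*
 * Open the device: grab the (shared) interrupt, start the link-status
 * watchdog timer, then bring the 8390 core up via ax_open().
 */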
464 static int axnet_open(struct net_device *dev)
465 {
466 int ret;
467 struct axnet_dev *info = PRIV(dev);
468 struct pcmcia_device *link = info->p_dev;
469 unsigned int nic_base = dev->base_addr;
470
471 dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
472
473 if (!pcmcia_dev_present(link))
474 return -ENODEV;
475
476 outb_p(0xFF, nic_base + EN0_ISR);
477 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
478 if (ret)
479 return ret;
480
481 link->open++;
482
483 info->link_status = 0x00;
484 timer_setup(&info->watchdog, ei_watchdog, 0);
485 mod_timer(&info->watchdog, jiffies + HZ);
486
487 return ax_open(dev);
488 }
489
490
491
492 static int axnet_close(struct net_device *dev)
493 {
494 struct axnet_dev *info = PRIV(dev);
495 struct pcmcia_device *link = info->p_dev;
496
497 dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name);
498
499 ax_close(dev);
500 free_irq(dev->irq, dev);
501
502 link->open--;
503 netif_stop_queue(dev);
504 del_timer_sync(&info->watchdog);
505
506 return 0;
507 }
515
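/*
 * Hard-reset the AX88190 core by reading and writing back the reset port,
 * then wait up to 10 ms for the ENISR_RESET bit to appear in the ISR.
 */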
516 static void axnet_reset_8390(struct net_device *dev)
517 {
518 unsigned int nic_base = dev->base_addr;
519 int i;
520
521 ei_status.txing = ei_status.dmaing = 0;
522
523 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
524
525 outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
526
527 for (i = 0; i < 100; i++) {
528 if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
529 break;
530 udelay(100);
531 }
532 outb_p(ENISR_RESET, nic_base + EN0_ISR);
533
534 if (i == 100)
535 netdev_err(dev, "axnet_reset_8390() did not complete\n");
536
537 }
538
539
540
541 static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
542 {
543 struct net_device *dev = dev_id;
544 PRIV(dev)->stale = 0;
545 return ax_interrupt(irq, dev_id);
546 }
547
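/*
 * One-second watchdog: catches interrupts the card appears to have dropped
 * (then polls aggressively for a while), and monitors the MII link status,
 * reprogramming the duplex setting when the link changes.
 */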
548 static void ei_watchdog(struct timer_list *t)
549 {
550 struct axnet_dev *info = from_timer(info, t, watchdog);
551 struct net_device *dev = info->p_dev->priv;
552 unsigned int nic_base = dev->base_addr;
553 unsigned int mii_addr = nic_base + AXNET_MII_EEP;
554 u_short link;
555
556 if (!netif_device_present(dev)) goto reschedule;
557
558
559
560 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
561 if (!info->fast_poll)
562 netdev_info(dev, "interrupt(s) dropped!\n");
563 ei_irq_wrapper(dev->irq, dev);
564 info->fast_poll = HZ;
565 }
566 if (info->fast_poll) {
567 info->fast_poll--;
568 info->watchdog.expires = jiffies + 1;
569 add_timer(&info->watchdog);
570 return;
571 }
572
573 if (info->phy_id < 0)
574 goto reschedule;
575 link = mdio_read(mii_addr, info->phy_id, 1);
576 if (!link || (link == 0xffff)) {
577 netdev_info(dev, "MII is missing!\n");
578 info->phy_id = -1;
579 goto reschedule;
580 }
581
582 link &= 0x0004;
583 if (link != info->link_status) {
584 u_short p = mdio_read(mii_addr, info->phy_id, 5);
585 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
586 if (link) {
587 info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
588 if (p)
589 netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
590 (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
591 else
592 netdev_info(dev, "link partner did not autonegotiate\n");
593 AX88190_init(dev, 1);
594 }
595 info->link_status = link;
596 }
597
598 reschedule:
599 info->watchdog.expires = jiffies + HZ;
600 add_timer(&info->watchdog);
601 }
602
603
604
605 static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
606 {
607 struct axnet_dev *info = PRIV(dev);
608 struct mii_ioctl_data *data = if_mii(rq);
609 unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP;
610 switch (cmd) {
611 case SIOCGMIIPHY:
612 data->phy_id = info->phy_id;
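/* fall through */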
613
614 case SIOCGMIIREG:
615 data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
616 return 0;
617 case SIOCSMIIREG:
618 mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in);
619 return 0;
620 }
621 return -EOPNOTSUPP;
622 }
623
624
625
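/*
 * Fetch the 4-byte 8390 packet header for the given ring page over the
 * data port, converting the little-endian byte count to host order.
 */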
626 static void get_8390_hdr(struct net_device *dev,
627 struct e8390_pkt_hdr *hdr,
628 int ring_page)
629 {
630 unsigned int nic_base = dev->base_addr;
631
632 outb_p(0, nic_base + EN0_RSARLO);
633 outb_p(ring_page, nic_base + EN0_RSARHI);
634 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
635
636 insw(nic_base + AXNET_DATAPORT, hdr,
637 sizeof(struct e8390_pkt_hdr)>>1);
638
639 hdr->count = le16_to_cpu(hdr->count);
640
641 }
642
643
644
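/*
 * Copy a received packet out of the card's ring buffer into the skb,
 * a word at a time, picking up a trailing odd byte if needed.
 */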
645 static void block_input(struct net_device *dev, int count,
646 struct sk_buff *skb, int ring_offset)
647 {
648 unsigned int nic_base = dev->base_addr;
649 struct ei_device *ei_local = netdev_priv(dev);
650 int xfer_count = count;
651 char *buf = skb->data;
652
653 if ((netif_msg_rx_status(ei_local)) && (count != 4))
654 netdev_dbg(dev, "[bi=%d]\n", count+4);
655 outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
656 outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
657 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
658
659 insw(nic_base + AXNET_DATAPORT,buf,count>>1);
660 if (count & 0x01)
661 buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
662
663 }
664
665
666
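/*
 * Copy a packet into the card's transmit buffer. The transfer is done
 * in words, so odd lengths are rounded up before the outsw().
 */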
667 static void block_output(struct net_device *dev, int count,
668 const u_char *buf, const int start_page)
669 {
670 unsigned int nic_base = dev->base_addr;
671
672 pr_debug("%s: [bo=%d]\n", dev->name, count);
673
674
675
676
677 if (count & 0x01)
678 count++;
679
680 outb_p(0x00, nic_base + EN0_RSARLO);
681 outb_p(start_page, nic_base + EN0_RSARHI);
682 outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
683 outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
684 }
685
686 static const struct pcmcia_device_id axnet_ids[] = {
687 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
688 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
689 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
690 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
691 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
692 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
693 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
694 PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
695 PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202),
696 PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090),
697 PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
698 PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
699 PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
700 PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1),
701 PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc),
702 PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
703 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
704 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
705 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061),
706 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
707 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
708 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
709 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
710 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
711 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
712 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
713 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
714 PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
715 PCMCIA_DEVICE_NULL,
716 };
717 MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
718
719 static struct pcmcia_driver axnet_cs_driver = {
720 .owner = THIS_MODULE,
721 .name = "axnet_cs",
722 .probe = axnet_probe,
723 .remove = axnet_detach,
724 .id_table = axnet_ids,
725 .suspend = axnet_suspend,
726 .resume = axnet_resume,
727 };
728 module_pcmcia_driver(axnet_cs_driver);
729
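/*
 * The remainder of this file is the 8390 core support used by axnet_cs:
 * transmit/receive ring handling, the interrupt service routine and
 * initialisation for the NS8390-compatible engine in the AX88190/AX88790.
 */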
779 #include <linux/bitops.h>
780 #include <asm/irq.h>
781 #include <linux/fcntl.h>
782 #include <linux/in.h>
783 #include <linux/interrupt.h>
784
785 #define BUG_83C690
804
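/*
 * These macros dispatch to the per-device hooks installed in
 * axnet_config(): axnet_reset_8390(), block_input(), block_output()
 * and get_8390_hdr().
 */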
805 #define ei_reset_8390 (ei_local->reset_8390)
806 #define ei_block_output (ei_local->block_output)
807 #define ei_block_input (ei_local->block_input)
808 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
809
810
811 static void ei_tx_intr(struct net_device *dev);
812 static void ei_tx_err(struct net_device *dev);
813 static void ei_receive(struct net_device *dev);
814 static void ei_rx_overrun(struct net_device *dev);
815
816
817 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
818 int start_page);
819 static void do_set_multicast_list(struct net_device *dev);
854
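/*
 * Bring the 8390 core up: initialise the chip with receive and transmit
 * enabled and start the transmit queue. The IRQ itself is requested by
 * axnet_open() before this is called.
 */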
855 static int ax_open(struct net_device *dev)
856 {
857 unsigned long flags;
858 struct ei_device *ei_local = netdev_priv(dev);
859
860
861
862
863
864
865 spin_lock_irqsave(&ei_local->page_lock, flags);
866 AX88190_init(dev, 1);
867
868
869 netif_start_queue(dev);
870 spin_unlock_irqrestore(&ei_local->page_lock, flags);
871 ei_local->irqlock = 0;
872 return 0;
873 }
874
875 #define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock)
882
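/*
 * Shut the 8390 core down (AX88190_init() with startp == 0) and stop
 * the transmit queue.
 */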
883 static int ax_close(struct net_device *dev)
884 {
885 unsigned long flags;
886
887
888
889
890
891 spin_lock_irqsave(&dev_lock(dev), flags);
892 AX88190_init(dev, 0);
893 spin_unlock_irqrestore(&dev_lock(dev), flags);
894 netif_stop_queue(dev);
895 return 0;
896 }
905
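/*
 * Transmit watchdog: report the transmitter and interrupt status, reset
 * and reinitialise the chip, then wake the queue to retry.
 */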
906 static void axnet_tx_timeout(struct net_device *dev)
907 {
908 long e8390_base = dev->base_addr;
909 struct ei_device *ei_local = netdev_priv(dev);
910 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
911 unsigned long flags;
912
913 dev->stats.tx_errors++;
914
915 spin_lock_irqsave(&ei_local->page_lock, flags);
916 txsr = inb(e8390_base+EN0_TSR);
917 isr = inb(e8390_base+EN0_ISR);
918 spin_unlock_irqrestore(&ei_local->page_lock, flags);
919
920 netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
921 (txsr & ENTSR_ABT) ? "excess collisions." :
922 (isr) ? "lost interrupt?" : "cable problem?",
923 txsr, isr, tickssofar);
924
925 if (!isr && !dev->stats.tx_packets)
926 {
927
928 ei_local->interface_num ^= 1;
929 }
930
931
932
933 spin_lock_irqsave(&ei_local->page_lock, flags);
934
935
936 ei_reset_8390(dev);
937 AX88190_init(dev, 1);
938
939 spin_unlock_irqrestore(&ei_local->page_lock, flags);
940 netif_wake_queue(dev);
941 }
942
950
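/*
 * Queue a packet for transmission. The two on-card transmit buffers
 * (tx1/tx2) are used ping-pong fashion; interrupts are masked around the
 * copy so the ISR cannot touch the 8390 registers while remote DMA is in
 * progress.
 */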
951 static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
952 struct net_device *dev)
953 {
954 long e8390_base = dev->base_addr;
955 struct ei_device *ei_local = netdev_priv(dev);
956 int length, send_length, output_page;
957 unsigned long flags;
958 u8 packet[ETH_ZLEN];
959
960 netif_stop_queue(dev);
961
962 length = skb->len;
963
964
965
966
967
968
969 spin_lock_irqsave(&ei_local->page_lock, flags);
970 outb_p(0x00, e8390_base + EN0_IMR);
971
972
973
974
975
976 ei_local->irqlock = 1;
977
978 send_length = max(length, ETH_ZLEN);
979
980
981
982
983
984
985
986
987
988 if (ei_local->tx1 == 0)
989 {
990 output_page = ei_local->tx_start_page;
991 ei_local->tx1 = send_length;
992 if ((netif_msg_tx_queued(ei_local)) &&
993 ei_local->tx2 > 0)
994 netdev_dbg(dev,
995 "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
996 ei_local->tx2, ei_local->lasttx,
997 ei_local->txing);
998 }
999 else if (ei_local->tx2 == 0)
1000 {
1001 output_page = ei_local->tx_start_page + TX_PAGES/2;
1002 ei_local->tx2 = send_length;
1003 if ((netif_msg_tx_queued(ei_local)) &&
1004 ei_local->tx1 > 0)
1005 netdev_dbg(dev,
1006 "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
1007 ei_local->tx1, ei_local->lasttx,
1008 ei_local->txing);
1009 }
1010 else
1011 {
1012 netif_dbg(ei_local, tx_err, dev,
1013 "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
1014 ei_local->tx1, ei_local->tx2,
1015 ei_local->lasttx);
1016 ei_local->irqlock = 0;
1017 netif_stop_queue(dev);
1018 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1019 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1020 dev->stats.tx_errors++;
1021 return NETDEV_TX_BUSY;
1022 }
1023
1024
1025
1026
1027
1028
1029
1030 if (length == skb->len)
1031 ei_block_output(dev, length, skb->data, output_page);
1032 else {
1033 memset(packet, 0, ETH_ZLEN);
1034 skb_copy_from_linear_data(skb, packet, skb->len);
1035 ei_block_output(dev, length, packet, output_page);
1036 }
1037
1038 if (! ei_local->txing)
1039 {
1040 ei_local->txing = 1;
1041 NS8390_trigger_send(dev, send_length, output_page);
1042 netif_trans_update(dev);
1043 if (output_page == ei_local->tx_start_page)
1044 {
1045 ei_local->tx1 = -1;
1046 ei_local->lasttx = -1;
1047 }
1048 else
1049 {
1050 ei_local->tx2 = -1;
1051 ei_local->lasttx = -2;
1052 }
1053 }
1054 else ei_local->txqueue++;
1055
1056 if (ei_local->tx1 && ei_local->tx2)
1057 netif_stop_queue(dev);
1058 else
1059 netif_start_queue(dev);
1060
1061
1062 ei_local->irqlock = 0;
1063 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1064
1065 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1066
1067 dev_kfree_skb (skb);
1068 dev->stats.tx_bytes += send_length;
1069
1070 return NETDEV_TX_OK;
1071 }
1072
1084
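/*
 * The interrupt service routine. Loops over the ISR bits, handling
 * receive, transmit, overrun and counter-overflow events, for at most
 * MAX_SERVICE iterations per invocation.
 */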
1085 static irqreturn_t ax_interrupt(int irq, void *dev_id)
1086 {
1087 struct net_device *dev = dev_id;
1088 long e8390_base;
1089 int interrupts, nr_serviced = 0, i;
1090 struct ei_device *ei_local;
1091 int handled = 0;
1092 unsigned long flags;
1093
1094 e8390_base = dev->base_addr;
1095 ei_local = netdev_priv(dev);
1096
1097
1098
1099
1100
1101 spin_lock_irqsave(&ei_local->page_lock, flags);
1102
1103 if (ei_local->irqlock) {
1104 #if 1
1105 const char *msg;
1106
1107 if (ei_local->irqlock)
1108 msg = "Interrupted while interrupts are masked!";
1109 else
1110 msg = "Reentering the interrupt handler!";
1111 netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
1112 msg,
1113 inb_p(e8390_base + EN0_ISR),
1114 inb_p(e8390_base + EN0_IMR));
1115 #endif
1116 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1117 return IRQ_NONE;
1118 }
1119
1120 netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
1121 inb_p(e8390_base + EN0_ISR));
1122
1123 outb_p(0x00, e8390_base + EN0_ISR);
1124 ei_local->irqlock = 1;
1125
1126
1127 while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 &&
1128 ++nr_serviced < MAX_SERVICE)
1129 {
1130 if (!netif_running(dev) || (interrupts == 0xff)) {
1131 netif_warn(ei_local, intr, dev,
1132 "interrupt from stopped card\n");
1133 outb_p(interrupts, e8390_base + EN0_ISR);
1134 interrupts = 0;
1135 break;
1136 }
1137 handled = 1;
1138
1139
1140 outb_p(interrupts, e8390_base + EN0_ISR);
1141 for (i = 0; i < 10; i++) {
1142 if (!(inb(e8390_base + EN0_ISR) & interrupts))
1143 break;
1144 outb_p(0, e8390_base + EN0_ISR);
1145 outb_p(interrupts, e8390_base + EN0_ISR);
1146 }
1147 if (interrupts & ENISR_OVER)
1148 ei_rx_overrun(dev);
1149 else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
1150 {
1151
1152 ei_receive(dev);
1153 }
1154
1155 if (interrupts & ENISR_TX)
1156 ei_tx_intr(dev);
1157 else if (interrupts & ENISR_TX_ERR)
1158 ei_tx_err(dev);
1159
1160 if (interrupts & ENISR_COUNTERS)
1161 {
1162 dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
1163 dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
1164 dev->stats.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
1165 }
1166 }
1167
1168 if (interrupts && (netif_msg_intr(ei_local)))
1169 {
1170 handled = 1;
1171 if (nr_serviced >= MAX_SERVICE)
1172 {
1173
1174 if (interrupts != 0xFF)
1175 netdev_warn(dev,
1176 "Too much work at interrupt, status %#2.2x\n",
1177 interrupts);
1178 outb_p(ENISR_ALL, e8390_base + EN0_ISR);
1179 } else {
1180 netdev_warn(dev, "unknown interrupt %#2x\n",
1181 interrupts);
1182 outb_p(0xff, e8390_base + EN0_ISR);
1183 }
1184 }
1185
1186
1187 ei_local->irqlock = 0;
1188 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1189
1190 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1191 return IRQ_RETVAL(handled);
1192 }
1193
1207
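/*
 * Handle a transmit-error interrupt (ENISR_TX_ERR): bump the relevant
 * error counters and, if the transmission was aborted, fall back to the
 * normal transmit-done path so the next buffer still goes out.
 */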
1208 static void ei_tx_err(struct net_device *dev)
1209 {
1210 long e8390_base = dev->base_addr;
1211 unsigned char txsr = inb_p(e8390_base+EN0_TSR);
1212 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
1213
1214 #ifdef VERBOSE_ERROR_DUMP
1215 netdev_dbg(dev, "transmitter error (%#2x):", txsr);
1216 if (txsr & ENTSR_ABT)
1217 pr_cont(" excess-collisions");
1218 if (txsr & ENTSR_ND)
1219 pr_cont(" non-deferral");
1220 if (txsr & ENTSR_CRS)
1221 pr_cont(" lost-carrier");
1222 if (txsr & ENTSR_FU)
1223 pr_cont(" FIFO-underrun");
1224 if (txsr & ENTSR_CDH)
1225 pr_cont(" lost-heartbeat");
1226 pr_cont("\n");
1227 #endif
1228
1229 if (tx_was_aborted)
1230 ei_tx_intr(dev);
1231 else
1232 {
1233 dev->stats.tx_errors++;
1234 if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
1235 if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
1236 if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
1237 }
1238 }
1247
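/*
 * Handle a transmit-complete interrupt: retire the finished buffer, kick
 * off the other ping-pong buffer if it is loaded, update the statistics
 * from the TSR and wake the queue.
 */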
1248 static void ei_tx_intr(struct net_device *dev)
1249 {
1250 long e8390_base = dev->base_addr;
1251 struct ei_device *ei_local = netdev_priv(dev);
1252 int status = inb(e8390_base + EN0_TSR);
1253
1254
1255
1256
1257
1258 ei_local->txqueue--;
1259
1260 if (ei_local->tx1 < 0)
1261 {
1262 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
1263 netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
1264 ei_local->name, ei_local->lasttx,
1265 ei_local->tx1);
1266 ei_local->tx1 = 0;
1267 if (ei_local->tx2 > 0)
1268 {
1269 ei_local->txing = 1;
1270 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
1271 netif_trans_update(dev);
1272 ei_local->tx2 = -1,
1273 ei_local->lasttx = 2;
1274 }
1275 else ei_local->lasttx = 20, ei_local->txing = 0;
1276 }
1277 else if (ei_local->tx2 < 0)
1278 {
1279 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
1280 netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
1281 ei_local->name, ei_local->lasttx,
1282 ei_local->tx2);
1283 ei_local->tx2 = 0;
1284 if (ei_local->tx1 > 0)
1285 {
1286 ei_local->txing = 1;
1287 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
1288 netif_trans_update(dev);
1289 ei_local->tx1 = -1;
1290 ei_local->lasttx = 1;
1291 }
1292 else
1293 ei_local->lasttx = 10, ei_local->txing = 0;
1294 }
1295
1296
1297
1298
1299
1300 if (status & ENTSR_COL)
1301 dev->stats.collisions++;
1302 if (status & ENTSR_PTX)
1303 dev->stats.tx_packets++;
1304 else
1305 {
1306 dev->stats.tx_errors++;
1307 if (status & ENTSR_ABT)
1308 {
1309 dev->stats.tx_aborted_errors++;
1310 dev->stats.collisions += 16;
1311 }
1312 if (status & ENTSR_CRS)
1313 dev->stats.tx_carrier_errors++;
1314 if (status & ENTSR_FU)
1315 dev->stats.tx_fifo_errors++;
1316 if (status & ENTSR_CDH)
1317 dev->stats.tx_heartbeat_errors++;
1318 if (status & ENTSR_OWC)
1319 dev->stats.tx_window_errors++;
1320 }
1321 netif_wake_queue(dev);
1322 }
1323
1331
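/*
 * Pull received frames out of the ring buffer, sanity-checking the 8390
 * header before handing each packet to netif_rx(), and advance the
 * boundary pointer as pages are freed.
 */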
1332 static void ei_receive(struct net_device *dev)
1333 {
1334 long e8390_base = dev->base_addr;
1335 struct ei_device *ei_local = netdev_priv(dev);
1336 unsigned char rxing_page, this_frame, next_frame;
1337 unsigned short current_offset;
1338 int rx_pkt_count = 0;
1339 struct e8390_pkt_hdr rx_frame;
1340
1341 while (++rx_pkt_count < 10)
1342 {
1343 int pkt_len, pkt_stat;
1344
1345
1346 rxing_page = inb_p(e8390_base + EN1_CURPAG -1);
1347
1348
1349 this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
1350 if (this_frame >= ei_local->stop_page)
1351 this_frame = ei_local->rx_start_page;
1352
1353
1354
1355
1356
1357
1358
1359 if ((netif_msg_rx_err(ei_local)) &&
1360 this_frame != ei_local->current_page &&
1361 (this_frame != 0x0 || rxing_page != 0xFF))
1362 netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
1363 this_frame, ei_local->current_page);
1364
1365 if (this_frame == rxing_page)
1366 break;
1367
1368 current_offset = this_frame << 8;
1369 ei_get_8390_hdr(dev, &rx_frame, this_frame);
1370
1371 pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
1372 pkt_stat = rx_frame.status;
1373
1374 next_frame = this_frame + 1 + ((pkt_len+4)>>8);
1375
1376 if (pkt_len < 60 || pkt_len > 1518)
1377 {
1378 netif_err(ei_local, rx_err, dev,
1379 "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
1380 rx_frame.count, rx_frame.status,
1381 rx_frame.next);
1382 dev->stats.rx_errors++;
1383 dev->stats.rx_length_errors++;
1384 }
1385 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
1386 {
1387 struct sk_buff *skb;
1388
1389 skb = netdev_alloc_skb(dev, pkt_len + 2);
1390 if (skb == NULL)
1391 {
1392 netif_err(ei_local, rx_err, dev,
1393 "Couldn't allocate a sk_buff of size %d\n",
1394 pkt_len);
1395 dev->stats.rx_dropped++;
1396 break;
1397 }
1398 else
1399 {
1400 skb_reserve(skb,2);
1401 skb_put(skb, pkt_len);
1402 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
1403 skb->protocol=eth_type_trans(skb,dev);
1404 netif_rx(skb);
1405 dev->stats.rx_packets++;
1406 dev->stats.rx_bytes += pkt_len;
1407 if (pkt_stat & ENRSR_PHY)
1408 dev->stats.multicast++;
1409 }
1410 }
1411 else
1412 {
1413 netif_err(ei_local, rx_err, dev,
1414 "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
1415 rx_frame.status, rx_frame.next,
1416 rx_frame.count);
1417 dev->stats.rx_errors++;
1418
1419 if (pkt_stat & ENRSR_FO)
1420 dev->stats.rx_fifo_errors++;
1421 }
1422 next_frame = rx_frame.next;
1423
1424
1425 if (next_frame >= ei_local->stop_page) {
1426 netdev_info(dev, "next frame inconsistency, %#2x\n",
1427 next_frame);
1428 next_frame = ei_local->rx_start_page;
1429 }
1430 ei_local->current_page = next_frame;
1431 outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
1432 }
1433 }
1447
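/*
 * Recover from a receiver ring overrun: stop the chip, wait ~2 ms,
 * disable the transmitter (E8390_TXOFF) while the ring is drained via
 * ei_receive(), then restore the transmit configuration and, if a
 * transmission was cut short, restart it.
 */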
1448 static void ei_rx_overrun(struct net_device *dev)
1449 {
1450 struct axnet_dev *info = PRIV(dev);
1451 long e8390_base = dev->base_addr;
1452 unsigned char was_txing, must_resend = 0;
1453 struct ei_device *ei_local = netdev_priv(dev);
1454
1455
1456
1457
1458
1459 was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
1460 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1461
1462 netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
1463 dev->stats.rx_over_errors++;
1464
1465
1466
1467
1468
1469
1470 mdelay(2);
1471
1472
1473
1474
1475 outb_p(0x00, e8390_base+EN0_RCNTLO);
1476 outb_p(0x00, e8390_base+EN0_RCNTHI);
1477
1478
1479
1480
1481
1482
1483 if (was_txing)
1484 {
1485 unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
1486 if (!tx_completed)
1487 must_resend = 1;
1488 }
1489
1490
1491
1492
1493
1494 outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
1495 outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
1496
1497
1498
1499
1500 ei_receive(dev);
1501
1502
1503
1504
1505 outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR);
1506 if (must_resend)
1507 outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
1508 }
1513
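/*
 * Return the device statistics, folding in the chip's three on-board
 * error tally counters when the interface is running.
 */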
1514 static struct net_device_stats *get_stats(struct net_device *dev)
1515 {
1516 long ioaddr = dev->base_addr;
1517 struct ei_device *ei_local = netdev_priv(dev);
1518 unsigned long flags;
1519
1520
1521 if (!netif_running(dev))
1522 return &dev->stats;
1523
1524 spin_lock_irqsave(&ei_local->page_lock,flags);
1525
1526 dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
1527 dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
1528 dev->stats.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
1529 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1530
1531 return &dev->stats;
1532 }
1538
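/*
 * Build the 64-bit multicast hash filter: each listed address sets the
 * bit selected by the top six bits of its Ethernet CRC.
 */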
1539 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
1540 {
1541 struct netdev_hw_addr *ha;
1542 u32 crc;
1543
1544 netdev_for_each_mc_addr(ha, dev) {
1545 crc = ether_crc(ETH_ALEN, ha->addr);
1546
1547
1548
1549
1550 bits[crc>>29] |= (1<<((crc>>26)&7));
1551 }
1552 }
1561
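/*
 * Program the 8390 multicast hash registers on page 1 and pick the
 * receive mode (promiscuous, all-multicast or hash-filtered) from
 * dev->flags.
 */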
1562 static void do_set_multicast_list(struct net_device *dev)
1563 {
1564 long e8390_base = dev->base_addr;
1565 int i;
1566 struct ei_device *ei_local = netdev_priv(dev);
1567
1568 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
1569 memset(ei_local->mcfilter, 0, 8);
1570 if (!netdev_mc_empty(dev))
1571 make_mc_bits(ei_local->mcfilter, dev);
1572 } else {
1573
1574 memset(ei_local->mcfilter, 0xFF, 8);
1575 }
1576
1577 outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
1578 for(i = 0; i < 8; i++)
1579 {
1580 outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
1581 }
1582 outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
1583
1584 if(dev->flags&IFF_PROMISC)
1585 outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
1586 else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
1587 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
1588 else
1589 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
1590
1591 outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1592 }
1599
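/* Locked wrapper around do_set_multicast_list(). */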
1600 static void set_multicast_list(struct net_device *dev)
1601 {
1602 unsigned long flags;
1603
1604 spin_lock_irqsave(&dev_lock(dev), flags);
1605 do_set_multicast_list(dev);
1606 spin_unlock_irqrestore(&dev_lock(dev), flags);
1607 }
1619
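/*
 * Initialise the AX88190 core: program the data configuration, receive
 * and transmit control registers, the ring buffer limits and the station
 * address, then (when startp is set) unmask interrupts, enable
 * transmit/receive and load the multicast filter.
 */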
1620 static void AX88190_init(struct net_device *dev, int startp)
1621 {
1622 struct axnet_dev *info = PRIV(dev);
1623 long e8390_base = dev->base_addr;
1624 struct ei_device *ei_local = netdev_priv(dev);
1625 int i;
1626 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
1627
1628 if(sizeof(struct e8390_pkt_hdr)!=4)
1629 panic("8390.c: header struct mispacked\n");
1630
1631 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1632 outb_p(endcfg, e8390_base + EN0_DCFG);
1633
1634 outb_p(0x00, e8390_base + EN0_RCNTLO);
1635 outb_p(0x00, e8390_base + EN0_RCNTHI);
1636
1637 outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR);
1638 outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
1639
1640 outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1641 ei_local->tx1 = ei_local->tx2 = 0;
1642 outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
1643 outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);
1644 ei_local->current_page = ei_local->rx_start_page;
1645 outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1646
1647 outb_p(0xFF, e8390_base + EN0_ISR);
1648 outb_p(0x00, e8390_base + EN0_IMR);
1649
1650
1651
1652 outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD);
1653 for(i = 0; i < 6; i++)
1654 {
1655 outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1656 if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
1657 netdev_err(dev, "Hw. address read/write mismap %d\n", i);
1658 }
1659
1660 outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1661 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1662
1663 netif_start_queue(dev);
1664 ei_local->tx1 = ei_local->tx2 = 0;
1665 ei_local->txing = 0;
1666
1667 if (info->flags & IS_AX88790)
1668 outb(0x10, e8390_base + AXNET_GPIO);
1669
1670 if (startp)
1671 {
1672 outb_p(0xff, e8390_base + EN0_ISR);
1673 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1674 outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1675 outb_p(E8390_TXCONFIG | info->duplex_flag,
1676 e8390_base + EN0_TXCR);
1677
1678 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
1679 do_set_multicast_list(dev);
1680 }
1681 }
1685
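/*
 * Load the byte count and start page, then issue the transmit command,
 * unless the transmitter is still busy.
 */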
1686 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1687 int start_page)
1688 {
1689 long e8390_base = dev->base_addr;
1690 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1691
1692 if (inb_p(e8390_base) & E8390_TRANS)
1693 {
1694 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1695 return;
1696 }
1697 outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1698 outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1699 outb_p(start_page, e8390_base + EN0_TPSR);
1700 outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1701 }