This source file includes following definitions.
- WRITERAP
- WRITERDP
- READRDP
- load_csrs
- lance_init_ring
- init_restart_lance
- lance_reset
- lance_rx
- lance_tx
- lance_interrupt
- lance_open
- lance_close
- lance_tx_timeout
- lance_start_xmit
- lance_load_multicast
- lance_set_multicast
- lance_poll
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16 #include <linux/crc32.h>
17 #include <linux/delay.h>
18 #include <linux/errno.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/fcntl.h>
25 #include <linux/interrupt.h>
26 #include <linux/ioport.h>
27 #include <linux/in.h>
28 #include <linux/route.h>
29 #include <linux/string.h>
30 #include <linux/skbuff.h>
31 #include <asm/irq.h>
32
33 #include <linux/socket.h>
34 #include <linux/bitops.h>
35
36 #include <asm/io.h>
37 #include <asm/dma.h>
38 #include <asm/pgtable.h>
39 #ifdef CONFIG_HP300
40 #include <asm/blinken.h>
41 #endif
42
43 #include "7990.h"
44
/* Default register accessors: RAP/RDP are mapped directly at lp->base
 * and accessed as big-endian 16-bit MMIO.
 */
#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
48
49 #if IS_ENABLED(CONFIG_HPLANCE)
50 #include "hplance.h"
51
52 #undef WRITERAP
53 #undef WRITERDP
54 #undef READRDP
55
56 #if IS_ENABLED(CONFIG_MVME147_NET)
57
58
59 #define WRITERAP(lp, x) (lp->writerap(lp, x))
60 #define WRITERDP(lp, x) (lp->writerdp(lp, x))
61 #define READRDP(lp) (lp->readrdp(lp))
62
63 #else
64
65
/* HP on-board variant: write the register-address port, retrying until
 * the HPLANCE status port reports LE_ACK.  NOTE(review): presumably the
 * DIO ASIC NAKs host accesses while the chip is busy — confirm against
 * HP hardware documentation.
 */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}
72
/* HP on-board variant: write the register-data port, retrying until the
 * HPLANCE status port acknowledges the access (see WRITERAP).
 */
static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}
79
/* HP on-board variant: read the register-data port, retrying until the
 * HPLANCE status port acknowledges the access; the last value read is
 * the one returned.
 */
static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}
88
89 #endif
90 #endif
91
92
93
94 #ifdef UNDEF
95 #define PRINT_RINGS() \
96 do { \
97 int t; \
98 for (t = 0; t < RX_RING_SIZE; t++) { \
99 printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
100 t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
101 ib->brx_ring[t].length, \
102 ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
103 } \
104 for (t = 0; t < TX_RING_SIZE; t++) { \
105 printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
106 t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
107 ib->btx_ring[t].length, \
108 ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
109 } \
110 } while (0)
111 #else
112 #define PRINT_RINGS()
113 #endif
114
115
/* Load the LANCE CSRs with the DMA address of the initialisation block
 * and the bus-master mode bits, then leave RAP pointing at CSR0, where
 * the rest of the driver expects it.
 */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	/* Init-block address as seen by the chip's DMA engine */
	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);			/* low 16 bits of init block address */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);			/* high bits of init block address */
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);			/* bus-master timing/mode bits */
	WRITERDP(lp, lp->busmaster_regval);

	/* Point RAP back at CSR0 so later RDP accesses hit the status reg */
	WRITERAP(lp, LE_CSR0);
}
133
134
135 #define DEBUG_IRING 0
136
/* Build the LANCE initialisation block and descriptor rings in the
 * chip-visible memory: station address, TX/RX rings pointing at the
 * static tx_buf/rx_buf areas, ring-length codes, and an all-clear
 * multicast filter.  Must run with the chip stopped.
 */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib;	/* init block as the chip addresses it */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	/* Reset ring indices */
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	/* Start out promiscuous; lance_set_multicast() clears LE_MO_PROM
	 * later unless IFF_PROMISC is set.
	 */
	ib->mode = LE_MO_PROM;

	/* Copy the ethernet address into the init block.  On big-endian
	 * hosts each byte pair is swapped so the chip's 16-bit accesses
	 * see the address in the right order.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries; nothing is owned by the chip yet */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0 = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length = 0xf000;	/* must-be-one upper bits, zero count */
		ib->btx_ring[i].misc = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries: hand every buffer to the chip */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0 = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* Buffer size as a negative two's-complement count with
		 * the must-be-one upper bits set (cf. lance_start_xmit).
		 */
		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Rx ring descriptor pointer: log2 ring size goes in bits 13-15,
	 * next to the high bits of the ring base address.
	 */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Tx ring descriptor pointer, same layout */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast hash filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}
227
228
/* Kick the chip into INIT and busy-wait (bounded to 100 polls) for the
 * init-done or error bit, then clear IDON and start the chip with
 * interrupts enabled.  Returns 0 on success, -1 on error/timeout.
 */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Wait for the chip to finish reading the init block */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON (write-one-to-clear), then start with interrupts on */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
252
/* Stop the chip, reload CSRs and rebuild the rings, then restart.
 * Returns init_restart_lance()'s status (0 on success, -1 on failure).
 */
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance before touching its memory structures */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev);	/* prevent an immediate tx timeout */
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}
271
272 static int lance_rx(struct net_device *dev)
273 {
274 struct lance_private *lp = netdev_priv(dev);
275 volatile struct lance_init_block *ib = lp->init_block;
276 volatile struct lance_rx_desc *rd;
277 unsigned char bits;
278 #ifdef TEST_HITS
279 int i;
280 #endif
281
282 #ifdef TEST_HITS
283 printk("[");
284 for (i = 0; i < RX_RING_SIZE; i++) {
285 if (i == lp->rx_new)
286 printk("%s",
287 ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
288 else
289 printk("%s",
290 ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
291 }
292 printk("]");
293 #endif
294 #ifdef CONFIG_HP300
295 blinken_leds(0x40, 0);
296 #endif
297 WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);
298 for (rd = &ib->brx_ring[lp->rx_new];
299 !((bits = rd->rmd1_bits) & LE_R1_OWN);
300 rd = &ib->brx_ring[lp->rx_new]) {
301
302
303 if ((bits & LE_R1_POK) != LE_R1_POK) {
304 dev->stats.rx_over_errors++;
305 dev->stats.rx_errors++;
306 continue;
307 } else if (bits & LE_R1_ERR) {
308
309
310
311 if (bits & LE_R1_BUF)
312 dev->stats.rx_fifo_errors++;
313 if (bits & LE_R1_CRC)
314 dev->stats.rx_crc_errors++;
315 if (bits & LE_R1_OFL)
316 dev->stats.rx_over_errors++;
317 if (bits & LE_R1_FRA)
318 dev->stats.rx_frame_errors++;
319 if (bits & LE_R1_EOP)
320 dev->stats.rx_errors++;
321 } else {
322 int len = (rd->mblength & 0xfff) - 4;
323 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
324
325 if (!skb) {
326 dev->stats.rx_dropped++;
327 rd->mblength = 0;
328 rd->rmd1_bits = LE_R1_OWN;
329 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
330 return 0;
331 }
332
333 skb_reserve(skb, 2);
334 skb_put(skb, len);
335 skb_copy_to_linear_data(skb,
336 (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
337 len);
338 skb->protocol = eth_type_trans(skb, dev);
339 netif_rx(skb);
340 dev->stats.rx_packets++;
341 dev->stats.rx_bytes += len;
342 }
343
344
345 rd->mblength = 0;
346 rd->rmd1_bits = LE_R1_OWN;
347 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
348 }
349 return 0;
350 }
351
/* Transmit-completion path: reap finished descriptors from tx_old up to
 * tx_new, updating statistics.  On fatal TX errors (carrier lost with
 * auto_select, buffer error, underflow) the chip is stopped and fully
 * reinitialised.  Called from lance_interrupt() with lp->devlock held.
 */
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* Ack the transmit interrupt, keeping interrupts enabled */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);

	/* Walk the ring from the oldest outstanding descriptor */
	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* Chip still owns this one: nothing newer has completed */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;	/* retry limit hit */
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;	/* late collision */

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					/* Flip to the other transceiver port
					 * (TPE <-> AUI) and restart the chip.
					 */
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop, reinit rings, reload CSRs, restart */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* Buffer error / underflow: restart the whole chip.
			 * NOTE(review): presumably these shut the transmitter
			 * down, hence the full reinit — confirm vs datasheet.
			 */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);

				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* Successfully sent: clear POK so this descriptor is
			 * not counted again on a later pass.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* Exactly one collision before success */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision; count 2 as an estimate */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}
439
/* Interrupt handler: read CSR0, dispatch RX/TX completion, record the
 * babble/miss error bits, restart the chip after a bus-master (memory)
 * error, and wake the TX queue if space freed up.  Registered with
 * IRQF_SHARED, so it returns IRQ_NONE when CSR0 shows no interrupt.
 */
static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);		/* select the status register */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {	/* not ours (shared IRQ line) */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;
	}

	/* Ack everything seen so far, without touching the control bits */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the summary error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Misc error counters */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* transmitter babbled */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* missed an rx frame */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip after the memory error */
		WRITERDP(lp, LE_C0_STRT);
	}

	/* lance_tx may have freed descriptors; restart a stopped queue */
	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	/* Final ack of remaining conditions, re-enabling interrupts */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}
496
497 int lance_open(struct net_device *dev)
498 {
499 struct lance_private *lp = netdev_priv(dev);
500 int res;
501
502
503 if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
504 return -EAGAIN;
505
506 res = lance_reset(dev);
507 spin_lock_init(&lp->devlock);
508 netif_start_queue(dev);
509
510 return res;
511 }
512 EXPORT_SYMBOL_GPL(lance_open);
513
/* Close the interface: stop the queue, stop the chip, release the IRQ.
 * Always returns 0.
 */
int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Stop the LANCE before tearing the IRQ down */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
529
/* netdev tx-timeout hook: log, fully reset the chip and rings, refresh
 * the trans-start stamp and restart the queue.
 */
void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	netif_trans_update(dev);	/* avoid an immediate re-timeout */
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
538
/* Queue one packet for transmission: copy it into the next static TX
 * buffer, hand the descriptor to the chip and kick transmit demand.
 *
 * NOTE(review): when no TX descriptor is free the packet is silently
 * consumed rather than returning NETDEV_TX_BUSY; the queue is stopped
 * below when the ring fills, so this path should be rare — confirm
 * intended.
 */
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;		/* debug counter: packets queued */
	unsigned long flags;

	netif_stop_queue(dev);

	if (!TX_BUFFS_AVAIL) {
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the first 64 bytes of the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	/* Pad short frames to the minimum Ethernet length */
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	/* Negative two's-complement byte count with must-be-one top bits */
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);	/* zero the pad bytes */
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Hand the descriptor to the chip */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev_consume_skb_any(skb);

	/* Queue state is shared with the interrupt handler */
	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
596
597
/* Program the 64-bit multicast hash filter in the init block from the
 * device's multicast list.  The top 6 bits of the little-endian CRC of
 * each address select one bit of the filter: bits 5-2 index one of the
 * four 16-bit words, bits 1-0 (with 2 more) select the bit within it.
 */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* Accept all multicasts: set every filter bit */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}

	/* Clear the filter before adding addresses */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Set a filter bit for each listed multicast address */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;	/* high 6 bits of the CRC */
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
623
624
/* netdev set-multicast hook: drain pending TX, stop the chip, rebuild
 * the rings with the new mode/filter and restart.  The queue is stopped
 * for the duration unless it already was.
 */
void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	/* Wait for all outstanding transmits to be reaped.
	 * NOTE(review): this spins calling schedule() with no state
	 * change or timeout — relies on lance_tx() advancing tx_old.
	 */
	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);	/* leaves mode = LE_MO_PROM */

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
655
656 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: nudge the chip (write STRT to CSR0) under the device
 * lock, then run the interrupt handler directly.  NOTE(review): the
 * handler re-takes lp->devlock itself, hence the unlock first.
 */
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
EXPORT_SYMBOL_GPL(lance_poll);
668 #endif
669
670 MODULE_LICENSE("GPL");