This source file includes the following definitions.
- sonic_msg_init
- sonic_open
- sonic_quiesce
- sonic_close
- sonic_tx_timeout
- sonic_send_packet
- sonic_interrupt
- index_from_addr
- sonic_alloc_rb
- sonic_update_rra
- sonic_rx
- sonic_get_stats
- sonic_multicast_list
- sonic_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/* Print the driver version banner only once, however many NICs probe. */
static unsigned int version_printed;

/* Debug message level; -1 selects the netif_msg_init() default mask. */
static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");
42
43 static void sonic_msg_init(struct net_device *dev)
44 {
45 struct sonic_local *lp = netdev_priv(dev);
46
47 lp->msg_enable = netif_msg_init(sonic_debug, 0);
48
49 if (version_printed++ == 0)
50 netif_dbg(lp, drv, dev, "%s", version);
51 }
52
53
54
55
56
57
58
59
/*
 * Open the device: allocate and DMA-map one receive buffer per
 * resource-ring slot, program the controller via sonic_init(), and
 * start the transmit queue.  Returns 0, or -ENOMEM if allocation or
 * DMA mapping fails (all partial state is unwound first).
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	/* Phase 1: allocate one skb per receive resource slot. */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		/* +2 leaves room for the skb_reserve() alignment below. */
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			/* Unwind the skbs allocated so far. */
			while(i > 0) {
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}

		/* NOTE(review): presumably aligns the buffer for 16-bit
		 * DMA mode — confirm against the chip documentation. */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	/* Phase 2: DMA-map every receive buffer for device writes. */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			/* Unmap the buffers mapped so far... */
			while(i > 0) {
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			/* ...then free every skb (all were allocated in phase 1). */
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/* Program the controller registers and descriptor rings. */
	sonic_init(dev);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}
118
119
120 static void sonic_quiesce(struct net_device *dev, u16 mask)
121 {
122 struct sonic_local * __maybe_unused lp = netdev_priv(dev);
123 int i;
124 u16 bits;
125
126 for (i = 0; i < 1000; ++i) {
127 bits = SONIC_READ(SONIC_CMD) & mask;
128 if (!bits)
129 return;
130 if (irqs_disabled() || in_interrupt())
131 udelay(20);
132 else
133 usleep_range(100, 200);
134 }
135 WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
136 }
137
138
139
140
/*
 * Close the device: stop the queue, disable the receiver, mask and
 * acknowledge all interrupts, reset the chip, then release every
 * outstanding TX/RX DMA mapping and skb.  Always returns 0.
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/* Disable reception and wait for in-flight commands to finish. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	/* Mask all interrupts, ack anything pending, reset the chip. */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* Release any transmit buffers still queued. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* Release all receive buffers. */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if(lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}
186
/*
 * Transmit watchdog: the queue stalled, so quiesce and reset the
 * chip, discard every pending transmit buffer, re-initialize the
 * controller and restart the queue.
 */
static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/* Disable reception and wait for commands to drain before reset. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	/* Mask interrupts, ack anything pending, reset the chip. */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* Drop all outstanding transmit buffers. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* Reprogram the chip from scratch and resume transmission. */
	sonic_init(dev);
	lp->stats.tx_errors++;
	netif_trans_update(dev);	/* avoid an immediate repeat timeout */
	netif_wake_queue(dev);
}
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237 static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
238 {
239 struct sonic_local *lp = netdev_priv(dev);
240 dma_addr_t laddr;
241 int length;
242 int entry;
243 unsigned long flags;
244
245 netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
246
247 length = skb->len;
248 if (length < ETH_ZLEN) {
249 if (skb_padto(skb, ETH_ZLEN))
250 return NETDEV_TX_OK;
251 length = ETH_ZLEN;
252 }
253
254
255
256
257
258 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
259 if (!laddr) {
260 pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
261 dev_kfree_skb_any(skb);
262 return NETDEV_TX_OK;
263 }
264
265 spin_lock_irqsave(&lp->lock, flags);
266
267 entry = lp->next_tx;
268
269 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);
270 sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);
271 sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length);
272 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
273 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
274 sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
275 sonic_tda_put(dev, entry, SONIC_TD_LINK,
276 sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
277
278 wmb();
279 lp->tx_len[entry] = length;
280 lp->tx_laddr[entry] = laddr;
281 lp->tx_skb[entry] = skb;
282
283 wmb();
284 sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
285 sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
286 lp->eol_tx = entry;
287
288 lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
289 if (lp->tx_skb[lp->next_tx] != NULL) {
290
291 netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
292 netif_stop_queue(dev);
293
294 } else netif_start_queue(dev);
295
296 netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
297
298 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
299
300 spin_unlock_irqrestore(&lp->lock, flags);
301
302 return NETDEV_TX_OK;
303 }
304
305
306
307
308
/*
 * SONIC interrupt handler.
 *
 * Takes lp->lock to serialize against the transmit path, then loops
 * acknowledging and servicing interrupt causes until the ISR reads
 * clear.  Returns IRQ_NONE if no enabled cause was pending.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);
		/* shared-IRQ line: interrupt was not for this device */
		return IRQ_NONE;
	}

	do {
		/* Acknowledge the causes handled in this pass. */
		SONIC_WRITE(SONIC_ISR, status);

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			/* Reap completed transmit descriptors from cur_tx
			 * until one with a zero status word (still owned by
			 * the chip) is reached.
			 */
			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					/* packet transmitted OK */
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					/* classify the transmit error */
					if (td_status & (SONIC_TCR_EXD |
							 SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* free the skb and drop its DMA mapping */
				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;

				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				/* stop after the end-of-list descriptor */
				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);	/* ring no longer full */
			lp->cur_tx = entry;
		}

		/* Receive-side error conditions: log only. */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* Tally-counter rollover interrupts: each one represents a
		 * 16-bit hardware counter wrapping (65536 events); the live
		 * counter values are folded in by sonic_get_stats().
		 */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* Transmit error: retry aborted transmissions. */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission: stop the queue and
				 * re-issue the transmit command. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* Bus retry: mask all interrupts.  They stay disabled until
		 * the device is re-initialized (e.g. via close/open). */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
			       dev->name);
			SONIC_WRITE(SONIC_IMR, 0);
		}

		/* Re-read ISR in case more causes arrived meanwhile. */
		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}
448
449
450 static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
451 unsigned int last)
452 {
453 unsigned int i = last;
454
455 do {
456 i = (i + 1) & SONIC_RRS_MASK;
457 if (addr == lp->rx_laddr[i])
458 return i;
459 } while (i != last);
460
461 return -ENOENT;
462 }
463
464
465 static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
466 struct sk_buff **new_skb, dma_addr_t *new_addr)
467 {
468 *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
469 if (!*new_skb)
470 return false;
471
472 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
473 skb_reserve(*new_skb, 2);
474
475 *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
476 SONIC_RBSIZE, DMA_FROM_DEVICE);
477 if (!*new_addr) {
478 dev_kfree_skb(*new_skb);
479 *new_skb = NULL;
480 return false;
481 }
482
483 return true;
484 }
485
486
/*
 * Replace @old_addr with @new_addr in the Receive Resource Area and
 * advance the chip's write pointer (RWP) so the entry can be reused.
 */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* Scan the resource entries from RWP toward RRP for old_addr. */
	do {
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	/* Point the matched entry at the replacement buffer. */
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

	entry = (entry + 1) & SONIC_RRS_MASK;

	/* Hand the entry back to the chip by advancing the write pointer. */
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}
516
517
518
519
/*
 * Pass newly-arrived packets up the stack.
 *
 * Walks the receive descriptors from cur_rx, handing each complete
 * buffer to netif_rx() and replacing it in the resource area with a
 * freshly allocated one.  If allocation fails, the old buffer is
 * recycled and the packet is counted as dropped.  Finally the
 * end-of-list marker is advanced and a latched RBE condition is
 * acknowledged.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

	/* IN_USE == 0 means the chip has filled this descriptor. */
	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* Only act on packets received OK that end in this buffer. */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			/* find which rx_skb[] slot this buffer belongs to */
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* hand the filled buffer to the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				/* install the replacement buffer */
				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* no replacement: recycle the old buffer,
				 * dropping the packet it contained */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}

			/* NOTE(review): RBE is sampled before returning the
			 * resource and only acked afterwards — presumably to
			 * avoid re-latching; confirm against the datasheet. */
			rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
			sonic_update_rra(dev, lp, addr, new_laddr);
		}

		/* Hand the descriptor back to the chip. */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	/* Move the end-of-list marker to the last descriptor processed. */
	if (prev_entry != lp->eol_rx) {
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
}
603
604
605
606
607
608
/*
 * Return interface statistics.  The chip's 16-bit tally counters
 * (CRC, frame-alignment, missed-packet) are folded into lp->stats
 * and then reset by writing 0xffff; full 65536-count rollovers are
 * accounted separately in sonic_interrupt().
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}
623
624
625
626
627
/*
 * Update the receive filter: promiscuous, all-multicast, or a CAM
 * loaded with up to 15 multicast addresses (CAM entry 0 holds the
 * device's own address, programmed in sonic_init()).
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* always accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {
		/* promiscuous mode: accept everything */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			/* too many addresses for the CAM: accept all multicast */
			rcr |= SONIC_RCR_AMC;
		} else {
			unsigned long flags;

			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			/* start with only the unicast entry (bit 0) enabled */
			sonic_set_cam_enable(dev, 1);
			i = 1;
			/* CAM slots 1..15: one multicast address each, packed
			 * as three little-endian 16-bit words */
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

			/* Load the CAM under the lock, after any transmit
			 * command has drained. */
			spin_lock_irqsave(&lp->lock, flags);
			sonic_quiesce(dev, SONIC_CR_TXP);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			sonic_quiesce(dev, SONIC_CR_LCAM);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}
676
677
678
679
680
/*
 * Initialize the SONIC controller: reset the chip, program the
 * receive resource area, the receive and transmit descriptor rings
 * and the CAM, then enable the receiver and default interrupts.
 * Always returns 0.
 */
static int sonic_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/* Put the chip into software reset with all interrupts masked
	 * and any pending causes acknowledged. */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* Clear the CAM enable register while still in reset. */
	SONIC_WRITE(SONIC_CE, 0);

	/* Leave reset, then disable the receiver and stop the timer,
	 * waiting until all commands have completed. */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL);

	/* Fill the receive resource area from the pre-mapped buffers. */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		/* buffer size is expressed in 16-bit words */
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* Program the RRA boundary and pointer registers. */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* Tell the chip to read the resource area. */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA);

	/* Build the receive descriptor ring: each descriptor links to
	 * the next; the last links back to the head with EOL set. */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i=0; i<SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			      lp->rda_laddr +
			      ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}

	/* close the ring and mark its tail */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/* Build the transmit descriptor ring the same way. */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			      (lp->tda_laddr & 0xffff) +
			      (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}

	/* close the transmit ring */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = lp->next_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/* CAM entry 0: the device's own address, packed as three
	 * little-endian 16-bit words. */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/* Point the chip at the CAM descriptor area. */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/* Load the CAM and wait for completion. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM);

	/* Program default RX/TX configuration, ack and unmask
	 * interrupts, and enable the receiver. */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}
824
825 MODULE_LICENSE("GPL");