This source file includes the following definitions:
- velocity_set_power_state
- mac_get_cam_mask
- mac_set_cam_mask
- mac_set_vlan_cam_mask
- mac_set_cam
- mac_set_vlan_cam
- mac_wol_reset
- get_chip_name
- velocity_set_int_opt
- velocity_set_bool_opt
- velocity_get_options
- velocity_init_cam_filter
- velocity_vlan_rx_add_vid
- velocity_vlan_rx_kill_vid
- velocity_init_rx_ring_indexes
- velocity_rx_reset
- velocity_get_opt_media_mode
- safe_disable_mii_autopoll
- enable_mii_autopoll
- velocity_mii_read
- mii_check_media_mode
- velocity_mii_write
- set_mii_flow_control
- mii_set_auto_on
- check_connection_type
- velocity_set_media_mode
- velocity_print_link_status
- enable_flow_control_ability
- velocity_soft_reset
- velocity_set_multi
- mii_init
- setup_queue_timers
- setup_adaptive_interrupts
- velocity_init_registers
- velocity_give_many_rx_descs
- velocity_init_dma_rings
- velocity_set_rxbufsize
- velocity_alloc_rx_buf
- velocity_rx_refill
- velocity_free_rd_ring
- velocity_init_rd_ring
- velocity_init_td_ring
- velocity_free_dma_rings
- velocity_init_rings
- velocity_free_tx_buf
- velocity_free_td_ring_entry
- velocity_free_td_ring
- velocity_free_rings
- velocity_error
- velocity_tx_srv
- velocity_rx_csum
- velocity_rx_copy
- velocity_iph_realign
- velocity_receive_frame
- velocity_rx_srv
- velocity_poll
- velocity_intr
- velocity_open
- velocity_shutdown
- velocity_change_mtu
- velocity_poll_controller
- velocity_mii_ioctl
- velocity_ioctl
- velocity_get_stats
- velocity_close
- velocity_xmit
- velocity_init_info
- velocity_get_pci_info
- velocity_get_platform_info
- velocity_print_info
- velocity_get_link
- velocity_probe
- velocity_remove
- velocity_pci_probe
- velocity_pci_remove
- velocity_platform_probe
- velocity_platform_remove
- wol_calc_crc
- velocity_set_wol
- velocity_save_context
- velocity_suspend
- velocity_restore_context
- velocity_resume
- velocity_ethtool_up
- velocity_ethtool_down
- velocity_get_link_ksettings
- velocity_set_link_ksettings
- velocity_get_drvinfo
- velocity_ethtool_get_wol
- velocity_ethtool_set_wol
- velocity_get_msglevel
- velocity_set_msglevel
- get_pending_timer_val
- set_pending_timer_val
- velocity_get_coalesce
- velocity_set_coalesce
- velocity_get_strings
- velocity_get_sset_count
- velocity_get_ethtool_stats
- velocity_netdev_event
- velocity_register_notifier
- velocity_unregister_notifier
- velocity_init_module
- velocity_cleanup_module

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"

enum velocity_bus_type {
	BUS_PCI,
	BUS_PLATFORM,
};

static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

static void velocity_set_power_state(struct velocity_info *vptr, char state)
{
	void *addr = vptr->mac_regs;

	if (vptr->pdev)
		pci_set_power_state(vptr->pdev, state);
	else
		writeb(state, addr + 0x154);
}

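/**
 *	mac_get_cam_mask	-	Read a CAM mask
 *	@regs: register block for this velocity
 *	@mask: buffer to store mask
 *
 *	Fetch the mask bits of the selected CAM and store them into the
 *	provided mask buffer.
 */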
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(0, &regs->CAMADDR);

	/* read mask */
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *	mac_set_cam_mask	-	Set a CAM mask
 *	@regs: register block for this velocity
 *	@mask: CAM mask to load
 *
 *	Store a new mask into a CAM.
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* Select the VLAN CAM bank */
	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

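/**
 *	mac_set_cam	-	set CAM data
 *	@regs: register block of this velocity
 *	@idx: CAM index
 *	@addr: 2 or 6 bytes of CAM data
 *
 *	Load an address or vlan tag into a CAM.
 */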
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
	int i;

	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
			     const u8 *addr)
{
	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
	writew(*((u16 *) addr), &regs->MARCAM[0]);

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

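/**
 *	mac_wol_reset	-	reset WOL after exiting low power
 *	@regs: register block of this velocity
 *
 *	Called after we drop out of wake on lan mode in order to
 *	reset the Wake on lan features. This function doesn't restore
 *	the rest of the logic from the result of sleep/wakeup.
 */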
static void mac_wol_reset(struct mac_regs __iomem *regs)
{
	/* Turn off SWPTAG right after leaving power mode */
	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
	/* clear sticky bits */
	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
	/* disable force PME-enable */
	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
	/* disable power-event config bit */
	writew(0xFFFF, &regs->WOLCRClr);
	/* clear power status */
	writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
 *	Define module options
 */
MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);

#define RX_DESC_MIN 64
#define RX_DESC_MAX 255
#define RX_DESC_DEF 64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN 16
#define TX_DESC_MAX 256
#define TX_DESC_DEF 64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN 0
#define RX_THRESH_MAX 3
#define RX_THRESH_DEF 0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicate the rxfifo threshold is 128 bytes.
   1: indicate the rxfifo threshold is 512 bytes.
   2: indicate the rxfifo threshold is 1024 bytes.
   3: indicate the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN 0
#define DMA_LENGTH_MAX 7
#define DMA_LENGTH_DEF 6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF 0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicate the IP header won't be DWORD byte aligned. (Default)
   1: indicate the IP header will be DWORD byte aligned.
      In some environments the IP header should be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF 1
#define FLOW_CNTL_MIN 1
#define FLOW_CNTL_MAX 5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicate autonegotiation for both speed and duplex mode
   1: indicate 100Mbps half duplex mode
   2: indicate 100Mbps full duplex mode
   3: indicate 10Mbps half duplex mode
   4: indicate 10Mbps full duplex mode
   5: indicate 1000Mbps full duplex mode

   Note:
   if EEPROM has been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF 0
#define WOL_OPT_MIN 0
#define WOL_OPT_MAX 7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");

/*
 *	Internal board variants. At the moment we have only one.
 */
static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	{ }
};

/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */
static const struct pci_device_id velocity_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
	{ }
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);

/*
 *	Describe the OF device identifiers that we support in this
 *	device driver. Used for devicetree nodes.
 */
static const struct of_device_id velocity_of_ids[] = {
	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
	{ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);

/**
 *	get_chip_name	-	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
	int i;

	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
}

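/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *	@devname: device name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */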
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
				 char *name, const char *devname)
{
	if (val == -1)
		*opt = def;
	else if (val < min || val > max) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
			     devname, name, min, max);
		*opt = def;
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
			     devname, name, val);
		*opt = val;
	}
}

/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric flag
 *	@name: property name
 *	@devname: device name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
				  char *name, const char *devname)
{
	(*opt) &= (~flag);
	if (val == -1)
		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
		       devname, name);
		*opt |= (def ? flag : 0);
	} else {
		printk(KERN_INFO "%s: set parameter %s to %s\n",
		       devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}

/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *	@devname: device name
 *
 *	Turn the module and command options into a single structure
 *	for the current device.
 */
static void velocity_get_options(struct velocity_opt *opts, int index,
				 const char *devname)
{
	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
	velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
	opts->numrx = (opts->numrx & ~3);
}

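/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLAN.
 */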
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned int vid, i = 0;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	mac_set_cam_mask(regs, vptr->mCAMmask);

	/* Enable VCAMs */
	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
		mac_set_vlan_cam(regs, i, (u8 *) &vid);
		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
		if (++i >= VCAM_SIZE)
			break;
	}
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}

static int velocity_vlan_rx_add_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	set_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
	return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
				     __be16 proto, u16 vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	clear_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
	return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Set all the receive buffer ownership back to the chip.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;

	velocity_init_rx_ring_indexes(vptr);

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}

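/**
 *	velocity_get_opt_media_mode	-	get media selection
 *	@vptr: velocity adapter
 *
 *	Get the media mode from the module options and load mii_status
 *	accordingly. The requested link state information is returned.
 */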
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	case SPD_DPX_1000_FULL:
		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
		break;
	}
	vptr->mii_status = status;
	return status;
}

/**
 *	safe_disable_mii_autopoll	-	autopoll off
 *	@regs: velocity registers
 *
 *	Turn off the autopoll and wait for it to disable on the chip.
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

/**
 *	enable_mii_autopoll	-	turn on autopolling
 *	@regs: velocity registers
 *
 *	Enable the MII link status autopoll feature on the Velocity
 *	hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
	int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

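/**
 *	velocity_mii_read	-	read MII data
 *	@regs: velocity registers
 *	@index: MII register index
 *	@data: buffer for received data
 *
 *	Perform a single read of an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */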
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 *	mii_check_media_mode	-	check media state
 *	@regs: velocity registers
 *
 *	Check the current MII status and determine the link status
 *	accordingly.
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if (ANAR & ADVERTISE_100FULL)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ADVERTISE_100HALF)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ADVERTISE_10FULL)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 *	velocity_mii_write	-	write MII data
 *	@regs: velocity registers
 *	@mii_addr: MII register index
 *	@data: 16bit data for the MII register
 *
 *	Perform a single write to an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 *	set_mii_flow_control	-	flow control setup
 *	@vptr: velocity interface
 *
 *	Set up the flow control on this interface according to
 *	the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		break;
	}
}

/**
 *	mii_set_auto_on	-	autonegotiate on
 *	@vptr: velocity
 *
 *	Enable autonegotiation on this interface.
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;

	PHYSR0 = readb(&regs->PHYSR0);

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	else if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

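/**
 *	velocity_set_media_mode	-	set media mode
 *	@vptr: velocity adapter
 *	@mii_status: old MII link state
 *
 *	Check the media link state and configure the flow control
 *	and also the MII speed/duplex setup accordingly. Returns
 *	VELOCITY_LINK_CHANGE.
 */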
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	u32 curr_status;
	struct mac_regs __iomem *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/* the Cicada CS8201 PHY needs AUXCR_MDPPS set */
	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

	/*
	 *	If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 CTRL1000;
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);

		if (mii_status & VELOCITY_SPEED_1000)
			CHIPGCR |= CHIPGCR_FCGMII;
		else
			CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
		if ((mii_status & VELOCITY_SPEED_1000) &&
		    (mii_status & VELOCITY_DUPLEX_FULL)) {
			CTRL1000 |= ADVERTISE_1000FULL;
		}
		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		else
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_100FULL;
			else
				ANAR |= ADVERTISE_100HALF;
		} else if (mii_status & VELOCITY_SPEED_10) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_10FULL;
			else
				ANAR |= ADVERTISE_10HALF;
		}
		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	}

	return VELOCITY_LINK_CHANGE;
}

/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a string
 *	describing the setup and print it to the user.
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{
	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_1000_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
			break;
		case SPD_DPX_100_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
			break;
		case SPD_DPX_100_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
			break;
		case SPD_DPX_10_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
			break;
		case SPD_DPX_10_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
			break;
		default:
			break;
		}
	}
}

/**
 *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
 *
 *	Set up flow control according to the flow control options
 *	determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}
}

/**
 *	velocity_soft_reset	-	soft reset
 *	@vptr: velocity to reset
 *
 *	Kick off a soft reset of the velocity adapter and then poll
 *	till the reset completes. Use a forced reset if the poll
 *	times out.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}

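/**
 *	velocity_set_multi	-	filter list change callback
 *	@dev: network device
 *
 *	Called by the network layer when the filter lists need to change
 *	for a velocity adapter. Reload the CAMs with the new address
 *	filter ruleset.
 */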
static void velocity_set_multi(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u8 rx_mode;
	int i;
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB);
	} else {
		int offset = MCAM_SIZE - vptr->multicast_limit;

		mac_get_cam_mask(regs, vptr->mCAMmask);

		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_set_cam(regs, i + offset, ha->addr);
			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
			i++;
		}

		mac_set_cam_mask(regs, vptr->mCAMmask);
		rx_mode = RCR_AM | RCR_AB | RCR_AP;
	}
	if (dev->mtu > 1500)
		rx_mode |= RCR_AL;

	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
}

/**
 *	mii_init	-	set up MII
 *	@vptr: velocity adapter
 *	@mii_status: link status
 *
 *	Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_ICPLUS_IP101A:
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
				MII_ADVERTISE, vptr->mac_regs);
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
					vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
					 vptr->mac_regs);
		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
		break;
	case PHYID_CICADA_CS8201:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		/*
		 *	Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 *	Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		;
	}
	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
	if (BMCR & BMCR_ISOLATE) {
		BMCR &= ~BMCR_ISOLATE;
		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
	}
}

/**
 *	setup_queue_timers	-	Setup interrupt timers
 *	@vptr: velocity adapter
 *
 *	Setup the TX queue and RX queue interrupt suppression timers.
 *	Only available on the VT3216 (rev A0) and later chips.
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
	/* Only for newer revisions */
	if (vptr->rev_id >= REV_ID_VT3216_A0) {
		u8 txqueue_timer = 0;
		u8 rxqueue_timer = 0;

		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
					VELOCITY_SPEED_100)) {
			txqueue_timer = vptr->options.txqueue_timer;
			rxqueue_timer = vptr->options.rxqueue_timer;
		}

		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
	}
}

/**
 *	setup_adaptive_interrupts	-	Setup interrupt suppression
 *	@vptr: velocity adapter
 *
 *	The velocity is able to suppress interrupts during high interrupt
 *	load. This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 tx_intsup = vptr->options.tx_intsup;
	u16 rx_intsup = vptr->options.rx_intsup;

	/* Setup default interrupt mask (will be changed below) */
	vptr->int_mask = INT_MASK_DEF;

	/* Set Tx Interrupt Suppression Threshold */
	writeb(CAMCR_PS0, &regs->CAMCR);
	if (tx_intsup != 0) {
		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
				    ISR_PTX2I | ISR_PTX3I);
		writew(tx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

	/* Set Rx Interrupt Suppression Threshold */
	writeb(CAMCR_PS1, &regs->CAMCR);
	if (rx_intsup != 0) {
		vptr->int_mask &= ~ISR_PRXI;
		writew(rx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

	/* Select page to interrupt hold timer */
	writeb(0, &regs->CAMCR);
}

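/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */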
static void velocity_init_registers(struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	struct net_device *netdev = vptr->netdev;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		netif_stop_queue(netdev);

		/*
		 *	Reset RX to prevent RX pointer not on the 4X location
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);
		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(netdev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);

		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 *	Do reset
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		if (!vptr->no_eeprom) {
			mac_eeprom_reload(regs);
			for (i = 0; i < 6; i++)
				writeb(netdev->dev_addr[i], regs->PAR + i);
		}

		/*
		 *	clear Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);

		/*
		 *	Back off algorithm use original IEEE standard
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

		/*
		 *	Init CAM filter
		 */
		velocity_init_cam_filter(vptr);

		/*
		 *	Set packet filter: Receive directed and broadcast address
		 */
		velocity_set_multi(netdev);

		/*
		 *	Enable MII auto-polling
		 */
		enable_mii_autopoll(regs);

		setup_adaptive_interrupts(vptr);

		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		for (i = 0; i < vptr->tx.numq; i++) {
			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
			mac_tx_queue_run(regs, i);
		}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		netif_stop_queue(netdev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(netdev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);
	}
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int avail, dirty, unusable;

	/*
	 * RD number must be equal to 4X per hardware spec
	 * (programming guide rev 1.20, p.13)
	 */
	if (vptr->rx.filled < 4)
		return;

	wmb();

	unusable = vptr->rx.filled & 0x0003;
	dirty = vptr->rx.dirty - unusable;
	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
	}

	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
	vptr->rx.filled = unusable;
}

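/**
 *	velocity_init_dma_rings	-	set up DMA rings
 *	@vptr: Velocity to set up
 *
 *	Allocate PCI mapped DMA rings for the receive and transmit layer
 *	to use.
 */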
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
	struct velocity_opt *opt = &vptr->options;
	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
	dma_addr_t pool_dma;
	void *pool;
	unsigned int i;

	/*
	 * Allocate all RD/TD rings a single pool.
	 *
	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
	 * alignment
	 */
	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
				  rx_ring_size, &pool_dma, GFP_ATOMIC);
	if (!pool) {
		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
			vptr->netdev->name);
		return -ENOMEM;
	}

	vptr->rx.ring = pool;
	vptr->rx.pool_dma = pool_dma;

	pool += rx_ring_size;
	pool_dma += rx_ring_size;

	for (i = 0; i < vptr->tx.numq; i++) {
		vptr->tx.rings[i] = pool;
		vptr->tx.pool_dma[i] = pool_dma;
		pool += tx_ring_size;
		pool_dma += tx_ring_size;
	}

	return 0;
}

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
 *	@vptr: velocity
 *	@idx: ring index
 *
 *	Allocate a new full sized buffer for the reception of a frame and
 *	map it into PCI space for the hardware to use. The hardware
 *	requires *64* byte alignment of the buffer which makes life
 *	a bit more complex.
 *
 *	Returns 0 on success or -ENOMEM on failure.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
	if (rd_info->skb == NULL)
		return -ENOMEM;

	/*
	 *	Do the gymnastics to get the buffer head for data at
	 *	64byte alignment.
	 */
	skb_reserve(rd_info->skb,
		    64 - ((unsigned long) rd_info->skb->data & 63));
	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
					  vptr->rx.buf_sz, DMA_FROM_DEVICE);

	/*
	 *	Fill in the descriptor to match
	 */
	*((u32 *) &(rd->rdesc0)) = 0;
	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
	rd->pa_high = 0;
	return 0;
}

static int velocity_rx_refill(struct velocity_info *vptr)
{
	int dirty = vptr->rx.dirty, done = 0;

	do {
		struct rx_desc *rd = vptr->rx.ring + dirty;

		/* Fine for an all zero Rx desc at init time as well */
		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		if (!vptr->rx.info[dirty].skb) {
			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
				break;
		}
		done++;
		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
	} while (dirty != vptr->rx.curr);

	if (done) {
		vptr->rx.dirty = dirty;
		vptr->rx.filled += done;
	}

	return done;
}

/**
 *	velocity_free_rd_ring	-	free receive ring
 *	@vptr: velocity to clean up
 *
 *	Free the receive buffers for each ring slot and any
 *	attached socket buffers that need to go away.
 */
static void velocity_free_rd_ring(struct velocity_info *vptr)
{
	int i;

	if (vptr->rx.info == NULL)
		return;

	for (i = 0; i < vptr->options.numrx; i++) {
		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
		struct rx_desc *rd = vptr->rx.ring + i;

		memset(rd, 0, sizeof(*rd));

		if (!rd_info->skb)
			continue;
		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
				 DMA_FROM_DEVICE);
		rd_info->skb_dma = 0;

		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
	}

	kfree(vptr->rx.info);
	vptr->rx.info = NULL;
}

/**
 *	velocity_init_rd_ring	-	set up receive ring
 *	@vptr: velocity to configure
 *
 *	Allocate and set up the receive buffers for each ring slot and
 *	assign them to the hardware.
 */
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
	int ret = -ENOMEM;

	vptr->rx.info = kcalloc(vptr->options.numrx,
				sizeof(struct velocity_rd_info), GFP_KERNEL);
	if (!vptr->rx.info)
		goto out;

	velocity_init_rx_ring_indexes(vptr);

	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
			     "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
		velocity_free_rd_ring(vptr);
		goto out;
	}

	ret = 0;
out:
	return ret;
}

/**
 *	velocity_init_td_ring	-	set up transmit ring
 *	@vptr: velocity
 *
 *	Set up the transmit ring and chain the ring pointers together.
 *	Returns zero on success or a negative posix errno code for
 *	failure.
 */
static int velocity_init_td_ring(struct velocity_info *vptr)
{
	int j;

	/* Init the TD ring entries */
	for (j = 0; j < vptr->tx.numq; j++) {
		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
					    sizeof(struct velocity_td_info),
					    GFP_KERNEL);
		if (!vptr->tx.infos[j]) {
			while (--j >= 0)
				kfree(vptr->tx.infos[j]);
			return -ENOMEM;
		}

		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
	}
	return 0;
}

/**
 *	velocity_free_dma_rings	-	free PCI ring pointers
 *	@vptr: Velocity to free from
 *
 *	Clean up the PCI ring buffers allocated to this velocity.
 */
static void velocity_free_dma_rings(struct velocity_info *vptr)
{
	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;

	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
}

static int velocity_init_rings(struct velocity_info *vptr, int mtu)
{
	int ret;

	velocity_set_rxbufsize(vptr, mtu);

	ret = velocity_init_dma_rings(vptr);
	if (ret < 0)
		goto out;

	ret = velocity_init_rd_ring(vptr);
	if (ret < 0)
		goto err_free_dma_rings_0;

	ret = velocity_init_td_ring(vptr);
	if (ret < 0)
		goto err_free_rd_ring_1;
out:
	return ret;

err_free_rd_ring_1:
	velocity_free_rd_ring(vptr);
err_free_dma_rings_0:
	velocity_free_dma_rings(vptr);
	goto out;
}

/**
 *	velocity_free_tx_buf	-	free transmit buffer
 *	@vptr: velocity
 *	@tdinfo: buffer
 *	@td: transmit descriptor to free
 *
 *	Release a transmit buffer, unmapping any fragments it holds
 *	and freeing the socket buffer.
 */
static void velocity_free_tx_buf(struct velocity_info *vptr,
				 struct velocity_td_info *tdinfo, struct tx_desc *td)
{
	struct sk_buff *skb = tdinfo->skb;
	int i;

	/*
	 *	Don't unmap the pre-allocated tx_bufs
	 */
	for (i = 0; i < tdinfo->nskb_dma; i++) {
		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);

		/* For scatter-gather */
		if (skb_shinfo(skb)->nr_frags > 0)
			pktlen = max_t(size_t, pktlen,
				       td->td_buf[i].size & ~TD_QUEUE);

		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
	}
	dev_consume_skb_irq(skb);
	tdinfo->skb = NULL;
}

/*
 *	FIXME: could we merge this with velocity_free_tx_buf ?
 */
static void velocity_free_td_ring_entry(struct velocity_info *vptr,
					int q, int n)
{
	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
	int i;

	if (td_info == NULL)
		return;

	if (td_info->skb) {
		for (i = 0; i < td_info->nskb_dma; i++) {
			if (td_info->skb_dma[i]) {
				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
						 td_info->skb->len, DMA_TO_DEVICE);
				td_info->skb_dma[i] = 0;
			}
		}
		dev_kfree_skb(td_info->skb);
		td_info->skb = NULL;
	}
}

/**
 *	velocity_free_td_ring	-	free td ring
 *	@vptr: velocity
 *
 *	Free up the transmit ring for this particular velocity adapter.
 *	We free the ring contents but not the ring itself.
 */
static void velocity_free_td_ring(struct velocity_info *vptr)
{
	int i, j;

	for (j = 0; j < vptr->tx.numq; j++) {
		if (vptr->tx.infos[j] == NULL)
			continue;
		for (i = 0; i < vptr->options.numtx; i++)
			velocity_free_td_ring_entry(vptr, j, i);

		kfree(vptr->tx.infos[j]);
		vptr->tx.infos[j] = NULL;
	}
}

static void velocity_free_rings(struct velocity_info *vptr)
{
	velocity_free_td_ring(vptr);
	velocity_free_rd_ring(vptr);
	velocity_free_dma_rings(vptr);
}

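/**
 *	velocity_error	-	handle error from controller
 *	@vptr: velocity
 *	@status: card status
 *
 *	Process an error report from the hardware and attempt to recover
 *	the card itself. At the moment we cannot recover from some
 *	theoretically impossible errors but this could be fixed using
 *	the pci_device_failed logic to bounce the hardware.
 */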
static void velocity_error(struct velocity_info *vptr, int status)
{
	if (status & ISR_TXSTLI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;

		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
		writew(TRDCSR_RUN, &regs->TDCSRClr);
		netif_stop_queue(vptr->netdev);

		/* FIXME: port over the pci_device_failed code and use it
		   here */
	}

	if (status & ISR_SRCI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;
		int linked;

		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
			vptr->mii_status = check_connection_type(regs);

			/*
			 *	If it is a 3119, disable frame bursting in
			 *	halfduplex mode and enable it in fullduplex
			 *	mode
			 */
			if (vptr->rev_id < REV_ID_VT3216_A0) {
				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
				else
					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
			}
			/*
			 *	Only enable CD heart beat counter in 10HD mode
			 */
			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
			else
				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

			setup_queue_timers(vptr);
		}
		/*
		 *	Get link status from PHYSR0
		 */
		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

		if (linked) {
			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
			netif_carrier_on(vptr->netdev);
		} else {
			vptr->mii_status |= VELOCITY_LINK_FAIL;
			netif_carrier_off(vptr->netdev);
		}

		velocity_print_link_status(vptr);
		enable_flow_control_ability(vptr);

		/*
		 *	Re-enable auto-polling because SRCI will disable
		 *	auto-polling
		 */
		enable_mii_autopoll(regs);

		if (vptr->mii_status & VELOCITY_LINK_FAIL)
			netif_stop_queue(vptr->netdev);
		else
			netif_wake_queue(vptr->netdev);

	}
	if (status & ISR_MIBFI)
		velocity_update_hw_mibs(vptr);
	if (status & ISR_LSTEI)
		mac_rx_queue_wake(vptr->mac_regs);
}

/**
 *	velocity_tx_srv	-	transmit interrupt service
 *	@vptr: velocity
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
 */
static int velocity_tx_srv(struct velocity_info *vptr)
{
	struct tx_desc *td;
	int qnum;
	int full = 0;
	int idx;
	int works = 0;
	struct velocity_td_info *tdinfo;
	struct net_device_stats *stats = &vptr->netdev->stats;

	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
		     idx = (idx + 1) % vptr->options.numtx) {

			/*
			 *	Get Tx Descriptor
			 */
			td = &(vptr->tx.rings[qnum][idx]);
			tdinfo = &(vptr->tx.infos[qnum][idx]);

			if (td->tdesc0.len & OWNED_BY_NIC)
				break;

			if ((works++ > 15))
				break;

			if (td->tdesc0.TSR & TSR0_TERR) {
				stats->tx_errors++;
				stats->tx_dropped++;
				if (td->tdesc0.TSR & TSR0_CDH)
					stats->tx_heartbeat_errors++;
				if (td->tdesc0.TSR & TSR0_CRS)
					stats->tx_carrier_errors++;
				if (td->tdesc0.TSR & TSR0_ABT)
					stats->tx_aborted_errors++;
				if (td->tdesc0.TSR & TSR0_OWC)
					stats->tx_window_errors++;
			} else {
				stats->tx_packets++;
				stats->tx_bytes += tdinfo->skb->len;
			}
			velocity_free_tx_buf(vptr, tdinfo, td);
			vptr->tx.used[qnum]--;
		}
		vptr->tx.tail[qnum] = idx;

		if (AVAIL_TD(vptr, qnum) < 1)
			full = 1;
	}
	/*
	 *	Look to see if we should kick the transmit network
	 *	layer for more work.
	 */
	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
		netif_wake_queue(vptr->netdev);
	}
	return works;
}

/**
 *	velocity_rx_csum	-	checksum process
 *	@rd: receive packet descriptor
 *	@skb: network layer packet buffer
 *
 *	Process the status bits for the received packet and determine
 *	if the checksum was computed and verified by the hardware.
 */
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (rd->rdesc1.CSM & CSM_IPKT) {
		if (rd->rdesc1.CSM & CSM_IPOK) {
			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
			    (rd->rdesc1.CSM & CSM_UDPKT)) {
				if (!(rd->rdesc1.CSM & CSM_TUPOK))
					return;
			}
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}

/**
 *	velocity_rx_copy	-	in place Rx copy for small packets
 *	@rx_skb: network layer packet buffer candidate
 *	@pkt_size: received data size
 *	@vptr: velocity adapter
 *
 *	Replace the current skb that is scheduled for Rx processing by a
 *	shorter, immediately allocated skb, if the received packet is small
 *	enough. This function returns a negative value if the received
 *	packet is too big or if memory is exhausted.
 */
static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
			    struct velocity_info *vptr)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *new_skb;

		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
		if (new_skb) {
			new_skb->ip_summed = rx_skb[0]->ip_summed;
			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
			*rx_skb = new_skb;
			ret = 0;
		}
	}
	return ret;
}

/**
 *	velocity_iph_realign	-	IP header alignment
 *	@vptr: velocity we are handling
 *	@skb: network layer packet buffer
 *	@pkt_size: received data size
 *
 *	Align IP header on a 2 bytes boundary. This behavior can be
 *	configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
					struct sk_buff *skb, int pkt_size)
{
	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
		memmove(skb->data + 2, skb->data, pkt_size);
		skb_reserve(skb, 2);
	}
}

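/**
 *	velocity_receive_frame	-	received packet processor
 *	@vptr: velocity we are handling
 *	@idx: ring index
 *
 *	A packet has arrived. We process the packet and if appropriate
 *	pass the frame up the network stack.
 */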
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
	struct net_device_stats *stats = &vptr->netdev->stats;
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
	struct sk_buff *skb;

	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
			VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
		stats->rx_length_errors++;
		return -EINVAL;
	}

	if (rd->rdesc0.RSR & RSR_MAR)
		stats->multicast++;

	skb = rd_info->skb;

	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
				vptr->rx.buf_sz, DMA_FROM_DEVICE);

	velocity_rx_csum(rd, skb);

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		velocity_iph_realign(vptr, skb, pkt_len);
		rd_info->skb = NULL;
		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
				 DMA_FROM_DEVICE);
	} else {
		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
	}

	skb_put(skb, pkt_len - 4);
	skb->protocol = eth_type_trans(skb, vptr->netdev);

	if (rd->rdesc0.RSR & RSR_DETAG) {
		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
	netif_receive_skb(skb);

	stats->rx_bytes += pkt_len;
	stats->rx_packets++;

	return 0;
}

/**
 *	velocity_rx_srv	-	service RX interrupt
 *	@vptr: velocity
 *	@budget_left: remaining budget
 *
 *	Walk the receive ring of the velocity adapter and remove
 *	any received packets from the receive queue. Hand the ring
 *	slots back to the adapter for reuse.
 */
static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
	struct net_device_stats *stats = &vptr->netdev->stats;
	int rd_curr = vptr->rx.curr;
	int works = 0;

	while (works < budget_left) {
		struct rx_desc *rd = vptr->rx.ring + rd_curr;

		if (!vptr->rx.info[rd_curr].skb)
			break;

		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		rmb();

		/*
		 *	Don't drop CE or RL error frame although RXOK is off
		 */
		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
			if (velocity_receive_frame(vptr, rd_curr) < 0)
				stats->rx_dropped++;
		} else {
			if (rd->rdesc0.RSR & RSR_CRC)
				stats->rx_crc_errors++;
			if (rd->rdesc0.RSR & RSR_FAE)
				stats->rx_frame_errors++;

			stats->rx_dropped++;
		}

		rd->size |= RX_INTEN;

		rd_curr++;
		if (rd_curr >= vptr->options.numrx)
			rd_curr = 0;
		works++;
	}

	vptr->rx.curr = rd_curr;

	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
		velocity_give_many_rx_descs(vptr);

	VAR_USED(stats);
	return works;
}

static int velocity_poll(struct napi_struct *napi, int budget)
{
	struct velocity_info *vptr = container_of(napi,
			struct velocity_info, napi);
	unsigned int rx_done;
	unsigned long flags;

	/*
	 * Do rx and tx twice for performance (taken from the VIA
	 * out-of-tree driver).
	 */
	rx_done = velocity_rx_srv(vptr, budget);
	spin_lock_irqsave(&vptr->lock, flags);
	velocity_tx_srv(vptr);
	/* If budget not fully consumed, exit the polling mode */
	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		mac_enable_int(vptr->mac_regs);
	}
	spin_unlock_irqrestore(&vptr->lock, flags);

	return rx_done;
}

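/**
 *	velocity_intr	-	interrupt callback
 *	@irq: interrupt number
 *	@dev_instance: interrupting device
 *
 *	Called whenever an interrupt is generated by the velocity
 *	adapter IRQ line. We may not be the source of the interrupt
 *	and need to identify initially if we are, and if not exit as
 *	efficiently as possible.
 */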
static irqreturn_t velocity_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct velocity_info *vptr = netdev_priv(dev);
	u32 isr_status;

	spin_lock(&vptr->lock);
	isr_status = mac_read_isr(vptr->mac_regs);

	/* Not us ? */
	if (isr_status == 0) {
		spin_unlock(&vptr->lock);
		return IRQ_NONE;
	}

	/* Ack the interrupt */
	mac_write_isr(vptr->mac_regs, isr_status);

	if (likely(napi_schedule_prep(&vptr->napi))) {
		mac_disable_int(vptr->mac_regs);
		__napi_schedule(&vptr->napi);
	}

	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
		velocity_error(vptr, isr_status);

	spin_unlock(&vptr->lock);

	return IRQ_HANDLED;
}

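/**
 *	velocity_open	-	interface activation callback
 *	@dev: network layer device to open
 *
 *	Called when the network layer brings the interface up. Returns
 *	a negative posix error code on failure, or zero on success.
 *
 *	All the ring allocation and set up is done on open for this
 *	adapter to minimise memory usage when inactive.
 */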
static int velocity_open(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	ret = velocity_init_rings(vptr, dev->mtu);
	if (ret < 0)
		goto out;

	/* Ensure chip is running */
	velocity_set_power_state(vptr, PCI_D0);

	velocity_init_registers(vptr, VELOCITY_INIT_COLD);

	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
			  dev->name, dev);
	if (ret < 0) {
		/* Power down the chip */
		velocity_set_power_state(vptr, PCI_D3hot);
		velocity_free_rings(vptr);
		goto out;
	}

	velocity_give_many_rx_descs(vptr);

	mac_enable_int(vptr->mac_regs);
	netif_start_queue(dev);
	napi_enable(&vptr->napi);
	vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
	return ret;
}

/**
 *	velocity_shutdown	-	shut down the chip
 *	@vptr: velocity to deactivate
 *
 *	Shuts down the internal operations of the velocity and
 *	disables interrupts, autopolling, transmit and receive.
 */
static void velocity_shutdown(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	mac_disable_int(regs);
	writel(CR0_STOP, &regs->CR0Set);
	writew(0xFFFF, &regs->TDCSRClr);
	writeb(0xFF, &regs->RDCSRClr);
	safe_disable_mii_autopoll(regs);
	mac_clear_isr(regs);
}

/**
 *	velocity_change_mtu	-	MTU change callback
 *	@dev: network device
 *	@new_mtu: desired MTU
 *
 *	Handle requests from the networking layer for MTU change on
 *	this interface. It gets called on a change by the network layer.
 *	Return zero for success or negative posix error code.
 */
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		goto out_0;
	}

	if (dev->mtu != new_mtu) {
		struct velocity_info *tmp_vptr;
		unsigned long flags;
		struct rx_info rx;
		struct tx_info tx;

		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
		if (!tmp_vptr) {
			ret = -ENOMEM;
			goto out_0;
		}

		tmp_vptr->netdev = dev;
		tmp_vptr->pdev = vptr->pdev;
		tmp_vptr->dev = vptr->dev;
		tmp_vptr->options = vptr->options;
		tmp_vptr->tx.numq = vptr->tx.numq;

		ret = velocity_init_rings(tmp_vptr, new_mtu);
		if (ret < 0)
			goto out_free_tmp_vptr_1;

		napi_disable(&vptr->napi);

		spin_lock_irqsave(&vptr->lock, flags);

		netif_stop_queue(dev);
		velocity_shutdown(vptr);

		rx = vptr->rx;
		tx = vptr->tx;

		vptr->rx = tmp_vptr->rx;
		vptr->tx = tmp_vptr->tx;

		tmp_vptr->rx = rx;
		tmp_vptr->tx = tx;

		dev->mtu = new_mtu;

		velocity_init_registers(vptr, VELOCITY_INIT_COLD);

		velocity_give_many_rx_descs(vptr);

		napi_enable(&vptr->napi);

		mac_enable_int(vptr->mac_regs);
		netif_start_queue(dev);

		spin_unlock_irqrestore(&vptr->lock, flags);

		velocity_free_rings(tmp_vptr);

out_free_tmp_vptr_1:
		kfree(tmp_vptr);
	}
out_0:
	return ret;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 *	velocity_poll_controller	-	Velocity Poll controller function
 *	@dev: network device
 *
 *	Used by NETCONSOLE and other diagnostic tools to allow network I/O
 *	with interrupts disabled.
 */
static void velocity_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	velocity_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *	velocity_mii_ioctl	-	MII ioctl handler
 *	@dev: network device
 *	@ifr: the ifreq block for the ioctl
 *	@cmd: the command
 *
 *	Process MII requests made via ioctl from the network layer. These
 *	are used by tools like kudzu to interrogate the link state of the
 *	hardware.
 */
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned long flags;
	struct mii_ioctl_data *miidata = if_mii(ifr);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
		break;
	case SIOCGMIIREG:
		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
			return -ETIMEDOUT;
		break;
	case SIOCSMIIREG:
		spin_lock_irqsave(&vptr->lock, flags);
		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
		spin_unlock_irqrestore(&vptr->lock, flags);
		check_connection_type(vptr->mac_regs);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 *	velocity_ioctl	-	ioctl entry point
 *	@dev: network device
 *	@rq: interface request ioctl
 *	@cmd: command code
 *
 *	Called when the user issues an ioctl request to the network
 *	device in question. The velocity interface supports MII.
 */
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	/* If we are asked for information and the device is power
	   saving then we need to bring the device back up to talk to it */
	if (!netif_running(dev))
		velocity_set_power_state(vptr, PCI_D0);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = velocity_mii_ioctl(dev, rq, cmd);
		break;

	default:
		ret = -EOPNOTSUPP;
	}
	if (!netif_running(dev))
		velocity_set_power_state(vptr, PCI_D3hot);

	return ret;
}

/**
 *	velocity_get_stats	-	statistics callback
 *	@dev: network device
 *
 *	Callback from the network layer to allow driver statistics
 *	to be resynchronized with hardware collected state. In the
 *	case of the velocity we need to pull the MIB counters from
 *	the hardware into the counters before letting the network
 *	layer display them.
 */
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	/* If the hardware is down, don't touch MII */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irq(&vptr->lock);
	velocity_update_hw_mibs(vptr);
	spin_unlock_irq(&vptr->lock);

	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];

	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];

	/* detailed rx_errors: */
	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];

	return &dev->stats;
}

/**
 *	velocity_close	-	close adapter callback
 *	@dev: network device
 *
 *	Callback from the network layer when the velocity is being
 *	deactivated by the network layer.
 */
static int velocity_close(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	napi_disable(&vptr->napi);
	netif_stop_queue(dev);
	velocity_shutdown(vptr);

	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
		velocity_get_ip(vptr);

	free_irq(dev->irq, dev);

	velocity_free_rings(vptr);

	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
	return 0;
}

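/**
 *	velocity_xmit	-	transmit packet callback
 *	@skb: buffer to transmit
 *	@dev: network device
 *
 *	Called by the network layer to request a packet is queued to
 *	the velocity. Returns zero on success.
 */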
2512 static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2513 struct net_device *dev)
2514 {
2515 struct velocity_info *vptr = netdev_priv(dev);
2516 int qnum = 0;
2517 struct tx_desc *td_ptr;
2518 struct velocity_td_info *tdinfo;
2519 unsigned long flags;
2520 int pktlen;
2521 int index, prev;
2522 int i = 0;
2523
2524 if (skb_padto(skb, ETH_ZLEN))
2525 goto out;
2526
2527
2528
2529 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2530 dev_kfree_skb_any(skb);
2531 return NETDEV_TX_OK;
2532 }
2533
2534 pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2535 max_t(unsigned int, skb->len, ETH_ZLEN) :
2536 skb_headlen(skb);
2537
2538 spin_lock_irqsave(&vptr->lock, flags);
2539
2540 index = vptr->tx.curr[qnum];
2541 td_ptr = &(vptr->tx.rings[qnum][index]);
2542 tdinfo = &(vptr->tx.infos[qnum][index]);
2543
2544 td_ptr->tdesc1.TCR = TCR0_TIC;
2545 td_ptr->td_buf[0].size &= ~TD_QUEUE;
2546
2547 /*
2548  *	Map the linear network buffer into PCI space and
2549  *	add it to the transmit ring.
2550  */
2551 tdinfo->skb = skb;
2552 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2553 DMA_TO_DEVICE);
2554 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2555 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2556 td_ptr->td_buf[0].pa_high = 0;
2557 td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2558
2559 /* Handle fragments */
2560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562
2563 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2564 frag, 0,
2565 skb_frag_size(frag),
2566 DMA_TO_DEVICE);
2567
2568 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2569 td_ptr->td_buf[i + 1].pa_high = 0;
2570 td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2571 }
2572 tdinfo->nskb_dma = i + 1;
2573
2574 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2575
2576 if (skb_vlan_tag_present(skb)) {
2577 td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2578 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2579 }
2580
2581 /*
2582  *	Handle hardware checksum
2583  */
2584 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2585 const struct iphdr *ip = ip_hdr(skb);
2586 if (ip->protocol == IPPROTO_TCP)
2587 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2588 else if (ip->protocol == IPPROTO_UDP)
2589 td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2590 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2591 }
2592
2593 prev = index - 1;
2594 if (prev < 0)
2595 prev = vptr->options.numtx - 1;
2596 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2597 vptr->tx.used[qnum]++;
2598 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2599
2600 if (AVAIL_TD(vptr, qnum) < 1)
2601 netif_stop_queue(dev);
2602
2603 td_ptr = &(vptr->tx.rings[qnum][prev]);
2604 td_ptr->td_buf[0].size |= TD_QUEUE;
2605 mac_tx_queue_wake(vptr->mac_regs, qnum);
2606
2607 spin_unlock_irqrestore(&vptr->lock, flags);
2608 out:
2609 return NETDEV_TX_OK;
2610 }
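
Note the handoff order in velocity_xmit(): the new descriptor is fully filled and marked OWNED_BY_NIC before TD_QUEUE is set on the previous descriptor and the queue is woken, so the hardware can only chain onto a descriptor that is already valid.
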
2611
2612 static const struct net_device_ops velocity_netdev_ops = {
2613 .ndo_open = velocity_open,
2614 .ndo_stop = velocity_close,
2615 .ndo_start_xmit = velocity_xmit,
2616 .ndo_get_stats = velocity_get_stats,
2617 .ndo_validate_addr = eth_validate_addr,
2618 .ndo_set_mac_address = eth_mac_addr,
2619 .ndo_set_rx_mode = velocity_set_multi,
2620 .ndo_change_mtu = velocity_change_mtu,
2621 .ndo_do_ioctl = velocity_ioctl,
2622 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2623 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2624 #ifdef CONFIG_NET_POLL_CONTROLLER
2625 .ndo_poll_controller = velocity_poll_controller,
2626 #endif
2627 };
2628
2629 /**
2630  *	velocity_init_info	-	init private data
2631  *	@vptr: Velocity info
2632  *	@info: Board type
2633  *
2634  *	Set up the initial velocity_info struct for the device that has
2635  *	been discovered.
2636  */
2637
2638 static void velocity_init_info(struct velocity_info *vptr,
2639 const struct velocity_info_tbl *info)
2640 {
2641 vptr->chip_id = info->chip_id;
2642 vptr->tx.numq = info->txqueue;
2643 vptr->multicast_limit = MCAM_SIZE;
2644 spin_lock_init(&vptr->lock);
2645 }
2646
2647 /**
2648  *	velocity_get_pci_info	-	retrieve PCI info for device
2649  *	@vptr: velocity device
2650  *
2651  *	Retrieve the PCI configuration space data (e.g. IO and memory
2652  *	base addresses) for the device being brought up.
2653  */
2654
2655 static int velocity_get_pci_info(struct velocity_info *vptr)
2656 {
2657 struct pci_dev *pdev = vptr->pdev;
2658
2659 pci_set_master(pdev);
2660
2661 vptr->ioaddr = pci_resource_start(pdev, 0);
2662 vptr->memaddr = pci_resource_start(pdev, 1);
2663
2664 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2665 dev_err(&pdev->dev,
2666 "region #0 is not an I/O resource, aborting.\n");
2667 return -EINVAL;
2668 }
2669
2670 if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2671 dev_err(&pdev->dev,
2672 "region #1 is an I/O resource, aborting.\n");
2673 return -EINVAL;
2674 }
2675
2676 if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2677 dev_err(&pdev->dev, "region #1 is too small.\n");
2678 return -EINVAL;
2679 }
2680
2681 return 0;
2682 }
2683
2684 /**
2685  *	velocity_get_platform_info -	retrieve platform info for device
2686  *	@vptr: velocity device
2687  *
2688  *	Retrieve the OF platform configuration (EEPROM presence and the
2689  *	memory base address) for the device being brought up.
2690  */
2691 static int velocity_get_platform_info(struct velocity_info *vptr)
2692 {
2693 struct resource res;
2694 int ret;
2695
2696 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2697 vptr->no_eeprom = 1;
2698
2699 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2700 if (ret) {
2701 dev_err(vptr->dev, "unable to find memory address\n");
2702 return ret;
2703 }
2704
2705 vptr->memaddr = res.start;
2706
2707 if (resource_size(&res) < VELOCITY_IO_SIZE) {
2708 dev_err(vptr->dev, "memory region is too small.\n");
2709 return -EINVAL;
2710 }
2711
2712 return 0;
2713 }
2714
2715 /**
2716  *	velocity_print_info	-	per driver data
2717  *	@vptr: velocity
2718  *
2719  *	Print per driver data as the kernel driver finds Velocity
2720  *	hardware.
2721  */
2722 static void velocity_print_info(struct velocity_info *vptr)
2723 {
2724 struct net_device *dev = vptr->netdev;
2725
2726 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2727 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2728 dev->name, dev->dev_addr);
2729 }
2730
2731 static u32 velocity_get_link(struct net_device *dev)
2732 {
2733 struct velocity_info *vptr = netdev_priv(dev);
2734 struct mac_regs __iomem *regs = vptr->mac_regs;
2735 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2736 }
2737
2738 /**
2739  *	velocity_probe - set up discovered velocity device
2740  *	@dev: device being probed
2741  *	@irq: interrupt number
2742  *	@info: table of adapter information
2743  *	@bustype: bus the device is attached to (PCI or platform)
2744  *
2745  *	Configure a discovered adapter from scratch.
2746  */
2747 static int velocity_probe(struct device *dev, int irq,
2748 const struct velocity_info_tbl *info,
2749 enum velocity_bus_type bustype)
2750 {
2751 static int first = 1;
2752 struct net_device *netdev;
2753 int i;
2754 const char *drv_string;
2755 struct velocity_info *vptr;
2756 struct mac_regs __iomem *regs;
2757 int ret = -ENOMEM;
2758
2759 /* FIXME: this driver, like almost all other ethernet drivers,
2760  * can support more than MAX_UNITS.
2761  */
2762 if (velocity_nics >= MAX_UNITS) {
2763 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2764 return -ENODEV;
2765 }
2766
2767 netdev = alloc_etherdev(sizeof(struct velocity_info));
2768 if (!netdev)
2769 goto out;
2770
2771 /* Chain it all together */
2772
2773 SET_NETDEV_DEV(netdev, dev);
2774 vptr = netdev_priv(netdev);
2775
2776 if (first) {
2777 printk(KERN_INFO "%s Ver. %s\n",
2778 VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2779 printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2780 printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2781 first = 0;
2782 }
2783
2784 netdev->irq = irq;
2785 vptr->netdev = netdev;
2786 vptr->dev = dev;
2787
2788 velocity_init_info(vptr, info);
2789
2790 if (bustype == BUS_PCI) {
2791 vptr->pdev = to_pci_dev(dev);
2792
2793 ret = velocity_get_pci_info(vptr);
2794 if (ret < 0)
2795 goto err_free_dev;
2796 } else {
2797 vptr->pdev = NULL;
2798 ret = velocity_get_platform_info(vptr);
2799 if (ret < 0)
2800 goto err_free_dev;
2801 }
2802
2803 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2804 if (regs == NULL) {
2805 ret = -EIO;
2806 goto err_free_dev;
2807 }
2808
2809 vptr->mac_regs = regs;
2810 vptr->rev_id = readb(&regs->rev_id);
2811
2812 mac_wol_reset(regs);
2813
2814 for (i = 0; i < 6; i++)
2815 netdev->dev_addr[i] = readb(&regs->PAR[i]);
2816
2817
2818 drv_string = dev_driver_string(dev);
2819
2820 velocity_get_options(&vptr->options, velocity_nics, drv_string);
2821
2822 /*
2823  *	Mask out the options that cannot be set on this chip
2824  */
2825
2826 vptr->options.flags &= info->flags;
2827
2828 /*
2829  *	Enable the chip-specific capabilities
2830  */
2831
2832 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2833
2834 vptr->wol_opts = vptr->options.wol_opts;
2835 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2836
2837 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2838
2839 netdev->netdev_ops = &velocity_netdev_ops;
2840 netdev->ethtool_ops = &velocity_ethtool_ops;
2841 netif_napi_add(netdev, &vptr->napi, velocity_poll,
2842 VELOCITY_NAPI_WEIGHT);
2843
2844 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2845 NETIF_F_HW_VLAN_CTAG_TX;
2846 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2847 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2848 NETIF_F_IP_CSUM;
2849
2850
2851 netdev->min_mtu = VELOCITY_MIN_MTU;
2852 netdev->max_mtu = VELOCITY_MAX_MTU;
2853
2854 ret = register_netdev(netdev);
2855 if (ret < 0)
2856 goto err_iounmap;
2857
2858 if (!velocity_get_link(netdev)) {
2859 netif_carrier_off(netdev);
2860 vptr->mii_status |= VELOCITY_LINK_FAIL;
2861 }
2862
2863 velocity_print_info(vptr);
2864 dev_set_drvdata(vptr->dev, netdev);
2865
2866
2867 /* and leave the chip powered down */
2868 velocity_set_power_state(vptr, PCI_D3hot);
2869 velocity_nics++;
2870 out:
2871 return ret;
2872
2873 err_iounmap:
2874 netif_napi_del(&vptr->napi);
2875 iounmap(regs);
2876 err_free_dev:
2877 free_netdev(netdev);
2878 goto out;
2879 }
2880
2881
2882 /**
2883  *	velocity_remove	-	device unplug
2884  *	@dev: device being removed
2885  *
2886  *	Device unload callback. Called on an unplug or on module
2887  *	unload for each active device that is present.
2888  */
2889 static int velocity_remove(struct device *dev)
2890 {
2891 struct net_device *netdev = dev_get_drvdata(dev);
2892 struct velocity_info *vptr = netdev_priv(netdev);
2893
2894 unregister_netdev(netdev);
2895 netif_napi_del(&vptr->napi);
2896 iounmap(vptr->mac_regs);
2897 free_netdev(netdev);
2898 velocity_nics--;
2899
2900 return 0;
2901 }
2902
2903 static int velocity_pci_probe(struct pci_dev *pdev,
2904 const struct pci_device_id *ent)
2905 {
2906 const struct velocity_info_tbl *info =
2907 &chip_info_table[ent->driver_data];
2908 int ret;
2909
2910 ret = pci_enable_device(pdev);
2911 if (ret < 0)
2912 return ret;
2913
2914 ret = pci_request_regions(pdev, VELOCITY_NAME);
2915 if (ret < 0) {
2916 dev_err(&pdev->dev, "No PCI resources.\n");
2917 goto fail1;
2918 }
2919
2920 ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2921 if (ret == 0)
2922 return 0;
2923
2924 pci_release_regions(pdev);
2925 fail1:
2926 pci_disable_device(pdev);
2927 return ret;
2928 }
2929
2930 static void velocity_pci_remove(struct pci_dev *pdev)
2931 {
2932 velocity_remove(&pdev->dev);
2933
2934 pci_release_regions(pdev);
2935 pci_disable_device(pdev);
2936 }
2937
2938 static int velocity_platform_probe(struct platform_device *pdev)
2939 {
2940 const struct of_device_id *of_id;
2941 const struct velocity_info_tbl *info;
2942 int irq;
2943
2944 of_id = of_match_device(velocity_of_ids, &pdev->dev);
2945 if (!of_id)
2946 return -EINVAL;
2947 info = of_id->data;
2948
2949 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2950 if (!irq)
2951 return -EINVAL;
2952
2953 return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2954 }
2955
2956 static int velocity_platform_remove(struct platform_device *pdev)
2957 {
2958 velocity_remove(&pdev->dev);
2959
2960 return 0;
2961 }
2962
2963 #ifdef CONFIG_PM_SLEEP
2964 /**
2965  *	wol_calc_crc		-	WOL CRC
2966  *	@size: size of the wake mask
2967  *	@pattern: data pattern
2968  *	@mask_pattern: mask
2969  *
2970  *	Compute the wake on lan crc hashes for the packet header.
2971  */
2972 static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2973 {
2974 u16 crc = 0xFFFF;
2975 u8 mask;
2976 int i, j;
2977
2978 for (i = 0; i < size; i++) {
2979 mask = mask_pattern[i];
2980
2981 /* Skip this loop if the mask equals to zero */
2982 if (mask == 0x00)
2983 continue;
2984
2985 for (j = 0; j < 8; j++) {
2986 if ((mask & 0x01) == 0) {
2987 mask >>= 1;
2988 continue;
2989 }
2990 mask >>= 1;
2991 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2992 }
2993 }
2994
2995 crc = ~crc;
2996 return bitrev32(crc) >> 16;
2997 }
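
The hash above is a reflected CRC-16/CCITT computed over only the masked pattern bytes, with the final value bit-reversed for the hardware. A minimal self-contained sketch of the same computation, assuming crc_ccitt() is the kernel's LSB-first implementation with polynomial 0x8408:

#include <stdint.h>

/* bitwise equivalent of one crc_ccitt() byte update */
static uint16_t crc_ccitt_byte(uint16_t crc, uint8_t data)
{
	int k;

	crc ^= data;
	for (k = 0; k < 8; k++)
		crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;
	int k;

	for (k = 0; k < 16; k++)
		r = (r << 1) | ((x >> k) & 1);
	return r;
}

/* mask bit j of byte i gates pattern byte i*8+j, as in wol_calc_crc() */
static uint16_t wol_crc_sketch(int size, const uint8_t *pattern,
			       const uint8_t *mask)
{
	uint16_t crc = 0xFFFF;
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < 8; j++)
			if (mask[i] & (1 << j))
				crc = crc_ccitt_byte(crc, pattern[i * 8 + j]);
	return bitrev16(~crc);	/* matches bitrev32(crc) >> 16 on a u16 */
}
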
2998
2999 /**
3000  *	velocity_set_wol	-	set up for wake on lan
3001  *	@vptr: velocity to set WOL status on
3002  *
3003  *	Set a card up for wake on lan either by unicast or by
3004  *	ARP packet.
3005  *
3006  *	FIXME: check static buffer is safe here
3007  */
3008 static int velocity_set_wol(struct velocity_info *vptr)
3009 {
3010 struct mac_regs __iomem *regs = vptr->mac_regs;
3011 enum speed_opt spd_dpx = vptr->options.spd_dpx;
3012 static u8 buf[256];
3013 int i;
3014
3015 static u32 mask_pattern[2][4] = {
3016 {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3017 {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}  /* Magic Packet */
3018 };
3019
3020 writew(0xFFFF, &regs->WOLCRClr);
3021 writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3022 writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3023
3024
3025 /*
3026  * Note: waking on PHY link change (VELOCITY_WOL_PHY) is not
3027  * enabled here; only magic, unicast and ARP wake-ups are.
3028  */
3029 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3030 writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3031
3032 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3033 struct arp_packet *arp = (struct arp_packet *) buf;
3034 u16 crc;
3035 memset(buf, 0, sizeof(struct arp_packet) + 7);
3036
3037 for (i = 0; i < 4; i++)
3038 writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3039
3040 arp->type = htons(ETH_P_ARP);
3041 arp->ar_op = htons(1);
3042
3043 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3044
3045 crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3046 (u8 *) & mask_pattern[0][0]);
3047
3048 writew(crc, &regs->PatternCRC[0]);
3049 writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3050 }
3051
3052 BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3053 BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3054
3055 writew(0x0FFF, &regs->WOLSRClr);
3056
3057 if (spd_dpx == SPD_DPX_1000_FULL)
3058 goto mac_done;
3059
3060 if (spd_dpx != SPD_DPX_AUTO)
3061 goto advertise_done;
3062
3063 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3064 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3065 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3066
3067 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3068 }
3069
3070 if (vptr->mii_status & VELOCITY_SPEED_1000)
3071 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3072
3073 advertise_done:
3074 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3075
3076 {
3077 u8 GCR;
3078 GCR = readb(&regs->CHIPGCR);
3079 GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3080 writeb(GCR, &regs->CHIPGCR);
3081 }
3082
3083 mac_done:
3084 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3085 /* Turn on SWPTAG just before entering power mode */
3086 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3087 /* Go to bed ..... */
3088 BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3089
3090 return 0;
3091 }
3092
3093 /**
3094  *	velocity_save_context	-	save registers
3095  *	@vptr: velocity
3096  *	@context: buffer for stored context
3097  *
3098  *	Retrieve the current configuration from the velocity hardware
3099  *	and stash it in the context structure, for use by the context
3100  *	restore functions. This allows us to save things we need across
3101  *	power down states.
3102  */
3103 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3104 {
3105 struct mac_regs __iomem *regs = vptr->mac_regs;
3106 u16 i;
3107 u8 __iomem *ptr = (u8 __iomem *)regs;
3108
3109 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3110 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3111
3112 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3113 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3114
3115 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3116 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3117
3118 }
3119
3120 static int velocity_suspend(struct device *dev)
3121 {
3122 struct net_device *netdev = dev_get_drvdata(dev);
3123 struct velocity_info *vptr = netdev_priv(netdev);
3124 unsigned long flags;
3125
3126 if (!netif_running(vptr->netdev))
3127 return 0;
3128
3129 netif_device_detach(vptr->netdev);
3130
3131 spin_lock_irqsave(&vptr->lock, flags);
3132 if (vptr->pdev)
3133 pci_save_state(vptr->pdev);
3134
3135 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3136 velocity_get_ip(vptr);
3137 velocity_save_context(vptr, &vptr->context);
3138 velocity_shutdown(vptr);
3139 velocity_set_wol(vptr);
3140 if (vptr->pdev)
3141 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3142 velocity_set_power_state(vptr, PCI_D3hot);
3143 } else {
3144 velocity_save_context(vptr, &vptr->context);
3145 velocity_shutdown(vptr);
3146 if (vptr->pdev)
3147 pci_disable_device(vptr->pdev);
3148 velocity_set_power_state(vptr, PCI_D3hot);
3149 }
3150
3151 spin_unlock_irqrestore(&vptr->lock, flags);
3152 return 0;
3153 }
3154
3155 /**
3156  *	velocity_restore_context	-	restore registers
3157  *	@vptr: velocity
3158  *	@context: buffer for stored context
3159  *
3160  *	Reload the register configuration from the velocity context
3161  *	created by velocity_save_context.
3162  */
3163 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3164 {
3165 struct mac_regs __iomem *regs = vptr->mac_regs;
3166 int i;
3167 u8 __iomem *ptr = (u8 __iomem *)regs;
3168
3169 for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3170 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3171
3172
3173 for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3174
3175 writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3176
3177 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3178 }
3179
3180 for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3181 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3182
3183 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3184 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3185
3186 for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3187 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3188 }
3189
3190 static int velocity_resume(struct device *dev)
3191 {
3192 struct net_device *netdev = dev_get_drvdata(dev);
3193 struct velocity_info *vptr = netdev_priv(netdev);
3194 unsigned long flags;
3195 int i;
3196
3197 if (!netif_running(vptr->netdev))
3198 return 0;
3199
3200 velocity_set_power_state(vptr, PCI_D0);
3201
3202 if (vptr->pdev) {
3203 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3204 pci_restore_state(vptr->pdev);
3205 }
3206
3207 mac_wol_reset(vptr->mac_regs);
3208
3209 spin_lock_irqsave(&vptr->lock, flags);
3210 velocity_restore_context(vptr, &vptr->context);
3211 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3212 mac_disable_int(vptr->mac_regs);
3213
3214 velocity_tx_srv(vptr);
3215
3216 for (i = 0; i < vptr->tx.numq; i++) {
3217 if (vptr->tx.used[i])
3218 mac_tx_queue_wake(vptr->mac_regs, i);
3219 }
3220
3221 mac_enable_int(vptr->mac_regs);
3222 spin_unlock_irqrestore(&vptr->lock, flags);
3223 netif_device_attach(vptr->netdev);
3224
3225 return 0;
3226 }
3227 #endif
3228
3229 static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3230
3231 /*
3232  *	Definition for our device driver. The PCI layer interface
3233  *	routines look for this definition and pass it to our PCI layer.
3234  */
3235 static struct pci_driver velocity_pci_driver = {
3236 .name = VELOCITY_NAME,
3237 .id_table = velocity_pci_id_table,
3238 .probe = velocity_pci_probe,
3239 .remove = velocity_pci_remove,
3240 .driver = {
3241 .pm = &velocity_pm_ops,
3242 },
3243 };
3244
3245 static struct platform_driver velocity_platform_driver = {
3246 .probe = velocity_platform_probe,
3247 .remove = velocity_platform_remove,
3248 .driver = {
3249 .name = "via-velocity",
3250 .of_match_table = velocity_of_ids,
3251 .pm = &velocity_pm_ops,
3252 },
3253 };
3254
3255 /**
3256  *	velocity_ethtool_up	-	pre hook for ethtool
3257  *	@dev: network device
3258  *
3259  *	Called before an ethtool operation. We need to make sure the
3260  *	chip is out of D3 state before we poke at it.
3261  */
3262 static int velocity_ethtool_up(struct net_device *dev)
3263 {
3264 struct velocity_info *vptr = netdev_priv(dev);
3265 if (!netif_running(dev))
3266 velocity_set_power_state(vptr, PCI_D0);
3267 return 0;
3268 }
3269
3270 /**
3271  *	velocity_ethtool_down	-	post hook for ethtool
3272  *	@dev: network device
3273  *
3274  *	Called after an ethtool operation. Restore the chip back to D3
3275  *	state if it isn't running.
3276  */
3277 static void velocity_ethtool_down(struct net_device *dev)
3278 {
3279 struct velocity_info *vptr = netdev_priv(dev);
3280 if (!netif_running(dev))
3281 velocity_set_power_state(vptr, PCI_D3hot);
3282 }
3283
3284 static int velocity_get_link_ksettings(struct net_device *dev,
3285 struct ethtool_link_ksettings *cmd)
3286 {
3287 struct velocity_info *vptr = netdev_priv(dev);
3288 struct mac_regs __iomem *regs = vptr->mac_regs;
3289 u32 status;
3290 u32 supported, advertising;
3291
3292 status = check_connection_type(vptr->mac_regs);
3293
3294 supported = SUPPORTED_TP |
3295 SUPPORTED_Autoneg |
3296 SUPPORTED_10baseT_Half |
3297 SUPPORTED_10baseT_Full |
3298 SUPPORTED_100baseT_Half |
3299 SUPPORTED_100baseT_Full |
3300 SUPPORTED_1000baseT_Half |
3301 SUPPORTED_1000baseT_Full;
3302
3303 advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3304 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3305 advertising |=
3306 ADVERTISED_10baseT_Half |
3307 ADVERTISED_10baseT_Full |
3308 ADVERTISED_100baseT_Half |
3309 ADVERTISED_100baseT_Full |
3310 ADVERTISED_1000baseT_Half |
3311 ADVERTISED_1000baseT_Full;
3312 } else {
3313 switch (vptr->options.spd_dpx) {
3314 case SPD_DPX_1000_FULL:
3315 advertising |= ADVERTISED_1000baseT_Full;
3316 break;
3317 case SPD_DPX_100_HALF:
3318 advertising |= ADVERTISED_100baseT_Half;
3319 break;
3320 case SPD_DPX_100_FULL:
3321 advertising |= ADVERTISED_100baseT_Full;
3322 break;
3323 case SPD_DPX_10_HALF:
3324 advertising |= ADVERTISED_10baseT_Half;
3325 break;
3326 case SPD_DPX_10_FULL:
3327 advertising |= ADVERTISED_10baseT_Full;
3328 break;
3329 default:
3330 break;
3331 }
3332 }
3333
3334 if (status & VELOCITY_SPEED_1000)
3335 cmd->base.speed = SPEED_1000;
3336 else if (status & VELOCITY_SPEED_100)
3337 cmd->base.speed = SPEED_100;
3338 else
3339 cmd->base.speed = SPEED_10;
3340
3341 cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3342 AUTONEG_ENABLE : AUTONEG_DISABLE;
3343 cmd->base.port = PORT_TP;
3344 cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3345
3346 if (status & VELOCITY_DUPLEX_FULL)
3347 cmd->base.duplex = DUPLEX_FULL;
3348 else
3349 cmd->base.duplex = DUPLEX_HALF;
3350
3351 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3352 supported);
3353 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3354 advertising);
3355
3356 return 0;
3357 }
3358
3359 static int velocity_set_link_ksettings(struct net_device *dev,
3360 const struct ethtool_link_ksettings *cmd)
3361 {
3362 struct velocity_info *vptr = netdev_priv(dev);
3363 u32 speed = cmd->base.speed;
3364 u32 curr_status;
3365 u32 new_status = 0;
3366 int ret = 0;
3367
3368 curr_status = check_connection_type(vptr->mac_regs);
3369 curr_status &= (~VELOCITY_LINK_FAIL);
3370
3371 new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3372 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3373 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3374 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3375 new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3376 VELOCITY_DUPLEX_FULL : 0);
3377
3378 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3379 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3380 ret = -EINVAL;
3381 } else {
3382 enum speed_opt spd_dpx;
3383
3384 if (new_status & VELOCITY_AUTONEG_ENABLE)
3385 spd_dpx = SPD_DPX_AUTO;
3386 else if ((new_status & VELOCITY_SPEED_1000) &&
3387 (new_status & VELOCITY_DUPLEX_FULL)) {
3388 spd_dpx = SPD_DPX_1000_FULL;
3389 } else if (new_status & VELOCITY_SPEED_100)
3390 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3391 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3392 else if (new_status & VELOCITY_SPEED_10)
3393 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3394 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3395 else
3396 return -EOPNOTSUPP;
3397
3398 vptr->options.spd_dpx = spd_dpx;
3399
3400 velocity_set_media_mode(vptr, new_status);
3401 }
3402
3403 return ret;
3404 }
3405
3406 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3407 {
3408 struct velocity_info *vptr = netdev_priv(dev);
3409
3410 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3411 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3412 if (vptr->pdev)
3413 strlcpy(info->bus_info, pci_name(vptr->pdev),
3414 sizeof(info->bus_info));
3415 else
3416 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3417 }
3418
3419 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3420 {
3421 struct velocity_info *vptr = netdev_priv(dev);
3422 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3423 wol->wolopts |= WAKE_MAGIC;
3424 /*
3425  * WAKE_PHY is advertised as supported but is never set in
3426  * wol_opts, so it is not reported here.
3427  */
3428 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3429 wol->wolopts |= WAKE_UCAST;
3430 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3431 wol->wolopts |= WAKE_ARP;
3432 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3433 }
3434
3435 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3436 {
3437 struct velocity_info *vptr = netdev_priv(dev);
3438
3439 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3440 return -EFAULT;
3441 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3442
3443
3444 /*
3445  * Magic packet wake is always enabled as the baseline; a
3446  * WAKE_PHY request is accepted above but has no effect, as
3447  * only magic, unicast and ARP wake-ups are programmed into
3448  * the hardware.
3449  */
3450 if (wol->wolopts & WAKE_MAGIC) {
3451 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3452 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3453 }
3454 if (wol->wolopts & WAKE_UCAST) {
3455 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3456 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3457 }
3458 if (wol->wolopts & WAKE_ARP) {
3459 vptr->wol_opts |= VELOCITY_WOL_ARP;
3460 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3461 }
3462 memcpy(vptr->wol_passwd, wol->sopass, 6);
3463 return 0;
3464 }
3465
3466 static u32 velocity_get_msglevel(struct net_device *dev)
3467 {
3468 return msglevel;
3469 }
3470
3471 static void velocity_set_msglevel(struct net_device *dev, u32 value)
3472 {
3473 msglevel = value;
3474 }
3475
3476 static int get_pending_timer_val(int val)
3477 {
3478 int mult_bits = val >> 6;
3479 int mult = 1;
3480
3481 switch (mult_bits)
3482 {
3483 case 1:
3484 mult = 4; break;
3485 case 2:
3486 mult = 16; break;
3487 case 3:
3488 mult = 64; break;
3489 case 0:
3490 default:
3491 break;
3492 }
3493
3494 return (val & 0x3f) * mult;
3495 }
3496
3497 static void set_pending_timer_val(int *val, u32 us)
3498 {
3499 u8 mult = 0;
3500 u8 shift = 0;
3501
3502 if (us >= 0x3f) {
3503 mult = 1;
3504 shift = 2;
3505 }
3506 if (us >= 0x3f * 4) {
3507 mult = 2;
3508 shift = 4;
3509 }
3510 if (us >= 0x3f * 16) {
3511 mult = 3;
3512 shift = 6;
3513 }
3514
3515 *val = (mult << 6) | ((us >> shift) & 0x3f);
3516 }
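
A worked round trip through the two helpers above (illustrative): the microsecond value is stored as a 6-bit count with a 2-bit multiplier selecting x1/x4/x16/x64 units, so values are quantized to the selected unit.

#include <assert.h>

int main(void)
{
	unsigned int us = 100;
	int val;

	/* encode: 100 >= 0x3f, so mult = 1 (x4 units) and shift = 2 */
	val = (1 << 6) | ((us >> 2) & 0x3f);		/* 0x59 */

	/* decode: mult_bits = 1 selects x4; (0x59 & 0x3f) * 4 == 100 */
	assert((val & 0x3f) * 4 == us);
	return 0;
}
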
3517
3518
3519 static int velocity_get_coalesce(struct net_device *dev,
3520 struct ethtool_coalesce *ecmd)
3521 {
3522 struct velocity_info *vptr = netdev_priv(dev);
3523
3524 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3525 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3526
3527 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3528 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3529
3530 return 0;
3531 }
3532
3533 static int velocity_set_coalesce(struct net_device *dev,
3534 struct ethtool_coalesce *ecmd)
3535 {
3536 struct velocity_info *vptr = netdev_priv(dev);
3537 int max_us = 0x3f * 64;
3538 unsigned long flags;
3539
3540 /* the timer is a 6-bit count scaled by a 1/4/16/64 multiplier */
3541 if (ecmd->tx_coalesce_usecs > max_us)
3542 return -EINVAL;
3543 if (ecmd->rx_coalesce_usecs > max_us)
3544 return -EINVAL;
3545
3546 if (ecmd->tx_max_coalesced_frames > 0xff)
3547 return -EINVAL;
3548 if (ecmd->rx_max_coalesced_frames > 0xff)
3549 return -EINVAL;
3550
3551 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3552 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3553
3554 set_pending_timer_val(&vptr->options.rxqueue_timer,
3555 ecmd->rx_coalesce_usecs);
3556 set_pending_timer_val(&vptr->options.txqueue_timer,
3557 ecmd->tx_coalesce_usecs);
3558
3559 /* Setup the interrupt suppression and queue timers */
3560 spin_lock_irqsave(&vptr->lock, flags);
3561 mac_disable_int(vptr->mac_regs);
3562 setup_adaptive_interrupts(vptr);
3563 setup_queue_timers(vptr);
3564
3565 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3566 mac_clear_isr(vptr->mac_regs);
3567 mac_enable_int(vptr->mac_regs);
3568 spin_unlock_irqrestore(&vptr->lock, flags);
3569
3570 return 0;
3571 }
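
From userspace these hooks map onto the standard coalescing controls, e.g. "ethtool -C eth0 rx-usecs 60 rx-frames 16" (interface name assumed): rx-usecs is encoded into options.rxqueue_timer and rx-frames into options.rx_intsup, with the microsecond value quantized by set_pending_timer_val() above.
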
3572
3573 static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3574 "rx_all",
3575 "rx_ok",
3576 "tx_ok",
3577 "rx_error",
3578 "rx_runt_ok",
3579 "rx_runt_err",
3580 "rx_64",
3581 "tx_64",
3582 "rx_65_to_127",
3583 "tx_65_to_127",
3584 "rx_128_to_255",
3585 "tx_128_to_255",
3586 "rx_256_to_511",
3587 "tx_256_to_511",
3588 "rx_512_to_1023",
3589 "tx_512_to_1023",
3590 "rx_1024_to_1518",
3591 "tx_1024_to_1518",
3592 "tx_ether_collisions",
3593 "rx_crc_errors",
3594 "rx_jumbo",
3595 "tx_jumbo",
3596 "rx_mac_control_frames",
3597 "tx_mac_control_frames",
3598 "rx_frame_alignment_errors",
3599 "rx_long_ok",
3600 "rx_long_err",
3601 "tx_sqe_errors",
3602 "rx_no_buf",
3603 "rx_symbol_errors",
3604 "in_range_length_errors",
3605 "late_collisions"
3606 };
3607
3608 static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3609 {
3610 switch (sset) {
3611 case ETH_SS_STATS:
3612 memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3613 break;
3614 }
3615 }
3616
3617 static int velocity_get_sset_count(struct net_device *dev, int sset)
3618 {
3619 switch (sset) {
3620 case ETH_SS_STATS:
3621 return ARRAY_SIZE(velocity_gstrings);
3622 default:
3623 return -EOPNOTSUPP;
3624 }
3625 }
3626
3627 static void velocity_get_ethtool_stats(struct net_device *dev,
3628 struct ethtool_stats *stats, u64 *data)
3629 {
3630 if (netif_running(dev)) {
3631 struct velocity_info *vptr = netdev_priv(dev);
3632 u32 *p = vptr->mib_counter;
3633 int i;
3634
3635 spin_lock_irq(&vptr->lock);
3636 velocity_update_hw_mibs(vptr);
3637 spin_unlock_irq(&vptr->lock);
3638
3639 for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3640 *data++ = *p++;
3641 }
3642 }
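
These MIB counters are what "ethtool -S" reports: the core pairs velocity_get_strings() with velocity_get_ethtool_stats(), so each label in velocity_gstrings[] corresponds positionally to one mib_counter[] slot.
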
3643
3644 static const struct ethtool_ops velocity_ethtool_ops = {
3645 .get_drvinfo = velocity_get_drvinfo,
3646 .get_wol = velocity_ethtool_get_wol,
3647 .set_wol = velocity_ethtool_set_wol,
3648 .get_msglevel = velocity_get_msglevel,
3649 .set_msglevel = velocity_set_msglevel,
3650 .get_link = velocity_get_link,
3651 .get_strings = velocity_get_strings,
3652 .get_sset_count = velocity_get_sset_count,
3653 .get_ethtool_stats = velocity_get_ethtool_stats,
3654 .get_coalesce = velocity_get_coalesce,
3655 .set_coalesce = velocity_set_coalesce,
3656 .begin = velocity_ethtool_up,
3657 .complete = velocity_ethtool_down,
3658 .get_link_ksettings = velocity_get_link_ksettings,
3659 .set_link_ksettings = velocity_set_link_ksettings,
3660 };
3661
3662 #if defined(CONFIG_PM) && defined(CONFIG_INET)
3663 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3664 {
3665 struct in_ifaddr *ifa = ptr;
3666 struct net_device *dev = ifa->ifa_dev->dev;
3667
3668 if (dev_net(dev) == &init_net &&
3669 dev->netdev_ops == &velocity_netdev_ops)
3670 velocity_get_ip(netdev_priv(dev));
3671
3672 return NOTIFY_DONE;
3673 }
3674
3675 static struct notifier_block velocity_inetaddr_notifier = {
3676 .notifier_call = velocity_netdev_event,
3677 };
3678
3679 static void velocity_register_notifier(void)
3680 {
3681 register_inetaddr_notifier(&velocity_inetaddr_notifier);
3682 }
3683
3684 static void velocity_unregister_notifier(void)
3685 {
3686 unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3687 }
3688
3689 #else
3690
3691 #define velocity_register_notifier() do {} while (0)
3692 #define velocity_unregister_notifier() do {} while (0)
3693
3694 #endif
3695
3696 /**
3697  *	velocity_init_module	-	load time function
3698  *
3699  *	Called when the velocity module is loaded. The PCI and platform
3700  *	drivers are registered with their respective layers, which in
3701  *	turn call the probe functions for each velocity adapter installed
3702  *	in the system.
3703  */
3704 static int __init velocity_init_module(void)
3705 {
3706 int ret_pci, ret_platform;
3707
3708 velocity_register_notifier();
3709
3710 ret_pci = pci_register_driver(&velocity_pci_driver);
3711 ret_platform = platform_driver_register(&velocity_platform_driver);
3712
3713 /* if both registrations failed, remove the notifier */
3714 if ((ret_pci < 0) && (ret_platform < 0)) {
3715 velocity_unregister_notifier();
3716 return ret_pci;
3717 }
3718
3719 return 0;
3720 }
3721
3722 /**
3723  *	velocity_cleanup_module	-	module unload
3724  *
3725  *	Called when the velocity module is unloaded. Cleans up the
3726  *	inetaddr notifier and unregisters the PCI and platform drivers,
3727  *	cleaning up each device as it is removed.
3728  */
3729
3730 static void __exit velocity_cleanup_module(void)
3731 {
3732 velocity_unregister_notifier();
3733
3734 pci_unregister_driver(&velocity_pci_driver);
3735 platform_driver_unregister(&velocity_platform_driver);
3736 }
3737
3738 module_init(velocity_init_module);
3739 module_exit(velocity_cleanup_module);