This source file includes the following definitions:
- atl2_sw_init
- atl2_set_multi
- init_ring_ptrs
- atl2_configure
- atl2_setup_ring_resources
- atl2_irq_enable
- atl2_irq_disable
- __atl2_vlan_mode
- atl2_vlan_mode
- atl2_restore_vlan
- atl2_fix_features
- atl2_set_features
- atl2_intr_rx
- atl2_intr_tx
- atl2_check_for_link
- atl2_clear_phy_int
- atl2_intr
- atl2_request_irq
- atl2_free_ring_resources
- atl2_open
- atl2_down
- atl2_free_irq
- atl2_close
- TxsFreeUnit
- TxdFreeBytes
- atl2_xmit_frame
- atl2_change_mtu
- atl2_set_mac
- atl2_mii_ioctl
- atl2_ioctl
- atl2_tx_timeout
- atl2_watchdog
- atl2_phy_config
- atl2_up
- atl2_reinit_locked
- atl2_reset_task
- atl2_setup_mac_ctrl
- atl2_check_link
- atl2_link_chg_task
- atl2_setup_pcicmd
- atl2_poll_controller
- atl2_probe
- atl2_remove
- atl2_suspend
- atl2_resume
- atl2_shutdown
- atl2_init_module
- atl2_exit_module
- atl2_read_pci_cfg
- atl2_write_pci_cfg
- atl2_get_link_ksettings
- atl2_set_link_ksettings
- atl2_get_msglevel
- atl2_set_msglevel
- atl2_get_regs_len
- atl2_get_regs
- atl2_get_eeprom_len
- atl2_get_eeprom
- atl2_set_eeprom
- atl2_get_drvinfo
- atl2_get_wol
- atl2_set_wol
- atl2_nway_reset
- atl2_reset_hw
- atl2_spi_read
- get_permanent_address
- atl2_read_mac_addr
- atl2_hash_mc_addr
- atl2_hash_set
- atl2_init_pcie
- atl2_init_flash_opcode
- atl2_init_hw
- atl2_get_speed_and_duplex
- atl2_read_phy_reg
- atl2_write_phy_reg
- atl2_phy_setup_autoneg_adv
- atl2_phy_commit
- atl2_phy_init
- atl2_set_mac_addr
- atl2_check_eeprom_exist
- atl2_write_eeprom
- atl2_read_eeprom
- atl2_force_ps
- atl2_validate_option
- atl2_check_options
1
2
3
4
5
6
7
8
9
10 #include <linux/atomic.h>
11 #include <linux/crc32.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/hardirq.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/interrupt.h>
19 #include <linux/ip.h>
20 #include <linux/irqflags.h>
21 #include <linux/irqreturn.h>
22 #include <linux/mii.h>
23 #include <linux/net.h>
24 #include <linux/netdevice.h>
25 #include <linux/pci.h>
26 #include <linux/pci_ids.h>
27 #include <linux/pm.h>
28 #include <linux/skbuff.h>
29 #include <linux/slab.h>
30 #include <linux/spinlock.h>
31 #include <linux/string.h>
32 #include <linux/tcp.h>
33 #include <linux/timer.h>
34 #include <linux/types.h>
35 #include <linux/workqueue.h>
36
37 #include "atl2.h"
38
39 #define ATL2_DRV_VERSION "2.2.3"
40
41 static const char atl2_driver_name[] = "atl2";
42 static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
43 static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
44 static const char atl2_driver_version[] = ATL2_DRV_VERSION;
45 static const struct ethtool_ops atl2_ethtool_ops;
46
47 MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
48 MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
49 MODULE_LICENSE("GPL");
50 MODULE_VERSION(ATL2_DRV_VERSION);
51
52
53
54
55 static const struct pci_device_id atl2_pci_tbl[] = {
56 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
57
58 {0,}
59 };
60 MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
61
62 static void atl2_check_options(struct atl2_adapter *adapter);
63
64
65
66
67
68
69
70
71
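/*
 * atl2_sw_init - initialize general software structures
 * @adapter: board private structure to initialize
 *
 * Caches the PCI identification of the device in the hw structure and
 * sets the driver defaults: interrupt coalescing timers, MAC inter-packet
 * gap and collision parameters, flow-control thresholds and the maximum
 * frame size, then marks the adapter as down.
 */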
72 static int atl2_sw_init(struct atl2_adapter *adapter)
73 {
74 struct atl2_hw *hw = &adapter->hw;
75 struct pci_dev *pdev = adapter->pdev;
76
77
78 hw->vendor_id = pdev->vendor;
79 hw->device_id = pdev->device;
80 hw->subsystem_vendor_id = pdev->subsystem_vendor;
81 hw->subsystem_id = pdev->subsystem_device;
82 hw->revision_id = pdev->revision;
83
84 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
85
86 adapter->wol = 0;
87 adapter->ict = 50000;
88 adapter->link_speed = SPEED_0;
89 adapter->link_duplex = FULL_DUPLEX;
90
91 hw->phy_configured = false;
92 hw->preamble_len = 7;
93 hw->ipgt = 0x60;
94 hw->min_ifg = 0x50;
95 hw->ipgr1 = 0x40;
96 hw->ipgr2 = 0x60;
97 hw->retry_buf = 2;
98 hw->max_retry = 0xf;
99 hw->lcol = 0x37;
100 hw->jam_ipg = 7;
101 hw->fc_rxd_hi = 0;
102 hw->fc_rxd_lo = 0;
103 hw->max_frame_size = adapter->netdev->mtu;
104
105 spin_lock_init(&adapter->stats_lock);
106
107 set_bit(__ATL2_DOWN, &adapter->flags);
108
109 return 0;
110 }
111
112
113
114
115
116
117
118
119
120
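/*
 * atl2_set_multi - set the RX mode (promiscuous and multicast filtering)
 * @netdev: network interface device structure
 *
 * Updates MAC_CTRL according to IFF_PROMISC/IFF_ALLMULTI, clears the
 * 64-bit multicast hash table and sets a hash bit for every address in
 * the device's current multicast list.
 */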
121 static void atl2_set_multi(struct net_device *netdev)
122 {
123 struct atl2_adapter *adapter = netdev_priv(netdev);
124 struct atl2_hw *hw = &adapter->hw;
125 struct netdev_hw_addr *ha;
126 u32 rctl;
127 u32 hash_value;
128
129
130 rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
131
132 if (netdev->flags & IFF_PROMISC) {
133 rctl |= MAC_CTRL_PROMIS_EN;
134 } else if (netdev->flags & IFF_ALLMULTI) {
135 rctl |= MAC_CTRL_MC_ALL_EN;
136 rctl &= ~MAC_CTRL_PROMIS_EN;
137 } else
138 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
139
140 ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
141
142
143 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
144 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
145
146
147 netdev_for_each_mc_addr(ha, netdev) {
148 hash_value = atl2_hash_mc_addr(hw, ha->addr);
149 atl2_hash_set(hw, hash_value);
150 }
151 }
152
153 static void init_ring_ptrs(struct atl2_adapter *adapter)
154 {
155
156 adapter->txd_write_ptr = 0;
157 atomic_set(&adapter->txd_read_ptr, 0);
158
159 adapter->rxd_read_ptr = 0;
160 adapter->rxd_write_ptr = 0;
161
162 atomic_set(&adapter->txs_write_ptr, 0);
163 adapter->txs_next_clear = 0;
164 }
165
166
167
168
169
170
171
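/*
 * atl2_configure - configure the Transmit and Receive units after reset
 * @adapter: board private structure
 *
 * Programs the station address, the descriptor ring base addresses and
 * sizes, inter-packet gap and half-duplex timing, interrupt moderation,
 * MTU and flow-control thresholds, then enables the DMA read/write
 * engines.  Returns non-zero if a PHY link-down interrupt was already
 * pending.
 */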
172 static int atl2_configure(struct atl2_adapter *adapter)
173 {
174 struct atl2_hw *hw = &adapter->hw;
175 u32 value;
176
177
178 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
179
180
181 value = (((u32)hw->mac_addr[2]) << 24) |
182 (((u32)hw->mac_addr[3]) << 16) |
183 (((u32)hw->mac_addr[4]) << 8) |
184 (((u32)hw->mac_addr[5]));
185 ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
186 value = (((u32)hw->mac_addr[0]) << 8) |
187 (((u32)hw->mac_addr[1]));
188 ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
189
190
191 ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
192 (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
193
194
195 ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
196 (u32)(adapter->txd_dma & 0x00000000ffffffffULL));
197 ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
198 (u32)(adapter->txs_dma & 0x00000000ffffffffULL));
199 ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
200 (u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
201
202
203 ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
204 ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
205 ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);
206
207
208
209
210
211
212
213
214 value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
215 MAC_IPG_IFG_IPGT_SHIFT) |
216 (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
217 MAC_IPG_IFG_MIFG_SHIFT) |
218 (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
219 MAC_IPG_IFG_IPGR1_SHIFT)|
220 (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
221 MAC_IPG_IFG_IPGR2_SHIFT);
222 ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
223
224
225 value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
226 (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
227 MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
228 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
229 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
230 (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
231 MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
232 ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
233
234
235 ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
236 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
237
238
239 ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
240
241
242 ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
243 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
244
245
246 ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
247
248
249 ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
250 ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
251
252
253 ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
254 ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
255
256
257 ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
258 ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
259
260 value = ATL2_READ_REG(&adapter->hw, REG_ISR);
261 if ((value & ISR_PHY_LINKDOWN) != 0)
262 value = 1;
263 else
264 value = 0;
265
266
267 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
268 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
269 return value;
270 }
271
272
273
274
275
276
277
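/*
 * atl2_setup_ring_resources - allocate Tx/Rx descriptor resources
 * @adapter: board private structure
 *
 * Allocates a single DMA-coherent block holding the TX descriptor ring,
 * the TX status ring and the RX descriptor ring back to back, aligning
 * the TX rings on 8-byte boundaries and offsetting the RX ring relative
 * to a 128-byte boundary.  Returns 0 on success, negative on failure.
 */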
278 static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
279 {
280 struct pci_dev *pdev = adapter->pdev;
281 int size;
282 u8 offset = 0;
283
284
285 adapter->ring_size = size =
286 adapter->txd_ring_size * 1 + 7 +
287 adapter->txs_ring_size * 4 + 7 +
288 adapter->rxd_ring_size * 1536 + 127;
289
290 adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
291 &adapter->ring_dma);
292 if (!adapter->ring_vir_addr)
293 return -ENOMEM;
294
295
296 adapter->txd_dma = adapter->ring_dma;
297 offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
298 adapter->txd_dma += offset;
299 adapter->txd_ring = adapter->ring_vir_addr + offset;
300
301
302 adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
303 offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
304 adapter->txs_dma += offset;
305 adapter->txs_ring = (struct tx_pkt_status *)
306 (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
307
308
309 adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
310 offset = (adapter->rxd_dma & 127) ?
311 (128 - (adapter->rxd_dma & 127)) : 0;
312 if (offset > 7)
313 offset -= 8;
314 else
315 offset += (128 - 8);
316
317 adapter->rxd_dma += offset;
318 adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
319 (adapter->txs_ring_size * 4 + offset));
320
321
322
323
324
325 return 0;
326 }
327
328
329
330
331
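/*
 * atl2_irq_enable - enable the default interrupt generation settings
 * @adapter: board private structure
 */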
332 static inline void atl2_irq_enable(struct atl2_adapter *adapter)
333 {
334 ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
335 ATL2_WRITE_FLUSH(&adapter->hw);
336 }
337
338
339
340
341
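/*
 * atl2_irq_disable - mask interrupt generation on the NIC
 * @adapter: board private structure
 */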
342 static inline void atl2_irq_disable(struct atl2_adapter *adapter)
343 {
344 ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
345 ATL2_WRITE_FLUSH(&adapter->hw);
346 synchronize_irq(adapter->pdev->irq);
347 }
348
349 static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
350 {
351 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
352
353 *ctrl |= MAC_CTRL_RMV_VLAN;
354 } else {
355
356 *ctrl &= ~MAC_CTRL_RMV_VLAN;
357 }
358 }
359
360 static void atl2_vlan_mode(struct net_device *netdev,
361 netdev_features_t features)
362 {
363 struct atl2_adapter *adapter = netdev_priv(netdev);
364 u32 ctrl;
365
366 atl2_irq_disable(adapter);
367
368 ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
369 __atl2_vlan_mode(features, &ctrl);
370 ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
371
372 atl2_irq_enable(adapter);
373 }
374
375 static void atl2_restore_vlan(struct atl2_adapter *adapter)
376 {
377 atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
378 }
379
380 static netdev_features_t atl2_fix_features(struct net_device *netdev,
381 netdev_features_t features)
382 {
383
384
385
386
387 if (features & NETIF_F_HW_VLAN_CTAG_RX)
388 features |= NETIF_F_HW_VLAN_CTAG_TX;
389 else
390 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
391
392 return features;
393 }
394
395 static int atl2_set_features(struct net_device *netdev,
396 netdev_features_t features)
397 {
398 netdev_features_t changed = netdev->features ^ features;
399
400 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
401 atl2_vlan_mode(netdev, features);
402
403 return 0;
404 }
405
406 static void atl2_intr_rx(struct atl2_adapter *adapter)
407 {
408 struct net_device *netdev = adapter->netdev;
409 struct rx_desc *rxd;
410 struct sk_buff *skb;
411
412 do {
413 rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
414 if (!rxd->status.update)
415 break;
416
417
418 rxd->status.update = 0;
419
420 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
421 int rx_size = (int)(rxd->status.pkt_size - 4);
422
423 skb = netdev_alloc_skb_ip_align(netdev, rx_size);
424 if (NULL == skb) {
425
426
427
428
429 netdev->stats.rx_dropped++;
430 break;
431 }
432 memcpy(skb->data, rxd->packet, rx_size);
433 skb_put(skb, rx_size);
434 skb->protocol = eth_type_trans(skb, netdev);
435 if (rxd->status.vlan) {
436 u16 vlan_tag = (rxd->status.vtag>>4) |
437 ((rxd->status.vtag&7) << 13) |
438 ((rxd->status.vtag&8) << 9);
439
440 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
441 }
442 netif_rx(skb);
443 netdev->stats.rx_bytes += rx_size;
444 netdev->stats.rx_packets++;
445 } else {
446 netdev->stats.rx_errors++;
447
448 if (rxd->status.ok && rxd->status.pkt_size <= 60)
449 netdev->stats.rx_length_errors++;
450 if (rxd->status.mcast)
451 netdev->stats.multicast++;
452 if (rxd->status.crc)
453 netdev->stats.rx_crc_errors++;
454 if (rxd->status.align)
455 netdev->stats.rx_frame_errors++;
456 }
457
458
459 if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
460 adapter->rxd_write_ptr = 0;
461 } while (1);
462
463
464 adapter->rxd_read_ptr = adapter->rxd_write_ptr;
465 ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
466 }
467
468 static void atl2_intr_tx(struct atl2_adapter *adapter)
469 {
470 struct net_device *netdev = adapter->netdev;
471 u32 txd_read_ptr;
472 u32 txs_write_ptr;
473 struct tx_pkt_status *txs;
474 struct tx_pkt_header *txph;
475 int free_hole = 0;
476
477 do {
478 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
479 txs = adapter->txs_ring + txs_write_ptr;
480 if (!txs->update)
481 break;
482
483 free_hole = 1;
484 txs->update = 0;
485
486 if (++txs_write_ptr == adapter->txs_ring_size)
487 txs_write_ptr = 0;
488 atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
489
490 txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
491 txph = (struct tx_pkt_header *)
492 (((u8 *)adapter->txd_ring) + txd_read_ptr);
493
494 if (txph->pkt_size != txs->pkt_size) {
495 struct tx_pkt_status *old_txs = txs;
496 printk(KERN_WARNING
497 "%s: txs packet size not consistent with txd"
498 " txd_:0x%08x, txs_:0x%08x!\n",
499 adapter->netdev->name,
500 *(u32 *)txph, *(u32 *)txs);
501 printk(KERN_WARNING
502 "txd read ptr: 0x%x\n",
503 txd_read_ptr);
504 txs = adapter->txs_ring + txs_write_ptr;
505 printk(KERN_WARNING
506 "txs-behind:0x%08x\n",
507 *(u32 *)txs);
508 if (txs_write_ptr < 2) {
509 txs = adapter->txs_ring +
510 (adapter->txs_ring_size +
511 txs_write_ptr - 2);
512 } else {
513 txs = adapter->txs_ring + (txs_write_ptr - 2);
514 }
515 printk(KERN_WARNING
516 "txs-before:0x%08x\n",
517 *(u32 *)txs);
518 txs = old_txs;
519 }
520
521
522 txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
523 if (txd_read_ptr >= adapter->txd_ring_size)
524 txd_read_ptr -= adapter->txd_ring_size;
525
526 atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
527
528
529 if (txs->ok) {
530 netdev->stats.tx_bytes += txs->pkt_size;
531 netdev->stats.tx_packets++;
532 }
533 else
534 netdev->stats.tx_errors++;
535
536 if (txs->defer)
537 netdev->stats.collisions++;
538 if (txs->abort_col)
539 netdev->stats.tx_aborted_errors++;
540 if (txs->late_col)
541 netdev->stats.tx_window_errors++;
542 if (txs->underrun)
543 netdev->stats.tx_fifo_errors++;
544 } while (1);
545
546 if (free_hole) {
547 if (netif_queue_stopped(adapter->netdev) &&
548 netif_carrier_ok(adapter->netdev))
549 netif_wake_queue(adapter->netdev);
550 }
551 }
552
553 static void atl2_check_for_link(struct atl2_adapter *adapter)
554 {
555 struct net_device *netdev = adapter->netdev;
556 u16 phy_data = 0;
557
558 spin_lock(&adapter->stats_lock);
559 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
560 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
561 spin_unlock(&adapter->stats_lock);
562
563
564 if (!(phy_data & BMSR_LSTATUS)) {
565 if (netif_carrier_ok(netdev)) {
566 printk(KERN_INFO "%s: %s NIC Link is Down\n",
567 atl2_driver_name, netdev->name);
568 adapter->link_speed = SPEED_0;
569 netif_carrier_off(netdev);
570 netif_stop_queue(netdev);
571 }
572 }
573 schedule_work(&adapter->link_chg_task);
574 }
575
576 static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
577 {
578 u16 phy_data;
579 spin_lock(&adapter->stats_lock);
580 atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
581 spin_unlock(&adapter->stats_lock);
582 }
583
584
585
586
587
588
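/*
 * atl2_intr - interrupt handler
 * @irq: interrupt number
 * @data: pointer to the network interface device structure
 *
 * Acknowledges the pending interrupt status, schedules the reset task on
 * PHY link-down or DMA timeout events, and dispatches TX completion, RX
 * reception and link-change handling.
 */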
589 static irqreturn_t atl2_intr(int irq, void *data)
590 {
591 struct atl2_adapter *adapter = netdev_priv(data);
592 struct atl2_hw *hw = &adapter->hw;
593 u32 status;
594
595 status = ATL2_READ_REG(hw, REG_ISR);
596 if (0 == status)
597 return IRQ_NONE;
598
599
600 if (status & ISR_PHY)
601 atl2_clear_phy_int(adapter);
602
603
604 ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
605
606
607 if (status & ISR_PHY_LINKDOWN) {
608 if (netif_running(adapter->netdev)) {
609 ATL2_WRITE_REG(hw, REG_ISR, 0);
610 ATL2_WRITE_REG(hw, REG_IMR, 0);
611 ATL2_WRITE_FLUSH(hw);
612 schedule_work(&adapter->reset_task);
613 return IRQ_HANDLED;
614 }
615 }
616
617
618 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
619 ATL2_WRITE_REG(hw, REG_ISR, 0);
620 ATL2_WRITE_REG(hw, REG_IMR, 0);
621 ATL2_WRITE_FLUSH(hw);
622 schedule_work(&adapter->reset_task);
623 return IRQ_HANDLED;
624 }
625
626
627 if (status & (ISR_PHY | ISR_MANUAL)) {
628 adapter->netdev->stats.tx_carrier_errors++;
629 atl2_check_for_link(adapter);
630 }
631
632
633 if (status & ISR_TX_EVENT)
634 atl2_intr_tx(adapter);
635
636
637 if (status & ISR_RX_EVENT)
638 atl2_intr_rx(adapter);
639
640
641 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
642 return IRQ_HANDLED;
643 }
644
645 static int atl2_request_irq(struct atl2_adapter *adapter)
646 {
647 struct net_device *netdev = adapter->netdev;
648 int flags, err = 0;
649
650 flags = IRQF_SHARED;
651 adapter->have_msi = true;
652 err = pci_enable_msi(adapter->pdev);
653 if (err)
654 adapter->have_msi = false;
655
656 if (adapter->have_msi)
657 flags &= ~IRQF_SHARED;
658
659 return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
660 netdev);
661 }
662
663
664
665
666
667
668
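/*
 * atl2_free_ring_resources - free Tx/Rx descriptor resources
 * @adapter: board private structure
 *
 * Releases the DMA-coherent block allocated by atl2_setup_ring_resources().
 */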
669 static void atl2_free_ring_resources(struct atl2_adapter *adapter)
670 {
671 struct pci_dev *pdev = adapter->pdev;
672 pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
673 adapter->ring_dma);
674 }
675
676
677
678
679
680
681
682
683
684
685
686
687
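/*
 * atl2_open - called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, a negative value on failure.
 *
 * Allocates the descriptor rings, initializes the hardware, restores the
 * RX mode and VLAN settings, requests the IRQ, arms the watchdog timer
 * and enables interrupts.
 */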
688 static int atl2_open(struct net_device *netdev)
689 {
690 struct atl2_adapter *adapter = netdev_priv(netdev);
691 int err;
692 u32 val;
693
694
695 if (test_bit(__ATL2_TESTING, &adapter->flags))
696 return -EBUSY;
697
698
699 err = atl2_setup_ring_resources(adapter);
700 if (err)
701 return err;
702
703 err = atl2_init_hw(&adapter->hw);
704 if (err) {
705 err = -EIO;
706 goto err_init_hw;
707 }
708
709
710 atl2_set_multi(netdev);
711 init_ring_ptrs(adapter);
712
713 atl2_restore_vlan(adapter);
714
715 if (atl2_configure(adapter)) {
716 err = -EIO;
717 goto err_config;
718 }
719
720 err = atl2_request_irq(adapter);
721 if (err)
722 goto err_req_irq;
723
724 clear_bit(__ATL2_DOWN, &adapter->flags);
725
726 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ));
727
728 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
729 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
730 val | MASTER_CTRL_MANUAL_INT);
731
732 atl2_irq_enable(adapter);
733
734 return 0;
735
736 err_init_hw:
737 err_req_irq:
738 err_config:
739 atl2_free_ring_resources(adapter);
740 atl2_reset_hw(&adapter->hw);
741
742 return err;
743 }
744
745 static void atl2_down(struct atl2_adapter *adapter)
746 {
747 struct net_device *netdev = adapter->netdev;
748
749
750
751 set_bit(__ATL2_DOWN, &adapter->flags);
752
753 netif_tx_disable(netdev);
754
755
756 atl2_reset_hw(&adapter->hw);
757 msleep(1);
758
759 atl2_irq_disable(adapter);
760
761 del_timer_sync(&adapter->watchdog_timer);
762 del_timer_sync(&adapter->phy_config_timer);
763 clear_bit(0, &adapter->cfg_phy);
764
765 netif_carrier_off(netdev);
766 adapter->link_speed = SPEED_0;
767 adapter->link_duplex = -1;
768 }
769
770 static void atl2_free_irq(struct atl2_adapter *adapter)
771 {
772 struct net_device *netdev = adapter->netdev;
773
774 free_irq(adapter->pdev->irq, netdev);
775
776 #ifdef CONFIG_PCI_MSI
777 if (adapter->have_msi)
778 pci_disable_msi(adapter->pdev);
779 #endif
780 }
781
782
783
784
785
786
787
788
789
790
791
792
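/*
 * atl2_close - disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail.
 *
 * Brings the adapter down, frees the IRQ and releases the descriptor
 * ring resources.
 */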
793 static int atl2_close(struct net_device *netdev)
794 {
795 struct atl2_adapter *adapter = netdev_priv(netdev);
796
797 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
798
799 atl2_down(adapter);
800 atl2_free_irq(adapter);
801 atl2_free_ring_resources(adapter);
802
803 return 0;
804 }
805
806 static inline int TxsFreeUnit(struct atl2_adapter *adapter)
807 {
808 u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
809
810 return (adapter->txs_next_clear >= txs_write_ptr) ?
811 (int) (adapter->txs_ring_size - adapter->txs_next_clear +
812 txs_write_ptr - 1) :
813 (int) (txs_write_ptr - adapter->txs_next_clear - 1);
814 }
815
816 static inline int TxdFreeBytes(struct atl2_adapter *adapter)
817 {
818 u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
819
820 return (adapter->txd_write_ptr >= txd_read_ptr) ?
821 (int) (adapter->txd_ring_size - adapter->txd_write_ptr +
822 txd_read_ptr - 1) :
823 (int) (txd_read_ptr - adapter->txd_write_ptr - 1);
824 }
825
826 static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
827 struct net_device *netdev)
828 {
829 struct atl2_adapter *adapter = netdev_priv(netdev);
830 struct tx_pkt_header *txph;
831 u32 offset, copy_len;
832 int txs_unused;
833 int txbuf_unused;
834
835 if (test_bit(__ATL2_DOWN, &adapter->flags)) {
836 dev_kfree_skb_any(skb);
837 return NETDEV_TX_OK;
838 }
839
840 if (unlikely(skb->len <= 0)) {
841 dev_kfree_skb_any(skb);
842 return NETDEV_TX_OK;
843 }
844
845 txs_unused = TxsFreeUnit(adapter);
846 txbuf_unused = TxdFreeBytes(adapter);
847
848 if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
849 txs_unused < 1) {
850
851 netif_stop_queue(netdev);
852 return NETDEV_TX_BUSY;
853 }
854
855 offset = adapter->txd_write_ptr;
856
857 txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
858
859 *(u32 *)txph = 0;
860 txph->pkt_size = skb->len;
861
862 offset += 4;
863 if (offset >= adapter->txd_ring_size)
864 offset -= adapter->txd_ring_size;
865 copy_len = adapter->txd_ring_size - offset;
866 if (copy_len >= skb->len) {
867 memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
868 offset += ((u32)(skb->len + 3) & ~3);
869 } else {
870 memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
871 memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
872 skb->len-copy_len);
873 offset = ((u32)(skb->len-copy_len + 3) & ~3);
874 }
875 #ifdef NETIF_F_HW_VLAN_CTAG_TX
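/*
 * The TX packet header carries the VLAN tag with its bit fields permuted;
 * this is the inverse of the remapping atl2_intr_rx() applies to tags on
 * received frames.
 */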
876 if (skb_vlan_tag_present(skb)) {
877 u16 vlan_tag = skb_vlan_tag_get(skb);
878 vlan_tag = (vlan_tag << 4) |
879 (vlan_tag >> 13) |
880 ((vlan_tag >> 9) & 0x8);
881 txph->ins_vlan = 1;
882 txph->vlan = vlan_tag;
883 }
884 #endif
885 if (offset >= adapter->txd_ring_size)
886 offset -= adapter->txd_ring_size;
887 adapter->txd_write_ptr = offset;
888
889
890 adapter->txs_ring[adapter->txs_next_clear].update = 0;
891 if (++adapter->txs_next_clear == adapter->txs_ring_size)
892 adapter->txs_next_clear = 0;
893
894 ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
895 (adapter->txd_write_ptr >> 2));
896
897 dev_consume_skb_any(skb);
898 return NETDEV_TX_OK;
899 }
900
901
902
903
904
905
906
907
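/*
 * atl2_change_mtu - change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Updates the software MTU and reprograms the REG_MTU register with the
 * full frame size (MTU + Ethernet header + VLAN tag + FCS).
 */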
908 static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
909 {
910 struct atl2_adapter *adapter = netdev_priv(netdev);
911 struct atl2_hw *hw = &adapter->hw;
912
913
914 netdev->mtu = new_mtu;
915 hw->max_frame_size = new_mtu;
916 ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
917 VLAN_HLEN + ETH_FCS_LEN);
918
919 return 0;
920 }
921
922
923
924
925
926
927
928
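/*
 * atl2_set_mac - change the Ethernet address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to a sockaddr holding the new address
 *
 * Returns 0 on success; -EADDRNOTAVAIL for an invalid address and -EBUSY
 * while the interface is running.
 */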
929 static int atl2_set_mac(struct net_device *netdev, void *p)
930 {
931 struct atl2_adapter *adapter = netdev_priv(netdev);
932 struct sockaddr *addr = p;
933
934 if (!is_valid_ether_addr(addr->sa_data))
935 return -EADDRNOTAVAIL;
936
937 if (netif_running(netdev))
938 return -EBUSY;
939
940 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
941 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
942
943 atl2_set_mac_addr(&adapter->hw);
944
945 return 0;
946 }
947
948 static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
949 {
950 struct atl2_adapter *adapter = netdev_priv(netdev);
951 struct mii_ioctl_data *data = if_mii(ifr);
952 unsigned long flags;
953
954 switch (cmd) {
955 case SIOCGMIIPHY:
956 data->phy_id = 0;
957 break;
958 case SIOCGMIIREG:
959 spin_lock_irqsave(&adapter->stats_lock, flags);
960 if (atl2_read_phy_reg(&adapter->hw,
961 data->reg_num & 0x1F, &data->val_out)) {
962 spin_unlock_irqrestore(&adapter->stats_lock, flags);
963 return -EIO;
964 }
965 spin_unlock_irqrestore(&adapter->stats_lock, flags);
966 break;
967 case SIOCSMIIREG:
968 if (data->reg_num & ~(0x1F))
969 return -EFAULT;
970 spin_lock_irqsave(&adapter->stats_lock, flags);
971 if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
972 data->val_in)) {
973 spin_unlock_irqrestore(&adapter->stats_lock, flags);
974 return -EIO;
975 }
976 spin_unlock_irqrestore(&adapter->stats_lock, flags);
977 break;
978 default:
979 return -EOPNOTSUPP;
980 }
981 return 0;
982 }
983
984 static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
985 {
986 switch (cmd) {
987 case SIOCGMIIPHY:
988 case SIOCGMIIREG:
989 case SIOCSMIIREG:
990 return atl2_mii_ioctl(netdev, ifr, cmd);
991 #ifdef ETHTOOL_OPS_COMPAT
992 case SIOCETHTOOL:
993 return ethtool_ioctl(ifr);
994 #endif
995 default:
996 return -EOPNOTSUPP;
997 }
998 }
999
1000
1001
1002
1003
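/*
 * atl2_tx_timeout - respond to a Tx hang by scheduling the reset task
 * @netdev: network interface device structure
 */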
1004 static void atl2_tx_timeout(struct net_device *netdev)
1005 {
1006 struct atl2_adapter *adapter = netdev_priv(netdev);
1007
1008
1009 schedule_work(&adapter->reset_task);
1010 }
1011
1012
1013
1014
1015
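/*
 * atl2_watchdog - timer callback
 * @t: timer used to locate the adapter structure
 *
 * Accumulates the RX descriptor/status overflow drop counters into
 * rx_over_errors and re-arms itself every four seconds while the
 * interface is up.
 */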
1016 static void atl2_watchdog(struct timer_list *t)
1017 {
1018 struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
1019
1020 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1021 u32 drop_rxd, drop_rxs;
1022 unsigned long flags;
1023
1024 spin_lock_irqsave(&adapter->stats_lock, flags);
1025 drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
1026 drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
1027 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1028
1029 adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;
1030
1031
1032 mod_timer(&adapter->watchdog_timer,
1033 round_jiffies(jiffies + 4 * HZ));
1034 }
1035 }
1036
1037
1038
1039
1040
1041 static void atl2_phy_config(struct timer_list *t)
1042 {
1043 struct atl2_adapter *adapter = from_timer(adapter, t,
1044 phy_config_timer);
1045 struct atl2_hw *hw = &adapter->hw;
1046 unsigned long flags;
1047
1048 spin_lock_irqsave(&adapter->stats_lock, flags);
1049 atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1050 atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
1051 MII_CR_RESTART_AUTO_NEG);
1052 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1053 clear_bit(0, &adapter->cfg_phy);
1054 }
1055
1056 static int atl2_up(struct atl2_adapter *adapter)
1057 {
1058 struct net_device *netdev = adapter->netdev;
1059 int err = 0;
1060 u32 val;
1061
1062
1063
1064 err = atl2_init_hw(&adapter->hw);
1065 if (err) {
1066 err = -EIO;
1067 return err;
1068 }
1069
1070 atl2_set_multi(netdev);
1071 init_ring_ptrs(adapter);
1072
1073 atl2_restore_vlan(adapter);
1074
1075 if (atl2_configure(adapter)) {
1076 err = -EIO;
1077 goto err_up;
1078 }
1079
1080 clear_bit(__ATL2_DOWN, &adapter->flags);
1081
1082 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
1083 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
1084 MASTER_CTRL_MANUAL_INT);
1085
1086 atl2_irq_enable(adapter);
1087
1088 err_up:
1089 return err;
1090 }
1091
1092 static void atl2_reinit_locked(struct atl2_adapter *adapter)
1093 {
1094 WARN_ON(in_interrupt());
1095 while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
1096 msleep(1);
1097 atl2_down(adapter);
1098 atl2_up(adapter);
1099 clear_bit(__ATL2_RESETTING, &adapter->flags);
1100 }
1101
1102 static void atl2_reset_task(struct work_struct *work)
1103 {
1104 struct atl2_adapter *adapter;
1105 adapter = container_of(work, struct atl2_adapter, reset_task);
1106
1107 atl2_reinit_locked(adapter);
1108 }
1109
1110 static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
1111 {
1112 u32 value;
1113 struct atl2_hw *hw = &adapter->hw;
1114 struct net_device *netdev = adapter->netdev;
1115
1116
1117 value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1118
1119
1120 if (FULL_DUPLEX == adapter->link_duplex)
1121 value |= MAC_CTRL_DUPLX;
1122
1123
1124 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1125
1126
1127 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1128
1129
1130 value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1131 MAC_CTRL_PRMLEN_SHIFT);
1132
1133
1134 __atl2_vlan_mode(netdev->features, &value);
1135
1136
1137 value |= MAC_CTRL_BC_EN;
1138 if (netdev->flags & IFF_PROMISC)
1139 value |= MAC_CTRL_PROMIS_EN;
1140 else if (netdev->flags & IFF_ALLMULTI)
1141 value |= MAC_CTRL_MC_ALL_EN;
1142
1143
1144 value |= (((u32)(adapter->hw.retry_buf &
1145 MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1146
1147 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1148 }
1149
1150 static int atl2_check_link(struct atl2_adapter *adapter)
1151 {
1152 struct atl2_hw *hw = &adapter->hw;
1153 struct net_device *netdev = adapter->netdev;
1154 int ret_val;
1155 u16 speed, duplex, phy_data;
1156 int reconfig = 0;
1157
1158
1159 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1160 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1161 if (!(phy_data&BMSR_LSTATUS)) {
1162 if (netif_carrier_ok(netdev)) {
1163 u32 value;
1164
1165 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1166 value &= ~MAC_CTRL_RX_EN;
1167 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1168 adapter->link_speed = SPEED_0;
1169 netif_carrier_off(netdev);
1170 netif_stop_queue(netdev);
1171 }
1172 return 0;
1173 }
1174
1175
1176 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1177 if (ret_val)
1178 return ret_val;
1179 switch (hw->MediaType) {
1180 case MEDIA_TYPE_100M_FULL:
1181 if (speed != SPEED_100 || duplex != FULL_DUPLEX)
1182 reconfig = 1;
1183 break;
1184 case MEDIA_TYPE_100M_HALF:
1185 if (speed != SPEED_100 || duplex != HALF_DUPLEX)
1186 reconfig = 1;
1187 break;
1188 case MEDIA_TYPE_10M_FULL:
1189 if (speed != SPEED_10 || duplex != FULL_DUPLEX)
1190 reconfig = 1;
1191 break;
1192 case MEDIA_TYPE_10M_HALF:
1193 if (speed != SPEED_10 || duplex != HALF_DUPLEX)
1194 reconfig = 1;
1195 break;
1196 }
1197
1198 if (reconfig == 0) {
1199 if (adapter->link_speed != speed ||
1200 adapter->link_duplex != duplex) {
1201 adapter->link_speed = speed;
1202 adapter->link_duplex = duplex;
1203 atl2_setup_mac_ctrl(adapter);
1204 printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
1205 atl2_driver_name, netdev->name,
1206 adapter->link_speed,
1207 adapter->link_duplex == FULL_DUPLEX ?
1208 "Full Duplex" : "Half Duplex");
1209 }
1210
1211 if (!netif_carrier_ok(netdev)) {
1212 netif_carrier_on(netdev);
1213 netif_wake_queue(netdev);
1214 }
1215 return 0;
1216 }
1217
1218
1219 if (netif_carrier_ok(netdev)) {
1220 u32 value;
1221
1222 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1223 value &= ~MAC_CTRL_RX_EN;
1224 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1225
1226 adapter->link_speed = SPEED_0;
1227 netif_carrier_off(netdev);
1228 netif_stop_queue(netdev);
1229 }
1230
1231
1232
1233 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1234 if (!test_and_set_bit(0, &adapter->cfg_phy))
1235 mod_timer(&adapter->phy_config_timer,
1236 round_jiffies(jiffies + 5 * HZ));
1237 }
1238
1239 return 0;
1240 }
1241
1242
1243
1244
1245 static void atl2_link_chg_task(struct work_struct *work)
1246 {
1247 struct atl2_adapter *adapter;
1248 unsigned long flags;
1249
1250 adapter = container_of(work, struct atl2_adapter, link_chg_task);
1251
1252 spin_lock_irqsave(&adapter->stats_lock, flags);
1253 atl2_check_link(adapter);
1254 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1255 }
1256
1257 static void atl2_setup_pcicmd(struct pci_dev *pdev)
1258 {
1259 u16 cmd;
1260
1261 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1262
1263 if (cmd & PCI_COMMAND_INTX_DISABLE)
1264 cmd &= ~PCI_COMMAND_INTX_DISABLE;
1265 if (cmd & PCI_COMMAND_IO)
1266 cmd &= ~PCI_COMMAND_IO;
1267 if (0 == (cmd & PCI_COMMAND_MEMORY))
1268 cmd |= PCI_COMMAND_MEMORY;
1269 if (0 == (cmd & PCI_COMMAND_MASTER))
1270 cmd |= PCI_COMMAND_MASTER;
1271 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1272
1273
1274
1275
1276
1277
1278 pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
1279 }
1280
1281 #ifdef CONFIG_NET_POLL_CONTROLLER
1282 static void atl2_poll_controller(struct net_device *netdev)
1283 {
1284 disable_irq(netdev->irq);
1285 atl2_intr(netdev->irq, netdev);
1286 enable_irq(netdev->irq);
1287 }
1288 #endif
1289
1290
1291 static const struct net_device_ops atl2_netdev_ops = {
1292 .ndo_open = atl2_open,
1293 .ndo_stop = atl2_close,
1294 .ndo_start_xmit = atl2_xmit_frame,
1295 .ndo_set_rx_mode = atl2_set_multi,
1296 .ndo_validate_addr = eth_validate_addr,
1297 .ndo_set_mac_address = atl2_set_mac,
1298 .ndo_change_mtu = atl2_change_mtu,
1299 .ndo_fix_features = atl2_fix_features,
1300 .ndo_set_features = atl2_set_features,
1301 .ndo_do_ioctl = atl2_ioctl,
1302 .ndo_tx_timeout = atl2_tx_timeout,
1303 #ifdef CONFIG_NET_POLL_CONTROLLER
1304 .ndo_poll_controller = atl2_poll_controller,
1305 #endif
1306 };
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
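/*
 * atl2_probe - device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in atl2_pci_tbl
 *
 * Returns 0 on success, a negative value on failure.
 *
 * Enables and maps the PCI device, allocates and initializes the
 * net_device and adapter structures, resets the hardware, reads the
 * permanent MAC address and registers the network interface.
 */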
1319 static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1320 {
1321 struct net_device *netdev;
1322 struct atl2_adapter *adapter;
1323 static int cards_found = 0;
1324 unsigned long mmio_start;
1325 int mmio_len;
1326 int err;
1327
1328 err = pci_enable_device(pdev);
1329 if (err)
1330 return err;
1331
1332
1333
1334
1335
1336
1337 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1338 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1339 printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
1340 err = -EIO;
1341 goto err_dma;
1342 }
1343
1344
1345
1346 err = pci_request_regions(pdev, atl2_driver_name);
1347 if (err)
1348 goto err_pci_reg;
1349
1350
1351
1352 pci_set_master(pdev);
1353
1354 netdev = alloc_etherdev(sizeof(struct atl2_adapter));
1355 if (!netdev) {
1356 err = -ENOMEM;
1357 goto err_alloc_etherdev;
1358 }
1359
1360 SET_NETDEV_DEV(netdev, &pdev->dev);
1361
1362 pci_set_drvdata(pdev, netdev);
1363 adapter = netdev_priv(netdev);
1364 adapter->netdev = netdev;
1365 adapter->pdev = pdev;
1366 adapter->hw.back = adapter;
1367
1368 mmio_start = pci_resource_start(pdev, 0x0);
1369 mmio_len = pci_resource_len(pdev, 0x0);
1370
1371 adapter->hw.mem_rang = (u32)mmio_len;
1372 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
1373 if (!adapter->hw.hw_addr) {
1374 err = -EIO;
1375 goto err_ioremap;
1376 }
1377
1378 atl2_setup_pcicmd(pdev);
1379
1380 netdev->netdev_ops = &atl2_netdev_ops;
1381 netdev->ethtool_ops = &atl2_ethtool_ops;
1382 netdev->watchdog_timeo = 5 * HZ;
1383 netdev->min_mtu = 40;
1384 netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN;
1385 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1386
1387 netdev->mem_start = mmio_start;
1388 netdev->mem_end = mmio_start + mmio_len;
1389 adapter->bd_number = cards_found;
1390 adapter->pci_using_64 = false;
1391
1392
1393 err = atl2_sw_init(adapter);
1394 if (err)
1395 goto err_sw_init;
1396
1397 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
1398 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1399
1400
1401 atl2_phy_init(&adapter->hw);
1402
1403
1404
1405
1406 if (atl2_reset_hw(&adapter->hw)) {
1407 err = -EIO;
1408 goto err_reset;
1409 }
1410
1411
1412 atl2_read_mac_addr(&adapter->hw);
1413 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
1414 if (!is_valid_ether_addr(netdev->dev_addr)) {
1415 err = -EIO;
1416 goto err_eeprom;
1417 }
1418
1419 atl2_check_options(adapter);
1420
1421 timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
1422
1423 timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
1424
1425 INIT_WORK(&adapter->reset_task, atl2_reset_task);
1426 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
1427
1428 strcpy(netdev->name, "eth%d");
1429 err = register_netdev(netdev);
1430 if (err)
1431 goto err_register;
1432
1433
1434 netif_carrier_off(netdev);
1435 netif_stop_queue(netdev);
1436
1437 cards_found++;
1438
1439 return 0;
1440
1441 err_reset:
1442 err_register:
1443 err_sw_init:
1444 err_eeprom:
1445 iounmap(adapter->hw.hw_addr);
1446 err_ioremap:
1447 free_netdev(netdev);
1448 err_alloc_etherdev:
1449 pci_release_regions(pdev);
1450 err_pci_reg:
1451 err_dma:
1452 pci_disable_device(pdev);
1453 return err;
1454 }
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
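/*
 * atl2_remove - device removal routine
 * @pdev: PCI device information struct
 *
 * Called by the PCI subsystem when the device is being removed.  Stops
 * the timers and work items, unregisters the net_device, puts the PHY
 * into power-saving mode and releases all resources.
 */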
1467 static void atl2_remove(struct pci_dev *pdev)
1468 {
1469 struct net_device *netdev = pci_get_drvdata(pdev);
1470 struct atl2_adapter *adapter = netdev_priv(netdev);
1471
1472
1473
1474 set_bit(__ATL2_DOWN, &adapter->flags);
1475
1476 del_timer_sync(&adapter->watchdog_timer);
1477 del_timer_sync(&adapter->phy_config_timer);
1478 cancel_work_sync(&adapter->reset_task);
1479 cancel_work_sync(&adapter->link_chg_task);
1480
1481 unregister_netdev(netdev);
1482
1483 atl2_force_ps(&adapter->hw);
1484
1485 iounmap(adapter->hw.hw_addr);
1486 pci_release_regions(pdev);
1487
1488 free_netdev(netdev);
1489
1490 pci_disable_device(pdev);
1491 }
1492
1493 static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
1494 {
1495 struct net_device *netdev = pci_get_drvdata(pdev);
1496 struct atl2_adapter *adapter = netdev_priv(netdev);
1497 struct atl2_hw *hw = &adapter->hw;
1498 u16 speed, duplex;
1499 u32 ctrl = 0;
1500 u32 wufc = adapter->wol;
1501
1502 #ifdef CONFIG_PM
1503 int retval = 0;
1504 #endif
1505
1506 netif_device_detach(netdev);
1507
1508 if (netif_running(netdev)) {
1509 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
1510 atl2_down(adapter);
1511 }
1512
1513 #ifdef CONFIG_PM
1514 retval = pci_save_state(pdev);
1515 if (retval)
1516 return retval;
1517 #endif
1518
1519 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1520 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1521 if (ctrl & BMSR_LSTATUS)
1522 wufc &= ~ATLX_WUFC_LNKC;
1523
1524 if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
1525 u32 ret_val;
1526
1527 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1528 if (ret_val) {
1529 printk(KERN_DEBUG
1530 "%s: get speed&duplex error while suspend\n",
1531 atl2_driver_name);
1532 goto wol_dis;
1533 }
1534
1535 ctrl = 0;
1536
1537
1538 if (wufc & ATLX_WUFC_MAG)
1539 ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
1540
1541
1542 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1543
1544
1545 ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1546 if (FULL_DUPLEX == adapter->link_duplex)
1547 ctrl |= MAC_CTRL_DUPLX;
1548 ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1549 ctrl |= (((u32)adapter->hw.preamble_len &
1550 MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
1551 ctrl |= (((u32)(adapter->hw.retry_buf &
1552 MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
1553 MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1554 if (wufc & ATLX_WUFC_MAG) {
1555
1556 ctrl |= MAC_CTRL_BC_EN;
1557 }
1558
1559 ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
1560
1561
1562 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1563 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1564 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1565 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1566 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1567 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1568
1569 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1570 goto suspend_exit;
1571 }
1572
1573 if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
1574
1575 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1576 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1577 ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
1578
1579
1580 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1581 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1582 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1583 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1584 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1585 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1586
1587 hw->phy_configured = false;
1588
1589 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1590
1591 goto suspend_exit;
1592 }
1593
1594 wol_dis:
1595
1596 ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
1597
1598
1599 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1600 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1601 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1602 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1603 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1604 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1605
1606 atl2_force_ps(hw);
1607 hw->phy_configured = false;
1608
1609 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1610
1611 suspend_exit:
1612 if (netif_running(netdev))
1613 atl2_free_irq(adapter);
1614
1615 pci_disable_device(pdev);
1616
1617 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1618
1619 return 0;
1620 }
1621
1622 #ifdef CONFIG_PM
1623 static int atl2_resume(struct pci_dev *pdev)
1624 {
1625 struct net_device *netdev = pci_get_drvdata(pdev);
1626 struct atl2_adapter *adapter = netdev_priv(netdev);
1627 u32 err;
1628
1629 pci_set_power_state(pdev, PCI_D0);
1630 pci_restore_state(pdev);
1631
1632 err = pci_enable_device(pdev);
1633 if (err) {
1634 printk(KERN_ERR
1635 "atl2: Cannot enable PCI device from suspend\n");
1636 return err;
1637 }
1638
1639 pci_set_master(pdev);
1640
1641 ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL);
1642
1643 pci_enable_wake(pdev, PCI_D3hot, 0);
1644 pci_enable_wake(pdev, PCI_D3cold, 0);
1645
1646 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1647
1648 if (netif_running(netdev)) {
1649 err = atl2_request_irq(adapter);
1650 if (err)
1651 return err;
1652 }
1653
1654 atl2_reset_hw(&adapter->hw);
1655
1656 if (netif_running(netdev))
1657 atl2_up(adapter);
1658
1659 netif_device_attach(netdev);
1660
1661 return 0;
1662 }
1663 #endif
1664
1665 static void atl2_shutdown(struct pci_dev *pdev)
1666 {
1667 atl2_suspend(pdev, PMSG_SUSPEND);
1668 }
1669
1670 static struct pci_driver atl2_driver = {
1671 .name = atl2_driver_name,
1672 .id_table = atl2_pci_tbl,
1673 .probe = atl2_probe,
1674 .remove = atl2_remove,
1675
1676 .suspend = atl2_suspend,
1677 #ifdef CONFIG_PM
1678 .resume = atl2_resume,
1679 #endif
1680 .shutdown = atl2_shutdown,
1681 };
1682
1683
1684
1685
1686
1687
1688
1689 static int __init atl2_init_module(void)
1690 {
1691 printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
1692 atl2_driver_version);
1693 printk(KERN_INFO "%s\n", atl2_copyright);
1694 return pci_register_driver(&atl2_driver);
1695 }
1696 module_init(atl2_init_module);
1697
1698
1699
1700
1701
1702
1703
1704 static void __exit atl2_exit_module(void)
1705 {
1706 pci_unregister_driver(&atl2_driver);
1707 }
1708 module_exit(atl2_exit_module);
1709
1710 static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1711 {
1712 struct atl2_adapter *adapter = hw->back;
1713 pci_read_config_word(adapter->pdev, reg, value);
1714 }
1715
1716 static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1717 {
1718 struct atl2_adapter *adapter = hw->back;
1719 pci_write_config_word(adapter->pdev, reg, *value);
1720 }
1721
1722 static int atl2_get_link_ksettings(struct net_device *netdev,
1723 struct ethtool_link_ksettings *cmd)
1724 {
1725 struct atl2_adapter *adapter = netdev_priv(netdev);
1726 struct atl2_hw *hw = &adapter->hw;
1727 u32 supported, advertising;
1728
1729 supported = (SUPPORTED_10baseT_Half |
1730 SUPPORTED_10baseT_Full |
1731 SUPPORTED_100baseT_Half |
1732 SUPPORTED_100baseT_Full |
1733 SUPPORTED_Autoneg |
1734 SUPPORTED_TP);
1735 advertising = ADVERTISED_TP;
1736
1737 advertising |= ADVERTISED_Autoneg;
1738 advertising |= hw->autoneg_advertised;
1739
1740 cmd->base.port = PORT_TP;
1741 cmd->base.phy_address = 0;
1742
1743 if (adapter->link_speed != SPEED_0) {
1744 cmd->base.speed = adapter->link_speed;
1745 if (adapter->link_duplex == FULL_DUPLEX)
1746 cmd->base.duplex = DUPLEX_FULL;
1747 else
1748 cmd->base.duplex = DUPLEX_HALF;
1749 } else {
1750 cmd->base.speed = SPEED_UNKNOWN;
1751 cmd->base.duplex = DUPLEX_UNKNOWN;
1752 }
1753
1754 cmd->base.autoneg = AUTONEG_ENABLE;
1755
1756 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1757 supported);
1758 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1759 advertising);
1760
1761 return 0;
1762 }
1763
1764 static int atl2_set_link_ksettings(struct net_device *netdev,
1765 const struct ethtool_link_ksettings *cmd)
1766 {
1767 struct atl2_adapter *adapter = netdev_priv(netdev);
1768 struct atl2_hw *hw = &adapter->hw;
1769 u32 advertising;
1770
1771 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1772 cmd->link_modes.advertising);
1773
1774 while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
1775 msleep(1);
1776
1777 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1778 #define MY_ADV_MASK (ADVERTISE_10_HALF | \
1779 ADVERTISE_10_FULL | \
1780 ADVERTISE_100_HALF| \
1781 ADVERTISE_100_FULL)
1782
1783 if ((advertising & MY_ADV_MASK) == MY_ADV_MASK) {
1784 hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
1785 hw->autoneg_advertised = MY_ADV_MASK;
1786 } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_FULL) {
1787 hw->MediaType = MEDIA_TYPE_100M_FULL;
1788 hw->autoneg_advertised = ADVERTISE_100_FULL;
1789 } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_HALF) {
1790 hw->MediaType = MEDIA_TYPE_100M_HALF;
1791 hw->autoneg_advertised = ADVERTISE_100_HALF;
1792 } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_FULL) {
1793 hw->MediaType = MEDIA_TYPE_10M_FULL;
1794 hw->autoneg_advertised = ADVERTISE_10_FULL;
1795 } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_HALF) {
1796 hw->MediaType = MEDIA_TYPE_10M_HALF;
1797 hw->autoneg_advertised = ADVERTISE_10_HALF;
1798 } else {
1799 clear_bit(__ATL2_RESETTING, &adapter->flags);
1800 return -EINVAL;
1801 }
1802 advertising = hw->autoneg_advertised |
1803 ADVERTISED_TP | ADVERTISED_Autoneg;
1804 } else {
1805 clear_bit(__ATL2_RESETTING, &adapter->flags);
1806 return -EINVAL;
1807 }
1808
1809
1810 if (netif_running(adapter->netdev)) {
1811 atl2_down(adapter);
1812 atl2_up(adapter);
1813 } else
1814 atl2_reset_hw(&adapter->hw);
1815
1816 clear_bit(__ATL2_RESETTING, &adapter->flags);
1817 return 0;
1818 }
1819
1820 static u32 atl2_get_msglevel(struct net_device *netdev)
1821 {
1822 return 0;
1823 }
1824
1825
1826
1827
1828 static void atl2_set_msglevel(struct net_device *netdev, u32 data)
1829 {
1830 }
1831
1832 static int atl2_get_regs_len(struct net_device *netdev)
1833 {
1834 #define ATL2_REGS_LEN 42
1835 return sizeof(u32) * ATL2_REGS_LEN;
1836 }
1837
1838 static void atl2_get_regs(struct net_device *netdev,
1839 struct ethtool_regs *regs, void *p)
1840 {
1841 struct atl2_adapter *adapter = netdev_priv(netdev);
1842 struct atl2_hw *hw = &adapter->hw;
1843 u32 *regs_buff = p;
1844 u16 phy_data;
1845
1846 memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
1847
1848 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
1849
1850 regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
1851 regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
1852 regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
1853 regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
1854 regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
1855 regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
1856 regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
1857 regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
1858 regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
1859 regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
1860 regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
1861 regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
1862 regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
1863 regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
1864 regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
1865 regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
1866 regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
1867 regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
1868 regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
1869 regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
1870 regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
1871 regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
1872 regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
1873 regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
1874 regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
1875 regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
1876 regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
1877 regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
1878 regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
1879 regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
1880 regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
1881 regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
1882 regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
1883 regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
1884 regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
1885 regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
1886 regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
1887 regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
1888 regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
1889
1890 atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
1891 regs_buff[40] = (u32)phy_data;
1892 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1893 regs_buff[41] = (u32)phy_data;
1894 }
1895
1896 static int atl2_get_eeprom_len(struct net_device *netdev)
1897 {
1898 struct atl2_adapter *adapter = netdev_priv(netdev);
1899
1900 if (!atl2_check_eeprom_exist(&adapter->hw))
1901 return 512;
1902 else
1903 return 0;
1904 }
1905
1906 static int atl2_get_eeprom(struct net_device *netdev,
1907 struct ethtool_eeprom *eeprom, u8 *bytes)
1908 {
1909 struct atl2_adapter *adapter = netdev_priv(netdev);
1910 struct atl2_hw *hw = &adapter->hw;
1911 u32 *eeprom_buff;
1912 int first_dword, last_dword;
1913 int ret_val = 0;
1914 int i;
1915
1916 if (eeprom->len == 0)
1917 return -EINVAL;
1918
1919 if (atl2_check_eeprom_exist(hw))
1920 return -EINVAL;
1921
1922 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1923
1924 first_dword = eeprom->offset >> 2;
1925 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
1926
1927 eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32),
1928 GFP_KERNEL);
1929 if (!eeprom_buff)
1930 return -ENOMEM;
1931
1932 for (i = first_dword; i < last_dword; i++) {
1933 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
1934 ret_val = -EIO;
1935 goto free;
1936 }
1937 }
1938
1939 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
1940 eeprom->len);
1941 free:
1942 kfree(eeprom_buff);
1943
1944 return ret_val;
1945 }
1946
1947 static int atl2_set_eeprom(struct net_device *netdev,
1948 struct ethtool_eeprom *eeprom, u8 *bytes)
1949 {
1950 struct atl2_adapter *adapter = netdev_priv(netdev);
1951 struct atl2_hw *hw = &adapter->hw;
1952 u32 *eeprom_buff;
1953 u32 *ptr;
1954 int max_len, first_dword, last_dword, ret_val = 0;
1955 int i;
1956
1957 if (eeprom->len == 0)
1958 return -EOPNOTSUPP;
1959
1960 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1961 return -EFAULT;
1962
1963 max_len = 512;
1964
1965 first_dword = eeprom->offset >> 2;
1966 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
1967 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1968 if (!eeprom_buff)
1969 return -ENOMEM;
1970
1971 ptr = eeprom_buff;
1972
1973 if (eeprom->offset & 3) {
1974
1975
1976 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
1977 ret_val = -EIO;
1978 goto out;
1979 }
1980 ptr++;
1981 }
1982 if (((eeprom->offset + eeprom->len) & 3)) {
1983
1984
1985
1986
1987 if (!atl2_read_eeprom(hw, last_dword * 4,
1988 &(eeprom_buff[last_dword - first_dword]))) {
1989 ret_val = -EIO;
1990 goto out;
1991 }
1992 }
1993
1994
1995 memcpy(ptr, bytes, eeprom->len);
1996
1997 for (i = 0; i < last_dword - first_dword + 1; i++) {
1998 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
1999 ret_val = -EIO;
2000 goto out;
2001 }
2002 }
2003 out:
2004 kfree(eeprom_buff);
2005 return ret_val;
2006 }
2007
2008 static void atl2_get_drvinfo(struct net_device *netdev,
2009 struct ethtool_drvinfo *drvinfo)
2010 {
2011 struct atl2_adapter *adapter = netdev_priv(netdev);
2012
2013 strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
2014 strlcpy(drvinfo->version, atl2_driver_version,
2015 sizeof(drvinfo->version));
2016 strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
2017 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
2018 sizeof(drvinfo->bus_info));
2019 }
2020
2021 static void atl2_get_wol(struct net_device *netdev,
2022 struct ethtool_wolinfo *wol)
2023 {
2024 struct atl2_adapter *adapter = netdev_priv(netdev);
2025
2026 wol->supported = WAKE_MAGIC;
2027 wol->wolopts = 0;
2028
2029 if (adapter->wol & ATLX_WUFC_EX)
2030 wol->wolopts |= WAKE_UCAST;
2031 if (adapter->wol & ATLX_WUFC_MC)
2032 wol->wolopts |= WAKE_MCAST;
2033 if (adapter->wol & ATLX_WUFC_BC)
2034 wol->wolopts |= WAKE_BCAST;
2035 if (adapter->wol & ATLX_WUFC_MAG)
2036 wol->wolopts |= WAKE_MAGIC;
2037 if (adapter->wol & ATLX_WUFC_LNKC)
2038 wol->wolopts |= WAKE_PHY;
2039 }
2040
2041 static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2042 {
2043 struct atl2_adapter *adapter = netdev_priv(netdev);
2044
2045 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2046 return -EOPNOTSUPP;
2047
2048 if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
2049 return -EOPNOTSUPP;
2050
2051
2052 adapter->wol = 0;
2053
2054 if (wol->wolopts & WAKE_MAGIC)
2055 adapter->wol |= ATLX_WUFC_MAG;
2056 if (wol->wolopts & WAKE_PHY)
2057 adapter->wol |= ATLX_WUFC_LNKC;
2058
2059 return 0;
2060 }
2061
2062 static int atl2_nway_reset(struct net_device *netdev)
2063 {
2064 struct atl2_adapter *adapter = netdev_priv(netdev);
2065 if (netif_running(netdev))
2066 atl2_reinit_locked(adapter);
2067 return 0;
2068 }
2069
2070 static const struct ethtool_ops atl2_ethtool_ops = {
2071 .get_drvinfo = atl2_get_drvinfo,
2072 .get_regs_len = atl2_get_regs_len,
2073 .get_regs = atl2_get_regs,
2074 .get_wol = atl2_get_wol,
2075 .set_wol = atl2_set_wol,
2076 .get_msglevel = atl2_get_msglevel,
2077 .set_msglevel = atl2_set_msglevel,
2078 .nway_reset = atl2_nway_reset,
2079 .get_link = ethtool_op_get_link,
2080 .get_eeprom_len = atl2_get_eeprom_len,
2081 .get_eeprom = atl2_get_eeprom,
2082 .set_eeprom = atl2_set_eeprom,
2083 .get_link_ksettings = atl2_get_link_ksettings,
2084 .set_link_ksettings = atl2_set_link_ksettings,
2085 };
2086
2087 #define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
2088 (((a) & 0xff00ff00) >> 8))
2089 #define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
2090 #define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
2091
2092
2093
2094
2095
2096
2097
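/*
 * atl2_reset_hw - perform a software reset of the MAC
 * @hw: pointer to the hardware structure
 *
 * Makes sure I/O, memory space and bus mastering are enabled in the PCI
 * command word, issues a soft reset via the master control register and
 * waits for the device to report idle.  Returns 0 on success, otherwise
 * the last idle-status value read.
 */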
2098 static s32 atl2_reset_hw(struct atl2_hw *hw)
2099 {
2100 u32 icr;
2101 u16 pci_cfg_cmd_word;
2102 int i;
2103
2104
2105 atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2106 if ((pci_cfg_cmd_word &
2107 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
2108 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
2109 pci_cfg_cmd_word |=
2110 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
2111 atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2112 }
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
2127 wmb();
2128 msleep(1);
2129
2130
2131 for (i = 0; i < 10; i++) {
2132 icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
2133 if (!icr)
2134 break;
2135 msleep(1);
2136 cpu_relax();
2137 }
2138
2139 if (icr)
2140 return icr;
2141
2142 return 0;
2143 }
2144
2145 #define CUSTOM_SPI_CS_SETUP 2
2146 #define CUSTOM_SPI_CLK_HI 2
2147 #define CUSTOM_SPI_CLK_LO 2
2148 #define CUSTOM_SPI_CS_HOLD 2
2149 #define CUSTOM_SPI_CS_HI 3
2150
2151 static struct atl2_spi_flash_dev flash_table[] =
2152 {
2153
2154 {"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
2155 {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
2156 {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
2157 };
2158
2159 static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
2160 {
2161 int i;
2162 u32 value;
2163
2164 ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
2165 ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
2166
2167 value = SPI_FLASH_CTRL_WAIT_READY |
2168 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2169 SPI_FLASH_CTRL_CS_SETUP_SHIFT |
2170 (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
2171 SPI_FLASH_CTRL_CLK_HI_SHIFT |
2172 (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
2173 SPI_FLASH_CTRL_CLK_LO_SHIFT |
2174 (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2175 SPI_FLASH_CTRL_CS_HOLD_SHIFT |
2176 (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
2177 SPI_FLASH_CTRL_CS_HI_SHIFT |
2178 (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
2179
2180 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2181
2182 value |= SPI_FLASH_CTRL_START;
2183
2184 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2185
2186 for (i = 0; i < 10; i++) {
2187 msleep(1);
2188 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2189 if (!(value & SPI_FLASH_CTRL_START))
2190 break;
2191 }
2192
2193 if (value & SPI_FLASH_CTRL_START)
2194 return false;
2195
2196 *buf = ATL2_READ_REG(hw, REG_SPI_DATA);
2197
2198 return true;
2199 }
2200
2201 /* get_permanent_address
2202  * Read the permanent MAC address from EEPROM, SPI flash, or the current
2203  * MAC_STA_ADDR registers.
2204  * Returns 0 if a valid address was found, 1 otherwise. */
2205 static int get_permanent_address(struct atl2_hw *hw)
2206 {
2207 u32 Addr[2];
2208 u32 i, Control;
2209 u16 Register;
2210 u8 EthAddr[ETH_ALEN];
2211 bool KeyValid;
2212
2213 if (is_valid_ether_addr(hw->perm_mac_addr))
2214 return 0;
2215
2216 Addr[0] = 0;
2217 Addr[1] = 0;
2218
2219 if (!atl2_check_eeprom_exist(hw)) {
2220 Register = 0;
2221 KeyValid = false;
2222
2223 /* scan the EEPROM: a dword whose low byte is 0x5A carries a register offset; the following dword is the value for that register */
2224 i = 0;
2225 while (1) {
2226 if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
2227 if (KeyValid) {
2228 if (Register == REG_MAC_STA_ADDR)
2229 Addr[0] = Control;
2230 else if (Register ==
2231 (REG_MAC_STA_ADDR + 4))
2232 Addr[1] = Control;
2233 KeyValid = false;
2234 } else if ((Control & 0xff) == 0x5A) {
2235 KeyValid = true;
2236 Register = (u16) (Control >> 16);
2237 } else {
2238 /* the data ends at the first entry without a valid key byte */
2239 break;
2240 }
2241 } else {
2242 break;
2243 }
2244 i += 4;
2245 }
2246
2247 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2248 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2249
2250 if (is_valid_ether_addr(EthAddr)) {
2251 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2252 return 0;
2253 }
2254 return 1;
2255 }
2256
2257 /* no EEPROM (or no address there): try to read the MAC address from SPI flash */
2258 Addr[0] = 0;
2259 Addr[1] = 0;
2260 Register = 0;
2261 KeyValid = false;
2262 i = 0;
2263 while (1) {
2264 if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
2265 if (KeyValid) {
2266 if (Register == REG_MAC_STA_ADDR)
2267 Addr[0] = Control;
2268 else if (Register == (REG_MAC_STA_ADDR + 4))
2269 Addr[1] = Control;
2270 KeyValid = false;
2271 } else if ((Control & 0xff) == 0x5A) {
2272 KeyValid = true;
2273 Register = (u16) (Control >> 16);
2274 } else {
2275 break;
2276 }
2277 } else {
2278 break;
2279 }
2280 i += 4;
2281 }
2282
2283 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2284 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
2285 if (is_valid_ether_addr(EthAddr)) {
2286 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2287 return 0;
2288 }
2289 /* finally, fall back to whatever is already in the MAC address registers (e.g. left there by the BIOS) */
2290 Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
2291 Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
2292 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2293 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2294
2295 if (is_valid_ether_addr(EthAddr)) {
2296 memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
2297 return 0;
2298 }
2299
2300 return 1;
2301 }
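/* Layout assumed by the two scans above (a sketch reconstructed from the code):
 *	dword N   : 0xRRRR005A - key byte 0x5A, RRRR = register offset
 *	dword N+1 : value to load into register RRRR
 * Only REG_MAC_STA_ADDR and REG_MAC_STA_ADDR + 4 are collected here; the scan
 * stops at the first dword that carries neither a value nor a 0x5A key. */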
2302
2303 /*
2304  * atl2_read_mac_addr
2305  * Read the permanent MAC address; fall back to a fixed default if none
2306  * is found, then copy the result into hw->mac_addr.
2307  */
2308 static s32 atl2_read_mac_addr(struct atl2_hw *hw)
2309 {
2310 if (get_permanent_address(hw)) {
2311 /* no valid permanent address was found;
2312  * fall back to a fixed default address */
2313 hw->perm_mac_addr[0] = 0x00;
2314 hw->perm_mac_addr[1] = 0x13;
2315 hw->perm_mac_addr[2] = 0x74;
2316 hw->perm_mac_addr[3] = 0x00;
2317 hw->perm_mac_addr[4] = 0x5c;
2318 hw->perm_mac_addr[5] = 0x38;
2319 }
2320
2321 memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN);
2322
2323 return 0;
2324 }
2325
2326 /*
2327  * atl2_hash_mc_addr - hash a multicast address for the multicast table
2328  *
2329  * mc_addr - the multicast address to hash
2330  *
2331  * The hash is the CRC-32 of the address with its bit order reversed
2332  * (MSB becomes LSB); atl2_hash_set() then uses the top bits of the
2333  * result to pick the hash register and bit.
2334  */
2339 static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
2340 {
2341 u32 crc32, value;
2342 int i;
2343
2344 value = 0;
2345 crc32 = ether_crc_le(6, mc_addr);
2346
2347 for (i = 0; i < 32; i++)
2348 value |= (((crc32 >> i) & 1) << (31 - i));
2349
2350 return value;
2351 }
2352
2353 /*
2354  * atl2_hash_set - set the bit in the multicast table corresponding
2355  * to the hash value
2356  *
2357  * hash_value - multicast address hash value from atl2_hash_mc_addr()
2358  */
2359 static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
2360 {
2361 u32 hash_bit, hash_reg;
2362 u32 mta;
2363
2364 /*
2365  * The hash table is a register array of two 32-bit registers,
2366  * treated as an array of 64 bits: bit BitArray[hash_value] must be
2367  * set. The register is selected by the most significant bit of the
2368  * hash value and the bit within that register by the next five
2369  * bits, so read the register, OR in the new bit and write the
2370  * result back.
2371  */
2372 hash_reg = (hash_value >> 31) & 0x1;
2373 hash_bit = (hash_value >> 26) & 0x1F;
2374
2375 mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
2376
2377 mta |= (1 << hash_bit);
2378
2379 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
2380 }
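/* A typical caller (atl2_set_multi earlier in this file) combines the two
 * helpers roughly as follows for every address on the multicast list:
 *	hash_value = atl2_hash_mc_addr(hw, ha->addr);
 *	atl2_hash_set(hw, hash_value);
 */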
2381
2382 /*
2383  * atl2_init_pcie - load the default LTSSM test-mode and DLL TX control values
2384  */
2385 static void atl2_init_pcie(struct atl2_hw *hw)
2386 {
2387 u32 value;
2388 value = LTSSM_TEST_MODE_DEF;
2389 ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
2390
2391 value = PCIE_DLL_TX_CTRL1_DEF;
2392 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
2393 }
2394
2395 static void atl2_init_flash_opcode(struct atl2_hw *hw)
2396 {
2397 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
2398 hw->flash_vendor = 0;
2399
2400 /* program the op-code registers for the selected flash vendor */
2401 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
2402 flash_table[hw->flash_vendor].cmdPROGRAM);
2403 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
2404 flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
2405 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
2406 flash_table[hw->flash_vendor].cmdCHIP_ERASE);
2407 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
2408 flash_table[hw->flash_vendor].cmdRDID);
2409 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
2410 flash_table[hw->flash_vendor].cmdWREN);
2411 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
2412 flash_table[hw->flash_vendor].cmdRDSR);
2413 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
2414 flash_table[hw->flash_vendor].cmdWRSR);
2415 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
2416 flash_table[hw->flash_vendor].cmdREAD);
2417 }
2418
2419 /*
2420  * atl2_init_hw - perform basic configuration of the adapter
2421  *
2422  * Assumes the controller has previously been reset and is in a
2423  * post-reset, uninitialized state. Initializes the PCIe block,
2424  * clears the multicast hash table, programs the SPI flash op-codes
2425  * and initializes the PHY (autoneg/link configuration).
2426  */
2428 static s32 atl2_init_hw(struct atl2_hw *hw)
2429 {
2430 u32 ret_val = 0;
2431
2432 atl2_init_pcie(hw);
2433
2434
2435 /* zero out the multicast hash table */
2436 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
2437 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
2438
2439 atl2_init_flash_opcode(hw);
2440
2441 ret_val = atl2_phy_init(hw);
2442
2443 return ret_val;
2444 }
2445
2446 /*
2447  * atl2_get_speed_and_duplex - detect the current speed and duplex
2448  *
2449  * speed  - SPEED_100 or SPEED_10
2450  * duplex - FULL_DUPLEX or HALF_DUPLEX
2451  * Returns 0 on success, or an error if the PHY has not resolved yet.
2452  */
2453 static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
2454 u16 *duplex)
2455 {
2456 s32 ret_val;
2457 u16 phy_data;
2458
2459 /* read the PHY-specific status register */
2460 ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
2461 if (ret_val)
2462 return ret_val;
2463
2464 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
2465 return ATLX_ERR_PHY_RES;
2466
2467 switch (phy_data & MII_ATLX_PSSR_SPEED) {
2468 case MII_ATLX_PSSR_100MBS:
2469 *speed = SPEED_100;
2470 break;
2471 case MII_ATLX_PSSR_10MBS:
2472 *speed = SPEED_10;
2473 break;
2474 default:
2475 return ATLX_ERR_PHY_SPEED;
2476 }
2477
2478 if (phy_data & MII_ATLX_PSSR_DPLX)
2479 *duplex = FULL_DUPLEX;
2480 else
2481 *duplex = HALF_DUPLEX;
2482
2483 return 0;
2484 }
2485
2486 /*
2487  * atl2_read_phy_reg - read the value of a PHY register
2488  * reg_addr - address of the PHY register to read
2489  * phy_data - where to store the value read
2490  */
2491 static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
2492 {
2493 u32 val;
2494 int i;
2495
2496 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2497 MDIO_START |
2498 MDIO_SUP_PREAMBLE |
2499 MDIO_RW |
2500 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2501 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2502
2503 wmb();
2504
2505 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2506 udelay(2);
2507 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2508 if (!(val & (MDIO_START | MDIO_BUSY)))
2509 break;
2510 wmb();
2511 }
2512 if (!(val & (MDIO_START | MDIO_BUSY))) {
2513 *phy_data = (u16)val;
2514 return 0;
2515 }
2516
2517 return ATLX_ERR_PHY;
2518 }
2519
2520 /*
2521  * atl2_write_phy_reg - write a value to a PHY register
2522  *
2523  * reg_addr - address of the PHY register to write
2524  * phy_data - value to write
2525  */
2526 static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
2527 {
2528 int i;
2529 u32 val;
2530
2531 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
2532 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
2533 MDIO_SUP_PREAMBLE |
2534 MDIO_START |
2535 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2536 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2537
2538 wmb();
2539
2540 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2541 udelay(2);
2542 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2543 if (!(val & (MDIO_START | MDIO_BUSY)))
2544 break;
2545
2546 wmb();
2547 }
2548
2549 if (!(val & (MDIO_START | MDIO_BUSY)))
2550 return 0;
2551
2552 return ATLX_ERR_PHY;
2553 }
2554
2555 /*
2556  * atl2_phy_setup_autoneg_adv
2557  * Configure the PHY auto-negotiation and flow-control advertisement
2558  * register according to hw->MediaType.
2559  */
2560 static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
2561 {
2562 s32 ret_val;
2563 s16 mii_autoneg_adv_reg;
2564
2565 /* start from the default advertisement capability mask */
2566 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
2567
2568 /*
2569  * Parse hw->MediaType and set up the appropriate PHY advertisement
2570  * bits: first clear all of the 10/100 Mb speed bits in the
2571  * advertisement register, then set only the capabilities that match
2572  * the selected media type.
2573  */
2578 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
2579
2580
2581
2582 switch (hw->MediaType) {
2583 case MEDIA_TYPE_AUTO_SENSOR:
2584 mii_autoneg_adv_reg |=
2585 (MII_AR_10T_HD_CAPS |
2586 MII_AR_10T_FD_CAPS |
2587 MII_AR_100TX_HD_CAPS|
2588 MII_AR_100TX_FD_CAPS);
2589 hw->autoneg_advertised =
2590 ADVERTISE_10_HALF |
2591 ADVERTISE_10_FULL |
2592 ADVERTISE_100_HALF|
2593 ADVERTISE_100_FULL;
2594 break;
2595 case MEDIA_TYPE_100M_FULL:
2596 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
2597 hw->autoneg_advertised = ADVERTISE_100_FULL;
2598 break;
2599 case MEDIA_TYPE_100M_HALF:
2600 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
2601 hw->autoneg_advertised = ADVERTISE_100_HALF;
2602 break;
2603 case MEDIA_TYPE_10M_FULL:
2604 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
2605 hw->autoneg_advertised = ADVERTISE_10_FULL;
2606 break;
2607 default:
2608 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
2609 hw->autoneg_advertised = ADVERTISE_10_HALF;
2610 break;
2611 }
2612
2613 /* always advertise symmetric and asymmetric PAUSE (flow control) */
2614 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
2615
2616 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
2617
2618 ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
2619
2620 if (ret_val)
2621 return ret_val;
2622
2623 return 0;
2624 }
2625
2626 /*
2627  * atl2_phy_commit - reset the PHY and make the configuration take effect
2628  *
2629  * Writes MII_BMCR with the reset, auto-neg enable and restart-auto-neg
2630  * bits set. If the write fails, polls the MDIO controller to see
2631  * whether the PCIe serdes link has gone down.
2632  */
2633 static s32 atl2_phy_commit(struct atl2_hw *hw)
2634 {
2635 s32 ret_val;
2636 u16 phy_data;
2637
2638 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2639 ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
2640 if (ret_val) {
2641 u32 val;
2642 int i;
2643 /* the MDIO write failed; the PCIe serdes link may be down, so wait for the MDIO unit to go idle */
2644 for (i = 0; i < 25; i++) {
2645 msleep(1);
2646 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2647 if (!(val & (MDIO_START | MDIO_BUSY)))
2648 break;
2649 }
2650
2651 if (0 != (val & (MDIO_START | MDIO_BUSY))) {
2652 printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n");
2653 return ret_val;
2654 }
2655 }
2656 return 0;
2657 }
2658
2659 static s32 atl2_phy_init(struct atl2_hw *hw)
2660 {
2661 s32 ret_val;
2662 u16 phy_val;
2663
2664 if (hw->phy_configured)
2665 return 0;
2666
2667 /* enable the PHY */
2668 ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
2669 ATL2_WRITE_FLUSH(hw);
2670 msleep(1);
2671
2672 /* check whether the PHY is in power-saving mode (debug register 0) */
2673 atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
2674 atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
2675
2676 /* clear the power-save bit if it is set */
2677 if (phy_val & 0x1000) {
2678 phy_val &= ~0x1000;
2679 atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
2680 }
2681
2682 msleep(1);
2683
2684 /* enable the PHY link-change interrupt (vendor register 18) */
2685 ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
2686 if (ret_val)
2687 return ret_val;
2688
2689 /* set up the auto-negotiation advertisement */
2690 ret_val = atl2_phy_setup_autoneg_adv(hw);
2691 if (ret_val)
2692 return ret_val;
2693
2694 /* reset the PHY and restart auto-negotiation */
2695 ret_val = atl2_phy_commit(hw);
2696 if (ret_val)
2697 return ret_val;
2698
2699 hw->phy_configured = true;
2700
2701 return ret_val;
2702 }
2703
2704 static void atl2_set_mac_addr(struct atl2_hw *hw)
2705 {
2706 u32 value;
2707
2708 /* Program the station address registers; e.g. for 00-0B-6A-F6-00-DC,
2709  * dword 0 gets 0x6AF600DC (low dword) and dword 1 gets 0x0000000B. */
2710 value = (((u32)hw->mac_addr[2]) << 24) |
2711 (((u32)hw->mac_addr[3]) << 16) |
2712 (((u32)hw->mac_addr[4]) << 8) |
2713 (((u32)hw->mac_addr[5]));
2714 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
2715
2716 value = (((u32)hw->mac_addr[0]) << 8) |
2717 (((u32)hw->mac_addr[1]));
2718 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
2719 }
2720
2721 /*
2722  * atl2_check_eeprom_exist
2723  * Returns 0 if an EEPROM is present, 1 otherwise.
2724  */
2725 static int atl2_check_eeprom_exist(struct atl2_hw *hw)
2726 {
2727 u32 value;
2728
2729 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2730 if (value & SPI_FLASH_CTRL_EN_VPD) {
2731 value &= ~SPI_FLASH_CTRL_EN_VPD;
2732 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2733 }
2734 value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
2735 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2736 }
2737
2738 /* EEPROM writes are not implemented; this stub always reports success */
2739 static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
2740 {
2741 return true;
2742 }
2743
2744 static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
2745 {
2746 int i;
2747 u32 Control;
2748
2749 if (Offset & 0x3)
2750 return false;
2751
2752 ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
2753 Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2754 ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
2755
2756 for (i = 0; i < 10; i++) {
2757 msleep(2);
2758 Control = ATL2_READ_REG(hw, REG_VPD_CAP);
2759 if (Control & VPD_CAP_VPD_FLAG)
2760 break;
2761 }
2762
2763 if (Control & VPD_CAP_VPD_FLAG) {
2764 *pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
2765 return true;
2766 }
2767 return false;
2768 }
2769
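/* atl2_force_ps - set the power-save bit in PHY debug register 0 (the inverse
 * of the clearing done in atl2_phy_init) and program debug registers 2 and 3,
 * putting the PHY into its power-saving state. */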
2770 static void atl2_force_ps(struct atl2_hw *hw)
2771 {
2772 u16 phy_val;
2773
2774 atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
2775 atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
2776 atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
2777
2778 atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
2779 atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
2780 atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
2781 atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
2782 }
2783
2784 /* This is the only thing that needs to be changed to adjust the
2785  * maximum number of ports that the driver can manage.
2786  */
2787 #define ATL2_MAX_NIC 4
2788
2789 #define OPTION_UNSET -1
2790 #define OPTION_DISABLED 0
2791 #define OPTION_ENABLED 1
2792
2793 /* All parameters are treated the same, as an integer array of values.
2794  * This macro just reduces the need to repeat the same declaration code
2795  * over and over (plus this helps to avoid typo bugs).
2796  */
2797 #define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
2798 #ifndef module_param_array
2799 /* Module parameters are always initialized to OPTION_UNSET (-1), so that
2800  * the driver can tell the difference between no user-specified value and
2801  * a request for the default value.
2802  * The true default values are loaded in when atl2_check_options is called.
2803  *
2804  * The designated range initializer used here is a GCC extension to ANSI C.
2805  * See "Labeled Elements in Initializers" in the "Extensions to the C
2806  * Language Family" section of the GCC documentation.
2807  */
2809 #define ATL2_PARAM(X, desc) \
2810 static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
2811 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
2812 MODULE_PARM_DESC(X, desc);
2813 #else
2814 #define ATL2_PARAM(X, desc) \
2815 static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
2816 static unsigned int num_##X; \
2817 module_param_array_named(X, X, int, &num_##X, 0); \
2818 MODULE_PARM_DESC(X, desc);
2819 #endif
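/* Each ATL2_PARAM above becomes an integer array module parameter, one entry
 * per board. Illustrative load (the values are only an example):
 *	modprobe atl2 TxMemSize=8,16 MediaType=0,0
 * Boards without an explicit value keep OPTION_UNSET and receive the default
 * in atl2_check_options(). */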
2820
2821 /*
2822  * Transmit Memory Size (in KB)
2823  * Valid Range: 4-64
2824  * Default Value: 8
2825  */
2826 #define ATL2_MIN_TX_MEMSIZE 4
2827 #define ATL2_MAX_TX_MEMSIZE 64
2828 #define ATL2_DEFAULT_TX_MEMSIZE 8
2829 ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
2830
2831 /*
2832  * Receive Memory Block Count
2833  * Valid Range: 16-512
2834  * Default Value: 64
2835  */
2836 #define ATL2_MIN_RXD_COUNT 16
2837 #define ATL2_MAX_RXD_COUNT 512
2838 #define ATL2_DEFAULT_RXD_COUNT 64
2839 ATL2_PARAM(RxMemBlock, "Number of receive memory block");
2840
2841 /*
2842  * User Specified MediaType Override
2843  *
2844  * Valid Range: MEDIA_TYPE_AUTO_SENSOR to MEDIA_TYPE_10M_HALF
2845  *  - MEDIA_TYPE_AUTO_SENSOR - auto-negotiate at all supported speeds
2846  *  - MEDIA_TYPE_100M_FULL   - only link at 100Mbps Full Duplex
2847  *  - MEDIA_TYPE_100M_HALF   - only link at 100Mbps Half Duplex
2848  *  - MEDIA_TYPE_10M_FULL    - only link at 10Mbps Full Duplex
2849  *  - MEDIA_TYPE_10M_HALF    - only link at 10Mbps Half Duplex
2850  * Default Value: MEDIA_TYPE_AUTO_SENSOR
2851  */
2853 ATL2_PARAM(MediaType, "MediaType Select");
2854
2855 /*
2856  * Interrupt Moderate Timer
2857  * Valid Range: 50-65000
2858  * Default Value: 100
2859  */
2860 #define INT_MOD_DEFAULT_CNT 100
2861 #define INT_MOD_MAX_CNT 65000
2862 #define INT_MOD_MIN_CNT 50
2863 ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
2864
2865 /*
2866  * FlashVendor
2867  * Valid Range: 0-2
2868  * 0 - Atmel
2869  * 1 - SST
2870  * 2 - ST
2871  */
2872 ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
2873
2874 #define AUTONEG_ADV_DEFAULT 0x2F
2875 #define AUTONEG_ADV_MASK 0x2F
2876 #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
2877
2878 #define FLASH_VENDOR_DEFAULT 0
2879 #define FLASH_VENDOR_MIN 0
2880 #define FLASH_VENDOR_MAX 2
2881
2882 struct atl2_option {
2883 enum { enable_option, range_option, list_option } type;
2884 char *name;
2885 char *err;
2886 int def;
2887 union {
2888 struct {
2889 int min;
2890 int max;
2891 } r;
2892 struct {
2893 int nr;
2894 struct atl2_opt_list { int i; char *str; } *p;
2895 } l;
2896 } arg;
2897 };
2898
2899 static int atl2_validate_option(int *value, struct atl2_option *opt)
2900 {
2901 int i;
2902 struct atl2_opt_list *ent;
2903
2904 if (*value == OPTION_UNSET) {
2905 *value = opt->def;
2906 return 0;
2907 }
2908
2909 switch (opt->type) {
2910 case enable_option:
2911 switch (*value) {
2912 case OPTION_ENABLED:
2913 printk(KERN_INFO "%s Enabled\n", opt->name);
2914 return 0;
2915 case OPTION_DISABLED:
2916 printk(KERN_INFO "%s Disabled\n", opt->name);
2917 return 0;
2918 }
2919 break;
2920 case range_option:
2921 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
2922 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
2923 return 0;
2924 }
2925 break;
2926 case list_option:
2927 for (i = 0; i < opt->arg.l.nr; i++) {
2928 ent = &opt->arg.l.p[i];
2929 if (*value == ent->i) {
2930 if (ent->str[0] != '\0')
2931 printk(KERN_INFO "%s\n", ent->str);
2932 return 0;
2933 }
2934 }
2935 break;
2936 default:
2937 BUG();
2938 }
2939
2940 printk(KERN_INFO "Invalid %s specified (%i) %s\n",
2941 opt->name, *value, opt->err);
2942 *value = opt->def;
2943 return -1;
2944 }
2945
2946 /*
2947  * atl2_check_options - Range Checking for Command Line Parameters
2948  * @adapter: board private structure
2949  *
2950  * This routine checks all command line parameters for valid user
2951  * input. If an invalid value is given, or if no user-specified
2952  * value exists, a default value is used. The final value is stored
2953  * in a variable in the adapter structure.
2954  */
2955 static void atl2_check_options(struct atl2_adapter *adapter)
2956 {
2957 int val;
2958 struct atl2_option opt;
2959 int bd = adapter->bd_number;
2960 if (bd >= ATL2_MAX_NIC) {
2961 printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
2962 bd);
2963 printk(KERN_NOTICE "Using defaults for all values\n");
2964 #ifndef module_param_array
2965 bd = ATL2_MAX_NIC;
2966 #endif
2967 }
2968
2969 /* Bytes of Transmit Memory */
2970 opt.type = range_option;
2971 opt.name = "Bytes of Transmit Memory";
2972 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
2973 opt.def = ATL2_DEFAULT_TX_MEMSIZE;
2974 opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
2975 opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
2976 #ifdef module_param_array
2977 if (num_TxMemSize > bd) {
2978 #endif
2979 val = TxMemSize[bd];
2980 atl2_validate_option(&val, &opt);
2981 adapter->txd_ring_size = ((u32) val) * 1024;
2982 #ifdef module_param_array
2983 } else
2984 adapter->txd_ring_size = ((u32)opt.def) * 1024;
2985 #endif
2986
2987 adapter->txs_ring_size = adapter->txd_ring_size / 128;
2988 if (adapter->txs_ring_size > 160)
2989 adapter->txs_ring_size = 160;
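/* the TX status ring gets one entry per 128 bytes of TX memory, capped at 160 entries */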
2990
2991 /* Receive Memory Block Count */
2992 opt.type = range_option;
2993 opt.name = "Number of receive memory block";
2994 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
2995 opt.def = ATL2_DEFAULT_RXD_COUNT;
2996 opt.arg.r.min = ATL2_MIN_RXD_COUNT;
2997 opt.arg.r.max = ATL2_MAX_RXD_COUNT;
2998 #ifdef module_param_array
2999 if (num_RxMemBlock > bd) {
3000 #endif
3001 val = RxMemBlock[bd];
3002 atl2_validate_option(&val, &opt);
3003 adapter->rxd_ring_size = (u32)val;
3004
3005
3006 #ifdef module_param_array
3007 } else
3008 adapter->rxd_ring_size = (u32)opt.def;
3009 #endif
3010
3011 adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
3012 adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
3013 (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
3014 (adapter->rxd_ring_size / 12);
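/* receive flow-control watermarks derived from the ring size: the high mark is
 * 7/8 of the ring, the low mark the larger of ATL2_MIN_RXD_COUNT/8 and ring/12 */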
3015
3016 /* Interrupt Moderate Timer */
3017 opt.type = range_option;
3018 opt.name = "Interrupt Moderate Timer";
3019 opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
3020 opt.def = INT_MOD_DEFAULT_CNT;
3021 opt.arg.r.min = INT_MOD_MIN_CNT;
3022 opt.arg.r.max = INT_MOD_MAX_CNT;
3023 #ifdef module_param_array
3024 if (num_IntModTimer > bd) {
3025 #endif
3026 val = IntModTimer[bd];
3027 atl2_validate_option(&val, &opt);
3028 adapter->imt = (u16) val;
3029 #ifdef module_param_array
3030 } else
3031 adapter->imt = (u16)(opt.def);
3032 #endif
3033 /* SPI Flash Vendor */
3034 opt.type = range_option;
3035 opt.name = "SPI Flash Vendor";
3036 opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
3037 opt.def = FLASH_VENDOR_DEFAULT;
3038 opt.arg.r.min = FLASH_VENDOR_MIN;
3039 opt.arg.r.max = FLASH_VENDOR_MAX;
3040 #ifdef module_param_array
3041 if (num_FlashVendor > bd) {
3042 #endif
3043 val = FlashVendor[bd];
3044 atl2_validate_option(&val, &opt);
3045 adapter->hw.flash_vendor = (u8) val;
3046 #ifdef module_param_array
3047 } else
3048 adapter->hw.flash_vendor = (u8)(opt.def);
3049 #endif
3050 /* Speed/Duplex (MediaType) Selection */
3051 opt.type = range_option;
3052 opt.name = "Speed/Duplex Selection";
3053 opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
3054 opt.def = MEDIA_TYPE_AUTO_SENSOR;
3055 opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
3056 opt.arg.r.max = MEDIA_TYPE_10M_HALF;
3057 #ifdef module_param_array
3058 if (num_MediaType > bd) {
3059 #endif
3060 val = MediaType[bd];
3061 atl2_validate_option(&val, &opt);
3062 adapter->hw.MediaType = (u16) val;
3063 #ifdef module_param_array
3064 } else
3065 adapter->hw.MediaType = (u16)(opt.def);
3066 #endif
3067 }