This source file includes following definitions.
- typhoon_inc_index
- typhoon_inc_cmd_index
- typhoon_inc_resp_index
- typhoon_inc_rxfree_index
- typhoon_inc_tx_index
- typhoon_inc_rx_index
- typhoon_reset
- typhoon_wait_status
- typhoon_media_status
- typhoon_hello
- typhoon_process_response
- typhoon_num_free
- typhoon_num_free_cmd
- typhoon_num_free_resp
- typhoon_num_free_tx
- typhoon_issue_command
- typhoon_tso_fill
- typhoon_start_tx
- typhoon_set_rx_mode
- typhoon_do_get_stats
- typhoon_get_stats
- typhoon_get_drvinfo
- typhoon_get_link_ksettings
- typhoon_set_link_ksettings
- typhoon_get_wol
- typhoon_set_wol
- typhoon_get_ringparam
- typhoon_wait_interrupt
- typhoon_init_interface
- typhoon_init_rings
- typhoon_request_firmware
- typhoon_download_firmware
- typhoon_boot_3XP
- typhoon_clean_tx
- typhoon_tx_complete
- typhoon_recycle_rx_skb
- typhoon_alloc_rx_skb
- typhoon_rx
- typhoon_fill_free_ring
- typhoon_poll
- typhoon_interrupt
- typhoon_free_rx_rings
- typhoon_sleep
- typhoon_wakeup
- typhoon_start_runtime
- typhoon_stop_runtime
- typhoon_tx_timeout
- typhoon_open
- typhoon_close
- typhoon_resume
- typhoon_suspend
- typhoon_test_mmio
- typhoon_init_one
- typhoon_remove_one
- typhoon_init
- typhoon_cleanup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/* Rx packets no larger than this are copied into a fresh skb and the
 * original buffer is handed straight back to the NIC.
 */
static int rx_copybreak = 200;

/* Register access method: 0 = PIO, 1 = MMIO, 2 = try MMIO and fall
 * back to PIO (see the module_param description below).
 */
static unsigned int use_mmio = 2;

/* With more multicast addresses than this, the Rx filter is switched
 * to all-multicast instead of hashing (see typhoon_set_rx_mode()).
 */
static const int multicast_filter_limit = 32;

/* Number of descriptors in each ring shared with the card.  These are
 * fixed at build time; typhoon_get_ringparam() reports them read-only.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* One free-buffer slot stays unused so a full ring can be told apart
 * from an empty one (see typhoon_num_free()), hence RXENT_ENTRIES.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Tx watchdog interval -- presumably installed as dev->watchdog_timeo
 * outside this chunk; TODO confirm in typhoon_init_one().
 */
#define TX_TIMEOUT		(2*HZ)

/* Receive buffer size -- large enough for a standard Ethernet frame. */
#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"

/* Must be defined before the first header that uses pr_*() logging. */
#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
100
101 #include <linux/module.h>
102 #include <linux/kernel.h>
103 #include <linux/sched.h>
104 #include <linux/string.h>
105 #include <linux/timer.h>
106 #include <linux/errno.h>
107 #include <linux/ioport.h>
108 #include <linux/interrupt.h>
109 #include <linux/pci.h>
110 #include <linux/netdevice.h>
111 #include <linux/etherdevice.h>
112 #include <linux/skbuff.h>
113 #include <linux/mm.h>
114 #include <linux/init.h>
115 #include <linux/delay.h>
116 #include <linux/ethtool.h>
117 #include <linux/if_vlan.h>
118 #include <linux/crc32.h>
119 #include <linux/bitops.h>
120 #include <asm/processor.h>
121 #include <asm/io.h>
122 #include <linux/uaccess.h>
123 #include <linux/in6.h>
124 #include <linux/dma-mapping.h>
125 #include <linux/firmware.h>
126
127 #include "typhoon.h"
128
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
	       "the buffer given back to the NIC. Default "
	       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
	       "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The Typhoon's TSO scatter/gather list holds at most 32 entries; if
 * the kernel can hand us more fragments than that, disable TSO.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* The Tx ring must hold at least two maximally-fragmented packets; the
 * transmit path (typhoon_start_tx) relies on this headroom.
 */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
150
/* Static description of one card variant: probe name plus a mask of
 * the TYPHOON_* capability flags below.
 */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};

/* Capability flags for typhoon_card_info.capabilities. */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define	TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Indexes into typhoon_card_info[]; used as driver_data in the PCI
 * device table, so the order must match the array below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
169
170
/* One entry per enum typhoon_cards value, in the same order -- the
 * enum is used to index this array via the PCI table's driver_data.
 */
static struct typhoon_card_info typhoon_card_info[] = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
199
200
201
202
203
204
205
/* PCI IDs claimed by this driver.  Several device IDs cover multiple
 * card variants, distinguished by subdevice ID; the final field is an
 * enum typhoon_cards value used to index typhoon_card_info[].
 */
static const struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
236
237
238
239
240
#define __3xp_aligned	____cacheline_aligned
/* The block of host memory shared with the 3XP.  The card is handed
 * the bus address of each member (see typhoon_init_interface()), so
 * the layout must stay exactly as the firmware expects -- hence
 * __packed, with each ring pushed onto its own cache line.
 */
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;	/* bus address exported as iface->zeroAddr */
	struct tx_desc			txHi[TXHI_ENTRIES];
} __packed;

/* Host-side record of one receive buffer lent to the card: the skb
 * and the DMA mapping that must be unmapped on completion.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
259
/* Per-adapter private state (netdev_priv(dev)).  Fields are grouped
 * into cache lines by the path that touches them: transmit, receive/
 * interrupt, command issue, and rarely-used state.
 */
struct typhoon {
	/* Tx path */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;	/* bus address of the txLo ring */

	/* Rx / interrupt path */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* -> shared->indexes */
	u8			awaiting_resp;	/* a command response is being polled for */
	u8			duplex;		/* DUPLEX_*, or 0xff when unknown */
	u8			speed;		/* SPEED_*, or 0xff when unknown */
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;	/* free buffers offered to the card */
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* Command path, serialized by command_lock */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats_saved;	/* counters reported while the card sleeps */
	struct typhoon_shared *	shared;		/* host/card shared region */
	dma_addr_t		shared_dma;
	__le16			xcvr_select;	/* TYPHOON_XCVR_* currently selected */
	__le16			wol_events;	/* TYPHOON_WAKE_* mask */
	__le32			offload;	/* TYPHOON_OFFLOAD_* mask */

	/* Rarely used */
	int			capabilities;	/* TYPHOON_CRYPTO_x / TYPHOON_FIBER flags */
	struct transmit_ring	txHiRing;
};
297
/* How callers of typhoon_reset() want to wait for completion. */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* Coarse card state kept in tp->card_state; readers pair an smp_rmb()
 * with the writer's barrier before inspecting it.
 */
enum state_values {
	Sleeping = 0, Running,
};

/* PCI writes are posted; reading the heartbeat register back forces
 * them out to the card.  Only meaningful when using MMIO.
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* Polling interval and budgets used when spinning on card registers:
 * 6 seconds for reset, 0.5 seconds for ordinary status waits.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers compile to no-ops when the kernel lacks NETIF_F_TSO. */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 		0
#define skb_tso_size(x) 	0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
333
334 static inline void
335 typhoon_inc_index(u32 *index, const int count, const int num_entries)
336 {
337
338
339
340
341 *index += count * sizeof(struct cmd_desc);
342 *index %= num_entries * sizeof(struct cmd_desc);
343 }
344
/* Advance a command-ring byte offset by @count descriptors, wrapping. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
350
/* Advance a response-ring byte offset by @count descriptors, wrapping. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
356
/* Advance a free-buffer-ring byte offset by @count entries, wrapping.
 * NOTE(review): this relies on sizeof(struct rx_free) equalling
 * sizeof(struct cmd_desc), since the generic helper steps in cmd_desc
 * units -- confirm against typhoon.h.
 */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
362
/* Advance a Tx-ring byte offset by @count descriptors, wrapping. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
369
370 static inline void
371 typhoon_inc_rx_index(u32 *index, const int count)
372 {
373
374 *index += count * sizeof(struct rx_desc);
375 *index %= RX_ENTRIES * sizeof(struct rx_desc);
376 }
377
/* Soft-reset the 3XP and optionally wait for it to come back up.
 *
 * @ioaddr:	mapped register base
 * @wait_type:	NoWait      -- trigger the reset and return immediately
 *		WaitNoSleep -- busy-wait (safe in atomic context)
 *		WaitSleep   -- sleep while polling (process context only)
 *
 * Returns 0 once the card reports TYPHOON_STATUS_WAITING_FOR_HOST (or
 * unconditionally for NoWait), -ETIMEDOUT otherwise.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* Mask and ack everything so stale interrupts can't fire. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Pulse the reset line: assert, flush the posted write, hold
	 * briefly, then deassert.
	 */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* Re-mask and re-ack: the reset may have latched new status. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Give the card a settling period before anyone talks to it
	 * again.  NOTE(review): the 5ms/500us figures look empirical --
	 * treat as required hardware quirk, do not shorten.
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
432
433 static int
434 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
435 {
436 int i, err = 0;
437
438 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
439 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
440 goto out;
441 udelay(TYPHOON_UDELAY);
442 }
443
444 err = -ETIMEDOUT;
445
446 out:
447 return err;
448 }
449
450 static inline void
451 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
452 {
453 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
454 netif_carrier_off(dev);
455 else
456 netif_carrier_on(dev);
457 }
458
/* Answer the card's HELLO keepalive with a HELLO_RESP command.
 *
 * Called from typhoon_process_response(), potentially in interrupt
 * context, so we must not spin on command_lock.  If the lock is
 * contended, another command is already on its way to the card, which
 * serves the same keepalive purpose, so silently skipping is fine.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	if(spin_trylock(&tp->command_lock)) {
		/* Claim the next command slot and advance the write offset. */
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* Descriptor must be visible in memory before the doorbell. */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
479
/* Drain the response ring that the card fills asynchronously.
 *
 * If @resp_save is non-NULL, the first sequenced response (seqNo set)
 * is copied into it, up to @resp_size descriptors; oversized responses
 * are flagged with TYPHOON_RESP_ERROR and dropped.  Unsolicited
 * responses (media status, keepalive) are handled inline; anything
 * unrecognized is logged.
 *
 * Returns nonzero once the awaited response has been delivered (i.e.
 * resp_save was consumed or was NULL to begin with), zero otherwise.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
			struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			/* This is the response the command issuer awaits. */
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* The response may wrap past the ring end; copy
			 * it out in up to two pieces.
			 */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* Mark the awaited response as delivered. */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* Publish how far we consumed so the card can reuse the slots. */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
538
539 static inline int
540 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
541 {
542
543
544
545 lastWrite /= sizeof(struct cmd_desc);
546 lastRead /= sizeof(struct cmd_desc);
547 return (ringSize + lastRead - lastWrite - 1) % ringSize;
548 }
549
550 static inline int
551 typhoon_num_free_cmd(struct typhoon *tp)
552 {
553 int lastWrite = tp->cmdRing.lastWrite;
554 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
555
556 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
557 }
558
559 static inline int
560 typhoon_num_free_resp(struct typhoon *tp)
561 {
562 int respReady = le32_to_cpu(tp->indexes->respReady);
563 int respCleared = le32_to_cpu(tp->indexes->respCleared);
564
565 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
566 }
567
/* Free descriptor slots in a transmit ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
574
/* Send @num_cmd command descriptors to the 3XP and, for commands that
 * respond, collect up to @num_resp response descriptors into @resp.
 *
 * Takes tp->command_lock; callers must not hold it.  If the command
 * requests a response but @resp is NULL, the response is absorbed into
 * a local buffer and discarded.
 *
 * Returns 0 on success, -ENOMEM when the rings lack space, -ETIMEDOUT
 * when no response arrives, or -EIO when the response carries
 * TYPHOON_RESP_ERROR.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* Tell the interrupt path that we are polling for a
		 * response, so it leaves the response ring to us until
		 * we are done (see the out: path below).
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* Copy the command descriptors into the ring, in two pieces if
	 * they wrap past the ring end.
	 */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* Descriptors must be visible in memory before the doorbell;
	 * the heartbeat read-back then flushes the posted PCI write.
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* We cannot sleep while holding command_lock, so busy-poll the
	 * response ring until our sequenced response shows up or the
	 * budget runs out.  Commands that need a reply are rare, so
	 * the spin is acceptable.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Surface a firmware-flagged failure from the first response
	 * descriptor as -EIO.
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Unsolicited responses (media status, keepalives) may
		 * have arrived while the interrupt handler was deferring
		 * to us.  If anything is still pending, raise a self-
		 * interrupt so it gets processed promptly.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
689
/* Write a TSO option descriptor for @skb into @txRing.
 *
 * @ring_dma is the bus address of the ring base.  respAddrLo ends up
 * pointing at this descriptor's own bytesTx field -- NOTE(review):
 * that follows from the offsetof() arithmetic below; presumably the
 * card writes segmentation status back into the ring slot itself.
 * Confirm against the 3XP firmware interface documentation.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		 u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	/* Claim the next slot and compute its bus address. */
	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* Whole skb is one TSO burst: first and last segment flags set. */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
710
/* ndo_start_xmit: queue @skb on the low-priority Tx ring.
 *
 * Builds a header descriptor carrying checksum/VLAN/TSO process flags
 * and the skb pointer, an optional TSO option descriptor, and one
 * fragment descriptor per DMA mapping, then rings the card's doorbell.
 * Always returns NETDEV_TX_OK; backpressure is applied by stopping the
 * queue when the ring cannot hold another worst-case packet.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* Only the low-priority ring is used; the Hi ring exists but is
	 * never written here (see typhoon_inc_tx_index()/num_free_tx(),
	 * which hard-code TXLO_ENTRIES for the same reason).
	 */
	txRing = &tp->txLoRing;

	/* Descriptors needed for this skb: one header descriptor, one
	 * fragment descriptor per page fragment plus one for the linear
	 * head, and one extra option descriptor when doing TSO.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* This spin should never trigger: the queue is stopped (below)
	 * whenever fewer than numDesc + 2 worst-case slots remain.  It
	 * guards the race window where a completion frees slots between
	 * our check and the stop.  NOTE(review): relies on the Tx
	 * completion path advancing ring->lastRead -- not visible here.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Stash the skb pointer in the descriptor; opaque to the card,
	 * presumably read back by the Tx completion path to free the
	 * skb -- TODO confirm in typhoon_tx_complete().
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The card is asked to checksum TCP, UDP and IP alike;
		 * it presumably works out which apply per packet.
		 */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		/* Have the card insert the VLAN tag, in wire byte order. */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* DMA-map the skb data and emit one fragment descriptor per
	 * mapping: just the linear buffer for an unfragmented skb,
	 * otherwise the head followed by each page fragment.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = skb_frag_size(frag);
			frag_addr = skb_frag_address(frag);
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Descriptors must be globally visible before the doorbell. */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* Stop the queue unless the ring can still take a worst-case
	 * packet: max fragments, TSO descriptors, and the header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx completion may have freed slots between the check
		 * and the stop; re-check and wake to avoid a stalled
		 * queue that nothing would ever restart.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
857
/* ndo_set_rx_mode: program the card's Rx filter from dev->flags and
 * the multicast list.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or we were asked for everything. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Build a 64-bit hash filter from the CRC of each
		 * multicast address and upload it to the card.
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	/* SET_RX_FILTER responds; passing resp == NULL lets
	 * typhoon_issue_command() absorb the reply into a local buffer.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
896
/* Fetch the card's hardware counters via READ_STATS and fold them,
 * together with the counters saved across the last stop/sleep, into
 * dev->stats.  Also refreshes tp->speed/tp->duplex from the reported
 * link status.  Returns 0 on success or a negative errno from
 * typhoon_issue_command().
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->dev->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* The card zeroes its counters on reset, so the totals since
	 * ifconfig-up are hardware counters plus the saved snapshot.
	 * NOTE(review): tx_errors mirrors txCarrierLost only -- other
	 * Tx error counters the firmware may keep are not folded in.
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	/* Link speed/duplex ride along in the stats response. */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
948
949 static struct net_device_stats *
950 typhoon_get_stats(struct net_device *dev)
951 {
952 struct typhoon *tp = netdev_priv(dev);
953 struct net_device_stats *stats = &tp->dev->stats;
954 struct net_device_stats *saved = &tp->stats_saved;
955
956 smp_rmb();
957 if(tp->card_state == Sleeping)
958 return saved;
959
960 if(typhoon_do_get_stats(tp) < 0) {
961 netdev_err(dev, "error getting stats\n");
962 return saved;
963 }
964
965 return stats;
966 }
967
968 static void
969 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
970 {
971 struct typhoon *tp = netdev_priv(dev);
972 struct pci_dev *pci_dev = tp->pdev;
973 struct cmd_desc xp_cmd;
974 struct resp_desc xp_resp[3];
975
976 smp_rmb();
977 if(tp->card_state == Sleeping) {
978 strlcpy(info->fw_version, "Sleep image",
979 sizeof(info->fw_version));
980 } else {
981 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
982 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
983 strlcpy(info->fw_version, "Unknown runtime",
984 sizeof(info->fw_version));
985 } else {
986 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
987 snprintf(info->fw_version, sizeof(info->fw_version),
988 "%02x.%03x.%03x", sleep_ver >> 24,
989 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
990 }
991 }
992
993 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
994 strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
995 }
996
/* ethtool get_link_ksettings: report supported/advertised modes based
 * on the selected transceiver and the card's fiber/copper capability.
 */
static int
typhoon_get_link_ksettings(struct net_device *dev,
			   struct ethtool_link_ksettings *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	u32 supported, advertising = 0;

	supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	} else {
		/* 10baseT modes only exist on the copper cards. */
		supported |= SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
	}

	/* tp->speed/tp->duplex are only refreshed by READ_STATS, so
	 * fetch stats to make them current before reporting.
	 */
	typhoon_do_get_stats(tp);
	cmd->base.speed = tp->speed;
	cmd->base.duplex = tp->duplex;
	cmd->base.phy_address = 0;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
1058
1059 static int
1060 typhoon_set_link_ksettings(struct net_device *dev,
1061 const struct ethtool_link_ksettings *cmd)
1062 {
1063 struct typhoon *tp = netdev_priv(dev);
1064 u32 speed = cmd->base.speed;
1065 struct cmd_desc xp_cmd;
1066 __le16 xcvr;
1067 int err;
1068
1069 err = -EINVAL;
1070 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1071 xcvr = TYPHOON_XCVR_AUTONEG;
1072 } else {
1073 if (cmd->base.duplex == DUPLEX_HALF) {
1074 if (speed == SPEED_10)
1075 xcvr = TYPHOON_XCVR_10HALF;
1076 else if (speed == SPEED_100)
1077 xcvr = TYPHOON_XCVR_100HALF;
1078 else
1079 goto out;
1080 } else if (cmd->base.duplex == DUPLEX_FULL) {
1081 if (speed == SPEED_10)
1082 xcvr = TYPHOON_XCVR_10FULL;
1083 else if (speed == SPEED_100)
1084 xcvr = TYPHOON_XCVR_100FULL;
1085 else
1086 goto out;
1087 } else
1088 goto out;
1089 }
1090
1091 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1092 xp_cmd.parm1 = xcvr;
1093 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1094 if(err < 0)
1095 goto out;
1096
1097 tp->xcvr_select = xcvr;
1098 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1099 tp->speed = 0xff;
1100 tp->duplex = 0xff;
1101 } else {
1102 tp->speed = speed;
1103 tp->duplex = cmd->base.duplex;
1104 }
1105
1106 out:
1107 return err;
1108 }
1109
1110 static void
1111 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1112 {
1113 struct typhoon *tp = netdev_priv(dev);
1114
1115 wol->supported = WAKE_PHY | WAKE_MAGIC;
1116 wol->wolopts = 0;
1117 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1118 wol->wolopts |= WAKE_PHY;
1119 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1120 wol->wolopts |= WAKE_MAGIC;
1121 memset(&wol->sopass, 0, sizeof(wol->sopass));
1122 }
1123
1124 static int
1125 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1126 {
1127 struct typhoon *tp = netdev_priv(dev);
1128
1129 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1130 return -EINVAL;
1131
1132 tp->wol_events = 0;
1133 if(wol->wolopts & WAKE_PHY)
1134 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1135 if(wol->wolopts & WAKE_MAGIC)
1136 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1137
1138 return 0;
1139 }
1140
1141 static void
1142 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1143 {
1144 ering->rx_max_pending = RXENT_ENTRIES;
1145 ering->tx_max_pending = TXLO_ENTRIES - 1;
1146
1147 ering->rx_pending = RXENT_ENTRIES;
1148 ering->tx_pending = TXLO_ENTRIES - 1;
1149 }
1150
/* ethtool entry points: link settings and WoL are configurable, ring
 * sizes are reported read-only.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= typhoon_get_ringparam,
	.get_link_ksettings	= typhoon_get_link_ksettings,
	.set_link_ksettings	= typhoon_set_link_ksettings,
};
1160
1161 static int
1162 typhoon_wait_interrupt(void __iomem *ioaddr)
1163 {
1164 int i, err = 0;
1165
1166 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1167 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1168 TYPHOON_INTR_BOOTCMD)
1169 goto out;
1170 udelay(TYPHOON_UDELAY);
1171 }
1172
1173 err = -ETIMEDOUT;
1174
1175 out:
1176 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1177 return err;
1178 }
1179
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* One-time setup of the host/card shared region: fill iface with the
 * bus address and size of every ring, point the host-side ring
 * structures at their in-memory counterparts, and set initial offload
 * and state defaults.  All addresses handed to the card are 32-bit.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* Bus address of each shared-region member, as the card sees it. */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side views of the same rings. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* Checksum and VLAN offloads on by default; TSO when built in. */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Ensure the shared area is fully written before anything else
	 * (host or card) looks at it.
	 */
	wmb();
}
1251
1252 static void
1253 typhoon_init_rings(struct typhoon *tp)
1254 {
1255 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1256
1257 tp->txLoRing.lastWrite = 0;
1258 tp->txHiRing.lastWrite = 0;
1259 tp->rxLoRing.lastWrite = 0;
1260 tp->rxHiRing.lastWrite = 0;
1261 tp->rxBuffRing.lastWrite = 0;
1262 tp->cmdRing.lastWrite = 0;
1263 tp->respRing.lastWrite = 0;
1264
1265 tp->txLoRing.lastRead = 0;
1266 tp->txHiRing.lastRead = 0;
1267 }
1268
/* runtime firmware image; loaded once and shared by all typhoon NICs */
static const struct firmware *typhoon_fw;
1270
1271 static int
1272 typhoon_request_firmware(struct typhoon *tp)
1273 {
1274 const struct typhoon_file_header *fHdr;
1275 const struct typhoon_section_header *sHdr;
1276 const u8 *image_data;
1277 u32 numSections;
1278 u32 section_len;
1279 u32 remaining;
1280 int err;
1281
1282 if (typhoon_fw)
1283 return 0;
1284
1285 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1286 if (err) {
1287 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1288 FIRMWARE_NAME);
1289 return err;
1290 }
1291
1292 image_data = typhoon_fw->data;
1293 remaining = typhoon_fw->size;
1294 if (remaining < sizeof(struct typhoon_file_header))
1295 goto invalid_fw;
1296
1297 fHdr = (struct typhoon_file_header *) image_data;
1298 if (memcmp(fHdr->tag, "TYPHOON", 8))
1299 goto invalid_fw;
1300
1301 numSections = le32_to_cpu(fHdr->numSections);
1302 image_data += sizeof(struct typhoon_file_header);
1303 remaining -= sizeof(struct typhoon_file_header);
1304
1305 while (numSections--) {
1306 if (remaining < sizeof(struct typhoon_section_header))
1307 goto invalid_fw;
1308
1309 sHdr = (struct typhoon_section_header *) image_data;
1310 image_data += sizeof(struct typhoon_section_header);
1311 section_len = le32_to_cpu(sHdr->len);
1312
1313 if (remaining < section_len)
1314 goto invalid_fw;
1315
1316 image_data += section_len;
1317 remaining -= section_len;
1318 }
1319
1320 return 0;
1321
1322 invalid_fw:
1323 netdev_err(tp->dev, "Invalid firmware image\n");
1324 release_firmware(typhoon_fw);
1325 typhoon_fw = NULL;
1326 return -EINVAL;
1327 }
1328
/* Push the validated runtime image into the 3XP.
 *
 * Protocol: program the HMAC digest words and start address, issue
 * RUNTIME_IMAGE, then for each section copy the payload in PAGE_SIZE
 * chunks through a single DMA bounce page.  For every chunk the card is
 * given the length, an Internet checksum of the chunk, the destination
 * address, and the bounce page's DMA address, then SEG_AVAILABLE is
 * issued and we wait (via the BOOTCMD interrupt) for the card to consume
 * it.  Finishes with DNLD_COMPLETE and a wait for WAITING_FOR_BOOT.
 *
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.  The caller's
 * interrupt enable/mask state is restored on all paths.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* single bounce page used for every chunk of the download */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* temporarily enable + unmask the boot-command interrupt so we can
	 * poll it; original values are restored at err_out_irq
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* clear any stale BOOTCMD status, then hand the card the image
	 * start address and the five HMAC digest words
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* image layout was already validated in typhoon_request_firmware(),
	 * so the walk below needs no bounds checks
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* copy the chunk into the bounce page, folding an
			 * Internet checksum as we go -- the card verifies
			 * each segment against this checksum
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to finish eating the last segment */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt mask/enable state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1469
/* Boot the 3XP image already downloaded (or resident) on the card.
 *
 * Waits for the card to report @initial_status, hands it the DMA address
 * of the boot record (the shared area set up in typhoon_init_interface()),
 * waits for TYPHOON_STATUS_RUNNING, then clears the ring-ready registers
 * and issues the final BOOT command.
 *
 * Returns 0 on success, -ETIMEDOUT on any status timeout.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* boot record is always in the low 4GB (32-bit DMA mask) */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
	       ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* reset the ring write-pointer registers to match the freshly
	 * initialized host-side rings before letting the image run
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1505
/* Reclaim transmit descriptors the card has finished with, walking from
 * txRing->lastRead up to *index (the card's cleared offset, in bytes).
 *
 * TYPHOON_TX_DESC entries carry the skb pointer in tx_addr (presumably
 * stashed there by the tx path -- note this only works because the
 * driver is limited to 32-bit addresses); the skb is freed here.
 * TYPHOON_FRAG_DESC entries carry a DMA mapping that is unmapped.
 *
 * Returns the new lastRead offset; the caller is responsible for
 * storing it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* descriptor header: tx_addr holds the skb pointer */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* fragment: unmap the data buffer */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1541
1542 static void
1543 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1544 volatile __le32 * index)
1545 {
1546 u32 lastRead;
1547 int numDesc = MAX_SKB_FRAGS + 1;
1548
1549
1550 lastRead = typhoon_clean_tx(tp, txRing, index);
1551 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1552 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1553 netif_wake_queue(tp->dev);
1554
1555 txRing->lastRead = lastRead;
1556 smp_wmb();
1557 }
1558
/* Return the rx buffer at @idx to the free-buffer ring unchanged
 * (the skb and its DMA mapping are reused).
 *
 * If advancing the free ring's write pointer would collide with the
 * card's cleared pointer the ring is full; the skb is dropped instead
 * of recycled and the slot is left empty for typhoon_fill_free_ring().
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* free ring is full -- drop the buffer */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* descriptor must be fully written before the card sees the new
	 * ready index
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1585
1586 static int
1587 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1588 {
1589 struct typhoon_indexes *indexes = tp->indexes;
1590 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1591 struct basic_ring *ring = &tp->rxBuffRing;
1592 struct rx_free *r;
1593 struct sk_buff *skb;
1594 dma_addr_t dma_addr;
1595
1596 rxb->skb = NULL;
1597
1598 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1599 le32_to_cpu(indexes->rxBuffCleared))
1600 return -ENOMEM;
1601
1602 skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
1603 if(!skb)
1604 return -ENOMEM;
1605
1606 #if 0
1607
1608
1609
1610 skb_reserve(skb, 2);
1611 #endif
1612
1613 dma_addr = pci_map_single(tp->pdev, skb->data,
1614 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1615
1616
1617
1618
1619 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1620 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1621 r->virtAddr = idx;
1622 r->physAddr = cpu_to_le32(dma_addr);
1623 rxb->skb = skb;
1624 rxb->dma_addr = dma_addr;
1625
1626
1627 wmb();
1628 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1629 return 0;
1630 }
1631
/* Process up to @budget received packets on one rx ring (rxLo or rxHi).
 *
 * @ready / @cleared are the card-updated and host-updated byte offsets
 * into the ring.  Small packets (< rx_copybreak) are copied into a new
 * skb so the original buffer can be recycled without remapping; larger
 * packets hand the original skb up the stack and a replacement buffer
 * is allocated.  Checksum-offload results and hardware VLAN tags from
 * the descriptor are propagated onto the skb.
 *
 * Returns the number of packets delivered; updates *cleared.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr carries the rxbuffers[] slot we posted in
		 * typhoon_alloc_rx_skb() (r->virtAddr)
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* bad packet: give the buffer straight back */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
			/* copybreak: copy into a small skb (aligned via the
			 * 2-byte reserve) and recycle the original buffer
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the original skb up; slot gets a new buffer */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* checksum is only trusted when IP *and* the L4 checksum
		 * both verified
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1708
1709 static void
1710 typhoon_fill_free_ring(struct typhoon *tp)
1711 {
1712 u32 i;
1713
1714 for(i = 0; i < RXENT_ENTRIES; i++) {
1715 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1716 if(rxb->skb)
1717 continue;
1718 if(typhoon_alloc_rx_skb(tp, i) < 0)
1719 break;
1720 }
1721 }
1722
/* NAPI poll handler.
 *
 * Drains pending command responses, reclaims completed txLo descriptors,
 * then services both rx rings (rxHi first) within @budget.  If the card
 * has exhausted the free-buffer ring it is refilled here.  When less
 * than the full budget was used, polling is completed and the interrupt
 * mask is reopened (TYPHOON_INTR_NONE unmasks everything).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* order reads of the card-written index page after the interrupt */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* card is out of rx buffers -- replenish the free ring */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable interrupts (NONE == nothing masked) */
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1763
1764 static irqreturn_t
1765 typhoon_interrupt(int irq, void *dev_instance)
1766 {
1767 struct net_device *dev = dev_instance;
1768 struct typhoon *tp = netdev_priv(dev);
1769 void __iomem *ioaddr = tp->ioaddr;
1770 u32 intr_status;
1771
1772 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1773 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1774 return IRQ_NONE;
1775
1776 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1777
1778 if (napi_schedule_prep(&tp->napi)) {
1779 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1780 typhoon_post_pci_writes(ioaddr);
1781 __napi_schedule(&tp->napi);
1782 } else {
1783 netdev_err(dev, "Error, poll already scheduled\n");
1784 }
1785 return IRQ_HANDLED;
1786 }
1787
1788 static void
1789 typhoon_free_rx_rings(struct typhoon *tp)
1790 {
1791 u32 i;
1792
1793 for(i = 0; i < RXENT_ENTRIES; i++) {
1794 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1795 if(rxb->skb) {
1796 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1797 PCI_DMA_FROMDEVICE);
1798 dev_kfree_skb(rxb->skb);
1799 rxb->skb = NULL;
1800 }
1801 }
1802 }
1803
/* Put the card (running the sleep image) and the PCI device to sleep.
 *
 * @events selects the wake-on-LAN events to arm before sleeping.
 * Issues ENABLE_WAKE_EVENTS then GOTO_SLEEP, waits for the SLEEPING
 * status, and finally transitions the PCI device into @state with wake
 * enabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* the sleep image does not maintain a link, so reflect that in
	 * the carrier state
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1840
/* Wake the card from sleep.
 *
 * Restores PCI state, issues the WAKEUP boot command, and waits for the
 * card to come back to WAITING_FOR_HOST.  Cards flagged with
 * TYPHOON_WAKEUP_NEEDS_RESET (and any card that times out) are instead
 * brought up via a full reset using @wait_type.
 *
 * Returns 0 on success or the typhoon_reset() result.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* ask the sleep image to wake; fall back to a hard reset if the
	 * card doesn't respond or is known to need one
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1861
/* Bring the card fully online: download and boot the runtime image,
 * then program it (MTU, MAC, coalescing, transceiver, VLAN ethertype,
 * offloads, rx filter) and enable tx/rx and interrupts.
 *
 * On any failure the card is reset and the rings reinitialized so the
 * caller is left in a consistent (stopped) state.
 *
 * Returns 0 on success or a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	/* largest frame the card should accept */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* station address, split 2+4 bytes across parm1/parm2 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* disable interrupt coalescing -- one interrupt per event */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* offload mask chosen in typhoon_init_interface() */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	/* leave the caller with a quiesced card and clean rings */
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1953
/* Quiesce the running card: disable rx, drain outstanding tx, disable
 * tx, snapshot the statistics, halt the image, and reset the chip with
 * @wait_type.  Any tx descriptors still outstanding after the reset are
 * reclaimed so no skbs leak.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* stop further interrupts from the card while shutting down */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* give in-flight transmits a chance to complete */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* mark the card sleeping before the final stats read so the stats
	 * path knows not to talk to the hardware afterwards
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* reclaim anything the card never got around to cleaning */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2014
/* netdev tx watchdog: attempt to recover a wedged card by resetting it
 * and restarting the runtime.  If either step fails the card is left
 * reset with the carrier down ("truly dead").
 */
static void
typhoon_tx_timeout(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* free any pending tx skbs and rx buffers before restarting */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* best effort: leave the hardware quiet */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2042
/* ndo_open: load firmware, wake the card, grab the (shared) IRQ, start
 * the runtime image, and open the tx queue.
 *
 * On failure the card is rebooted into the sleep image and put back to
 * sleep so a later open starts from a known state.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* put the card back into its low-power sleep image */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2091
/* ndo_stop: stop the runtime, release the IRQ and rx buffers, reboot
 * the sleep image, and put the card to sleep with no wake events armed.
 * Always returns 0; partial failures are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* no more interrupts expected past this point */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2117
2118 #ifdef CONFIG_PM
/* PM resume: wake the card and restart the runtime for an interface
 * that was running at suspend time.  A failure leaves the card reset
 * and returns -EBUSY.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* nothing to do if the interface was down when we suspended */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2147
/* PM suspend: stop the runtime, reboot into the sleep image, program
 * the sleep image with the MAC address and a minimal rx filter
 * (directed + broadcast) so wake-on-LAN can match frames, then sleep
 * the card with tp->wol_events armed.
 *
 * Any failure rolls back via typhoon_resume() and returns -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* nothing to do if the interface isn't up */
	if(!netif_running(dev))
		return 0;

	/* sleep image limitation -- warn, but continue suspending */
	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2205 #endif
2206
/* Probe whether memory-mapped I/O works on this board by asking the
 * card to raise a self-interrupt through the MMIO BAR and checking that
 * the status bit appears.
 *
 * Returns 1 if MMIO works, 0 to fall back to port I/O.  All interrupt
 * state touched here is restored before returning.
 */
static int
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* only run the self-interrupt test if the bit isn't already set;
	 * seeing it appear after we request it proves MMIO writes reach
	 * the card
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore quiesced interrupt state */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2252
/* net_device entry points; address ops use the generic ethernet helpers */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_rx_mode	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
2263
2264 static int
2265 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2266 {
2267 struct net_device *dev;
2268 struct typhoon *tp;
2269 int card_id = (int) ent->driver_data;
2270 void __iomem *ioaddr;
2271 void *shared;
2272 dma_addr_t shared_dma;
2273 struct cmd_desc xp_cmd;
2274 struct resp_desc xp_resp[3];
2275 int err = 0;
2276 const char *err_msg;
2277
2278 dev = alloc_etherdev(sizeof(*tp));
2279 if(dev == NULL) {
2280 err_msg = "unable to alloc new net device";
2281 err = -ENOMEM;
2282 goto error_out;
2283 }
2284 SET_NETDEV_DEV(dev, &pdev->dev);
2285
2286 err = pci_enable_device(pdev);
2287 if(err < 0) {
2288 err_msg = "unable to enable device";
2289 goto error_out_dev;
2290 }
2291
2292 err = pci_set_mwi(pdev);
2293 if(err < 0) {
2294 err_msg = "unable to set MWI";
2295 goto error_out_disable;
2296 }
2297
2298 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2299 if(err < 0) {
2300 err_msg = "No usable DMA configuration";
2301 goto error_out_mwi;
2302 }
2303
2304
2305
2306 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2307 err_msg = "region #1 not a PCI IO resource, aborting";
2308 err = -ENODEV;
2309 goto error_out_mwi;
2310 }
2311 if(pci_resource_len(pdev, 0) < 128) {
2312 err_msg = "Invalid PCI IO region size, aborting";
2313 err = -ENODEV;
2314 goto error_out_mwi;
2315 }
2316 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2317 err_msg = "region #1 not a PCI MMIO resource, aborting";
2318 err = -ENODEV;
2319 goto error_out_mwi;
2320 }
2321 if(pci_resource_len(pdev, 1) < 128) {
2322 err_msg = "Invalid PCI MMIO region size, aborting";
2323 err = -ENODEV;
2324 goto error_out_mwi;
2325 }
2326
2327 err = pci_request_regions(pdev, KBUILD_MODNAME);
2328 if(err < 0) {
2329 err_msg = "could not request regions";
2330 goto error_out_mwi;
2331 }
2332
2333
2334
2335 if(use_mmio != 0 && use_mmio != 1)
2336 use_mmio = typhoon_test_mmio(pdev);
2337
2338 ioaddr = pci_iomap(pdev, use_mmio, 128);
2339 if (!ioaddr) {
2340 err_msg = "cannot remap registers, aborting";
2341 err = -EIO;
2342 goto error_out_regions;
2343 }
2344
2345
2346
2347 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2348 &shared_dma);
2349 if(!shared) {
2350 err_msg = "could not allocate DMA memory";
2351 err = -ENOMEM;
2352 goto error_out_remap;
2353 }
2354
2355 dev->irq = pdev->irq;
2356 tp = netdev_priv(dev);
2357 tp->shared = shared;
2358 tp->shared_dma = shared_dma;
2359 tp->pdev = pdev;
2360 tp->tx_pdev = pdev;
2361 tp->ioaddr = ioaddr;
2362 tp->tx_ioaddr = ioaddr;
2363 tp->dev = dev;
2364
2365
2366
2367
2368
2369
2370
2371
2372 err = typhoon_reset(ioaddr, WaitSleep);
2373 if (err < 0) {
2374 err_msg = "could not reset 3XP";
2375 goto error_out_dma;
2376 }
2377
2378
2379
2380
2381
2382 pci_set_master(pdev);
2383 pci_save_state(pdev);
2384
2385 typhoon_init_interface(tp);
2386 typhoon_init_rings(tp);
2387
2388 err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
2389 if (err < 0) {
2390 err_msg = "cannot boot 3XP sleep image";
2391 goto error_out_reset;
2392 }
2393
2394 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2395 err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
2396 if (err < 0) {
2397 err_msg = "cannot read MAC address";
2398 goto error_out_reset;
2399 }
2400
2401 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2402 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2403
2404 if (!is_valid_ether_addr(dev->dev_addr)) {
2405 err_msg = "Could not obtain valid ethernet address, aborting";
2406 err = -EIO;
2407 goto error_out_reset;
2408 }
2409
2410
2411
2412
2413 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2414 err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
2415 if (err < 0) {
2416 err_msg = "Could not get Sleep Image version";
2417 goto error_out_reset;
2418 }
2419
2420 tp->capabilities = typhoon_card_info[card_id].capabilities;
2421 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2422
2423
2424
2425
2426
2427
2428
2429 if(xp_resp[0].numDesc != 0)
2430 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2431
2432 err = typhoon_sleep(tp, PCI_D3hot, 0);
2433 if (err < 0) {
2434 err_msg = "cannot put adapter to sleep";
2435 goto error_out_reset;
2436 }
2437
2438
2439 dev->netdev_ops = &typhoon_netdev_ops;
2440 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2441 dev->watchdog_timeo = TX_TIMEOUT;
2442
2443 dev->ethtool_ops = &typhoon_ethtool_ops;
2444
2445
2446
2447
2448
2449
2450
2451
2452 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2453 NETIF_F_HW_VLAN_CTAG_TX;
2454 dev->features = dev->hw_features |
2455 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2456
2457 err = register_netdev(dev);
2458 if (err < 0) {
2459 err_msg = "unable to register netdev";
2460 goto error_out_reset;
2461 }
2462
2463 pci_set_drvdata(pdev, dev);
2464
2465 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2466 typhoon_card_info[card_id].name,
2467 use_mmio ? "MMIO" : "IO",
2468 (unsigned long long)pci_resource_start(pdev, use_mmio),
2469 dev->dev_addr);
2470
2471
2472
2473
2474 if(xp_resp[0].numDesc == 0) {
2475
2476
2477
2478 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2479 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2480 monthday >> 8, monthday & 0xff);
2481 } else if(xp_resp[0].numDesc == 2) {
2482
2483
2484 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2485 u8 *ver_string = (u8 *) &xp_resp[1];
2486 ver_string[25] = 0;
2487 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2488 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2489 sleep_ver & 0xfff, ver_string);
2490 } else {
2491 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2492 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2493 }
2494
2495 return 0;
2496
2497 error_out_reset:
2498 typhoon_reset(ioaddr, NoWait);
2499
2500 error_out_dma:
2501 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2502 shared, shared_dma);
2503 error_out_remap:
2504 pci_iounmap(pdev, ioaddr);
2505 error_out_regions:
2506 pci_release_regions(pdev);
2507 error_out_mwi:
2508 pci_clear_mwi(pdev);
2509 error_out_disable:
2510 pci_disable_device(pdev);
2511 error_out_dev:
2512 free_netdev(dev);
2513 error_out:
2514 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2515 return err;
2516 }
2517
/*
 * Undo everything typhoon_init_one() did, in reverse order.
 * Called on device removal / driver unbind.
 */
static void
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Detach from the network stack first so no new I/O arrives
	 * while we tear the hardware down.
	 */
	unregister_netdev(dev);

	/* Probe left the adapter asleep in D3hot (see typhoon_sleep()
	 * in typhoon_init_one()); wake it to D0 and restore config
	 * space so the reset below reaches live hardware.
	 */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);

	/* Release resources in the reverse order they were acquired. */
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2536
/* PCI driver glue.  Legacy suspend/resume hooks are only wired up when
 * power-management support is configured into the kernel.
 */
static struct pci_driver typhoon_driver = {
	.name = KBUILD_MODNAME,
	.id_table = typhoon_pci_tbl,
	.probe = typhoon_init_one,
	.remove = typhoon_remove_one,
#ifdef CONFIG_PM
	.suspend = typhoon_suspend,
	.resume = typhoon_resume,
#endif
};
2547
2548 static int __init
2549 typhoon_init(void)
2550 {
2551 return pci_register_driver(&typhoon_driver);
2552 }
2553
/*
 * Module exit point: drop the firmware image cached by
 * typhoon_request_firmware() (if one was ever loaded), then unregister
 * the PCI driver, which detaches all bound devices.
 */
static void __exit
typhoon_cleanup(void)
{
	release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
2560
/* Register module load/unload handlers. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);