This source file includes following definitions.
- skfp_init_one
- skfp_remove_one
- skfp_driver_init
- skfp_open
- skfp_close
- skfp_interrupt
- skfp_ctl_get_stats
- skfp_ctl_set_multicast_list
- skfp_ctl_set_multicast_list_wo_lock
- skfp_ctl_set_mac_address
- skfp_ioctl
- skfp_send_pkt
- send_queued_packets
- CheckSourceAddress
- ResetAdapter
- llc_restart_tx
- mac_drv_get_space
- mac_drv_get_desc_mem
- mac_drv_virt2phys
- dma_master
- dma_complete
- mac_drv_tx_complete
- dump_data
- mac_drv_rx_complete
- mac_drv_requeue_rxd
- mac_drv_fill_rxd
- mac_drv_clear_rxd
- mac_drv_rx_init
- smt_timer_poll
- ring_status_indication
- smt_get_time
- smt_stat_counter
- cfm_state_change
- ecm_state_change
- rmt_state_change
- drv_reset_indication
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64 #define VERSION "2.07"
65
66 static const char * const boot_msg =
67 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
68 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69
70
71
72 #include <linux/capability.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
80 #include <linux/fddidevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/bitops.h>
83 #include <linux/gfp.h>
84
85 #include <asm/byteorder.h>
86 #include <asm/io.h>
87 #include <linux/uaccess.h>
88
89 #include "h/types.h"
90 #undef ADDR
91 #include "h/skfbi.h"
92 #include "h/fddi.h"
93 #include "h/smc.h"
94 #include "h/smtstate.h"
95
96
97
98 static int skfp_driver_init(struct net_device *dev);
99 static int skfp_open(struct net_device *dev);
100 static int skfp_close(struct net_device *dev);
101 static irqreturn_t skfp_interrupt(int irq, void *dev_id);
102 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
103 static void skfp_ctl_set_multicast_list(struct net_device *dev);
104 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
105 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
106 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
107 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
108 struct net_device *dev);
109 static void send_queued_packets(struct s_smc *smc);
110 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
111 static void ResetAdapter(struct s_smc *smc);
112
113
114
115 void *mac_drv_get_space(struct s_smc *smc, u_int size);
116 void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
117 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
118 unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
119 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
120 int flag);
121 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
122 void llc_restart_tx(struct s_smc *smc);
123 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
124 int frag_count, int len);
125 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126 int frag_count);
127 void mac_drv_fill_rxd(struct s_smc *smc);
128 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
129 int frag_count);
130 int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
131 int la_len);
132 void dump_data(unsigned char *Data, int length);
133
134
135 extern u_int mac_drv_check_space(void);
136 extern int mac_drv_init(struct s_smc *smc);
137 extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
138 int len, int frame_status);
139 extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
140 int frame_len, int frame_status);
141 extern void fddi_isr(struct s_smc *smc);
142 extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
143 int len, int frame_status);
144 extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
145 extern void mac_drv_clear_rx_queue(struct s_smc *smc);
146 extern void enable_tx_irq(struct s_smc *smc, u_short queue);
147
148 static const struct pci_device_id skfddi_pci_tbl[] = {
149 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
150 { }
151 };
152 MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
153 MODULE_LICENSE("GPL");
154 MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
155
156
157
/* Number of adapters probed so far; used to print the boot banner only once. */
static int num_boards;

/*
 * Net-device callbacks the networking core invokes on this interface.
 * All handlers are defined below in this file.
 */
static const struct net_device_ops skfp_netdev_ops = {
	.ndo_open = skfp_open,
	.ndo_stop = skfp_close,
	.ndo_start_xmit = skfp_send_pkt,
	.ndo_get_stats = skfp_ctl_get_stats,
	.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
	.ndo_set_mac_address = skfp_ctl_set_mac_address,
	.ndo_do_ioctl = skfp_ioctl,
};
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199 static int skfp_init_one(struct pci_dev *pdev,
200 const struct pci_device_id *ent)
201 {
202 struct net_device *dev;
203 struct s_smc *smc;
204 void __iomem *mem;
205 int err;
206
207 pr_debug("entering skfp_init_one\n");
208
209 if (num_boards == 0)
210 printk("%s\n", boot_msg);
211
212 err = pci_enable_device(pdev);
213 if (err)
214 return err;
215
216 err = pci_request_regions(pdev, "skfddi");
217 if (err)
218 goto err_out1;
219
220 pci_set_master(pdev);
221
222 #ifdef MEM_MAPPED_IO
223 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
224 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
225 err = -EIO;
226 goto err_out2;
227 }
228
229 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
230 #else
231 if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
232 printk(KERN_ERR "skfp: region is not PIO resource\n");
233 err = -EIO;
234 goto err_out2;
235 }
236
237 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
238 #endif
239 if (!mem) {
240 printk(KERN_ERR "skfp: Unable to map register, "
241 "FDDI adapter will be disabled.\n");
242 err = -EIO;
243 goto err_out2;
244 }
245
246 dev = alloc_fddidev(sizeof(struct s_smc));
247 if (!dev) {
248 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
249 "FDDI adapter will be disabled.\n");
250 err = -ENOMEM;
251 goto err_out3;
252 }
253
254 dev->irq = pdev->irq;
255 dev->netdev_ops = &skfp_netdev_ops;
256
257 SET_NETDEV_DEV(dev, &pdev->dev);
258
259
260 smc = netdev_priv(dev);
261 smc->os.dev = dev;
262 smc->os.bus_type = SK_BUS_TYPE_PCI;
263 smc->os.pdev = *pdev;
264 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
265 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
266 smc->os.dev = dev;
267 smc->hw.slot = -1;
268 smc->hw.iop = mem;
269 smc->os.ResetRequested = FALSE;
270 skb_queue_head_init(&smc->os.SendSkbQueue);
271
272 dev->base_addr = (unsigned long)mem;
273
274 err = skfp_driver_init(dev);
275 if (err)
276 goto err_out4;
277
278 err = register_netdev(dev);
279 if (err)
280 goto err_out5;
281
282 ++num_boards;
283 pci_set_drvdata(pdev, dev);
284
285 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
286 (pdev->subsystem_device & 0xff00) == 0x5800)
287 printk("%s: SysKonnect FDDI PCI adapter"
288 " found (SK-%04X)\n", dev->name,
289 pdev->subsystem_device);
290 else
291 printk("%s: FDDI PCI adapter found\n", dev->name);
292
293 return 0;
294 err_out5:
295 if (smc->os.SharedMemAddr)
296 dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
297 smc->os.SharedMemAddr,
298 smc->os.SharedMemDMA);
299 dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
300 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
301 err_out4:
302 free_netdev(dev);
303 err_out3:
304 #ifdef MEM_MAPPED_IO
305 iounmap(mem);
306 #else
307 ioport_unmap(mem);
308 #endif
309 err_out2:
310 pci_release_regions(pdev);
311 err_out1:
312 pci_disable_device(pdev);
313 return err;
314 }
315
316
317
318
/*
 * skfp_remove_one - tear down one adapter on driver unbind/removal
 * @pdev: the PCI device being removed
 *
 * Reverses everything done in skfp_init_one(): unregisters the net
 * device first (so no new I/O can start), frees the coherent DMA
 * buffers, unmaps the register window, releases the PCI regions and
 * finally disables the device.
 */
static void skfp_remove_one(struct pci_dev *pdev)
{
	struct net_device *p = pci_get_drvdata(pdev);
	struct s_smc *lp = netdev_priv(p);

	unregister_netdev(p);

	/* Free the shared memory used by the hardware module, if any. */
	if (lp->os.SharedMemAddr) {
		dma_free_coherent(&pdev->dev,
				  lp->os.SharedMemSize,
				  lp->os.SharedMemAddr,
				  lp->os.SharedMemDMA);
		lp->os.SharedMemAddr = NULL;
	}
	/* Free the local receive buffer. */
	if (lp->os.LocalRxBuffer) {
		dma_free_coherent(&pdev->dev,
				  MAX_FRAME_SIZE,
				  lp->os.LocalRxBuffer,
				  lp->os.LocalRxBufferDMA);
		lp->os.LocalRxBuffer = NULL;
	}
	/* Unmap the register window the same way it was mapped. */
#ifdef MEM_MAPPED_IO
	iounmap(lp->hw.iop);
#else
	ioport_unmap(lp->hw.iop);
#endif
	pci_release_regions(pdev);
	free_netdev(p);

	pci_disable_device(pdev);
}
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
/*
 * skfp_driver_init - one-time hardware/software initialization
 * @dev: the net device to initialize
 *
 * Allocates the coherent DMA buffers the adapter needs (a local receive
 * buffer and the shared memory area sized by the hardware module),
 * stops the card, runs the hardware-module init and reads the MAC
 * address into dev->dev_addr.
 *
 * Returns 0 on success, -EIO on any failure; on failure all buffers
 * allocated here are freed again.
 */
static int skfp_driver_init(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;
	int err = -EIO;

	pr_debug("entering skfp_driver_init\n");

	bp->base_addr = dev->base_addr;

	smc->hw.irq = dev->irq;

	spin_lock_init(&bp->DriverLock);

	/* Buffer for a single received frame, used as a bounce buffer. */
	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
					       &bp->LocalRxBufferDMA,
					       GFP_ATOMIC);
	if (!bp->LocalRxBuffer) {
		printk("could not allocate mem for ");
		printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
		goto fail;
	}

	/* Ask the hardware module how much shared memory it needs. */
	bp->SharedMemSize = mac_drv_check_space();
	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
	if (bp->SharedMemSize > 0) {
		/* Extra 16 bytes leave room for descriptor alignment
		 * (see mac_drv_get_desc_mem()). */
		bp->SharedMemSize += 16;

		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
						       bp->SharedMemSize,
						       &bp->SharedMemDMA,
						       GFP_ATOMIC);
		if (!bp->SharedMemAddr) {
			printk("could not allocate mem for ");
			printk("hardware module: %ld byte\n",
			       bp->SharedMemSize);
			goto fail;
		}

	} else {
		bp->SharedMemAddr = NULL;
	}

	/* Reset the bump allocator over the shared memory area. */
	bp->SharedMemHeap = 0;

	card_stop(smc);	/* make sure the adapter is quiescent */

	pr_debug("mac_drv_init()..\n");
	if (mac_drv_init(smc) != 0) {
		pr_debug("mac_drv_init() failed\n");
		goto fail;
	}
	/* Read the factory MAC address from the adapter. */
	read_address(smc, NULL);
	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	smt_reset_defaults(smc, 0);

	return 0;

fail:
	/* Release whatever was allocated before the failure. */
	if (bp->SharedMemAddr) {
		dma_free_coherent(&bp->pdev.dev,
				  bp->SharedMemSize,
				  bp->SharedMemAddr,
				  bp->SharedMemDMA);
		bp->SharedMemAddr = NULL;
	}
	if (bp->LocalRxBuffer) {
		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
		bp->LocalRxBuffer = NULL;
	}
	return err;
}
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
/*
 * skfp_open - bring the interface up (.ndo_open)
 * @dev: the net device being opened
 *
 * Registers the (shared) interrupt handler, re-reads the MAC address,
 * brings SMT online and enables reception before starting the transmit
 * queue.  Returns 0 on success or the request_irq() errno.
 */
static int skfp_open(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	int err;

	pr_debug("entering skfp_open\n");

	/* IRQ line may be shared with other PCI devices. */
	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		return err;

	/* Refresh dev_addr from the hardware in case it changed. */
	read_address(smc, NULL);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	init_smt(smc, NULL);
	smt_online(smc, 1);
	STI_FBI();	/* enable board interrupts */

	/* Start with an empty multicast filter ... */
	mac_clear_multicast(smc);

	/* ... and promiscuous mode off. */
	mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);

	netif_start_queue(dev);
	return 0;
}
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
/*
 * skfp_close - bring the interface down (.ndo_stop)
 * @dev: the net device being closed
 *
 * Disables board interrupts, takes the station offline, stops the card,
 * drains the hardware and software transmit/receive queues and releases
 * the IRQ.  Always returns 0.
 */
static int skfp_close(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	CLI_FBI();	/* mask board interrupts first */
	smt_reset_defaults(smc, 1);
	card_stop(smc);
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	/* Drop any packets still queued in software and reset the
	 * queue-credit counter to its full value. */
	skb_queue_purge(&bp->SendSkbQueue);
	bp->QueueSkb = MAX_TX_QUEUE_LEN;

	return 0;
}
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
/*
 * skfp_interrupt - interrupt service routine (shared IRQ)
 * @irq: interrupt number (unused)
 * @dev_id: the net_device this handler was registered for
 *
 * Returns IRQ_NONE when the interrupt was not raised by this adapter
 * (interrupts masked, or no unmasked source pending), IRQ_HANDLED
 * otherwise.  Dispatches to the hardware-module ISR and performs a
 * deferred adapter reset if one was requested.
 */
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct s_smc *smc;	/* board-private data */
	skfddi_priv *bp;

	smc = netdev_priv(dev);
	bp = &smc->os;

	/* IRQs disabled on the board -> cannot be ours. */
	if (inpd(ADDR(B0_IMSK)) == 0) {

		return IRQ_NONE;
	}

	/* No source we care about is pending -> not ours either. */
	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {

		return IRQ_NONE;
	}
	CLI_FBI();	/* mask board interrupts while servicing */
	spin_lock(&bp->DriverLock);

	/* Let the hardware module handle the actual events. */
	fddi_isr(smc);

	/* Reset requested from within the ISR path (e.g. fatal error). */
	if (smc->os.ResetRequested) {
		ResetAdapter(smc);
		smc->os.ResetRequested = FALSE;
	}
	spin_unlock(&bp->DriverLock);
	STI_FBI();	/* re-enable board interrupts */

	return IRQ_HANDLED;
}
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
670 {
671 struct s_smc *bp = netdev_priv(dev);
672
673
674
675 bp->os.MacStat.port_bs_flag[0] = 0x1234;
676 bp->os.MacStat.port_bs_flag[1] = 0x5678;
677
678 #if 0
679
680
681
682
683 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
684 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
685 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
686 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
687 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
688 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
689 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
690 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
691 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
692 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
693 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
694 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
695 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
696 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
697 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
698 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
699 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
700 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
701 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
702 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
703 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
704 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
705 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
706 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
707 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
708 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
709 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
710 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
711 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
712 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
713 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
714 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
715 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
716 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
717 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
718 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
719 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
720 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
721 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
722 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
723 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
724 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
725 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
726 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
727 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
728 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
729 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
730 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
731 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
732 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
733 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
734 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
735 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
736 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
737 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
738 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
739 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
740 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
741 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
742 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
743 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
744 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
745 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
746 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
747 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
748 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
749 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
750 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
751 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
752 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
753 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
754 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
755 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
756 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
757 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
758 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
759 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
760 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
761 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
762 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
763 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
764 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
765 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
766 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
767 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
768 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
769 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
770 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
771 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
772 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
773 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
774 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
775
776
777
778
779 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
780 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
781 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
782 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
783 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
784 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
785 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
786 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
787 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
788 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
789 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
790
791 #endif
792 return (struct net_device_stats *)&bp->os.MacStat;
793 }
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/*
 * skfp_ctl_set_multicast_list - update receive filtering (.ndo_set_rx_mode)
 * @dev: the net device whose rx mode changed
 *
 * Thin locking wrapper: takes the driver lock with IRQs disabled and
 * delegates to skfp_ctl_set_multicast_list_wo_lock(), which is also
 * called from ResetAdapter() with the lock already held.
 */
static void skfp_ctl_set_multicast_list(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;
	unsigned long Flags;

	spin_lock_irqsave(&bp->DriverLock, Flags);
	skfp_ctl_set_multicast_list_wo_lock(dev);
	spin_unlock_irqrestore(&bp->DriverLock, Flags);
}
843
844
845
/*
 * skfp_ctl_set_multicast_list_wo_lock - rx-mode update, caller holds lock
 * @dev: the net device whose rx mode changed
 *
 * Programs promiscuous / all-multicast / per-address multicast filtering
 * into the adapter according to dev->flags and the device's multicast
 * list.  If there are more addresses than the hardware filter can hold
 * (FPMAX_MULTICAST), falls back to receiving all multicast frames.
 */
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	/* Promiscuous mode overrides all other filtering. */
	if (dev->flags & IFF_PROMISC) {
		mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
		pr_debug("PROMISCUOUS MODE ENABLED\n");
	}

	else {
		mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
		pr_debug("PROMISCUOUS MODE DISABLED\n");

		/* Start from a clean filter before re-programming it. */
		mac_clear_multicast(smc);
		mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);

		if (dev->flags & IFF_ALLMULTI) {
			mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
			pr_debug("ENABLE ALL MC ADDRESSES\n");
		} else if (!netdev_mc_empty(dev)) {
			if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {

				/* Program each address individually;
				 * the list fits in the hardware filter. */
				netdev_for_each_mc_addr(ha, dev) {
					mac_add_multicast(smc,
						(struct fddi_addr *)ha->addr,
						1);

					pr_debug("ENABLE MC ADDRESS: %pMF\n",
						 ha->addr);
				}

			} else {
				/* Too many entries: accept all multicast. */
				mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
				pr_debug("ENABLE ALL MC ADDRESSES\n");
			}
		} else {
			/* Empty list: filter stays cleared. */
			pr_debug("DISABLE ALL MC ADDRESSES\n");
		}

		/* Commit the new filter to the hardware. */
		mac_update_multicast(smc);
	}
}
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/*
 * skfp_ctl_set_mac_address - change the station address (.ndo_set_mac_address)
 * @dev: the net device
 * @addr: pointer to a struct sockaddr holding the new address
 *
 * Copies the new address into dev->dev_addr and restarts the adapter so
 * it comes up with the new address (ResetAdapter() re-runs init_smt()
 * with dev->dev_addr).  Always returns 0; no validation of the supplied
 * address is performed.
 */
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
{
	struct s_smc *smc = netdev_priv(dev);
	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
	skfddi_priv *bp = &smc->os;
	unsigned long Flags;

	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
	/* Reset under the driver lock so the ISR cannot interfere. */
	spin_lock_irqsave(&bp->DriverLock, Flags);
	ResetAdapter(smc);
	spin_unlock_irqrestore(&bp->DriverLock, Flags);

	return 0;
}
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
/*
 * skfp_ioctl - driver-private ioctls (.ndo_do_ioctl)
 * @dev: the net device
 * @rq: ifreq whose ifr_data points to a struct s_skfp_ioctl in userspace
 * @cmd: the generic ioctl number (the real command is in ioc.cmd)
 *
 * Supports SKFP_GET_STATS (copy the driver statistics to userspace) and
 * SKFP_CLR_STATS (zero them; requires CAP_NET_ADMIN).  Returns 0 on
 * success, -EFAULT on copy failures, -EPERM on missing capability and
 * -EOPNOTSUPP for unknown commands.
 */
static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *lp = &smc->os;
	struct s_skfp_ioctl ioc;
	int status = 0;

	if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
		return -EFAULT;

	switch (ioc.cmd) {
	case SKFP_GET_STATS:	/* copy MacStat out to the caller */
		ioc.len = sizeof(lp->MacStat);
		status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
				? -EFAULT : 0;
		break;
	case SKFP_CLR_STATS:	/* privileged: zero the statistics */
		if (!capable(CAP_NET_ADMIN)) {
			status = -EPERM;
		} else {
			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
		}
		break;
	default:
		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
		status = -EOPNOTSUPP;

	}

	return status;
}
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
/*
 * skfp_send_pkt - queue a frame for transmission (.ndo_start_xmit)
 * @skb: the socket buffer to transmit; first byte is the FDDI frame control
 * @dev: the net device
 *
 * Frames with an invalid LLC length are counted as tx errors and
 * dropped.  Otherwise the skb is appended to the driver's software
 * queue (bounded by the QueueSkb credit counter) and send_queued_packets()
 * is kicked to push queued frames to the hardware.  Returns
 * NETDEV_TX_BUSY only when the software queue is already full.
 */
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	pr_debug("skfp_send_pkt\n");

	/* Reject frames outside the valid LLC size range. */
	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
		bp->MacStat.gen.tx_errors++;	/* bookkeeping only */

		/* NOTE(review): starting the queue on the drop path looks
		 * odd but is the original behavior — confirm intent. */
		netif_start_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* the skb was consumed (dropped) */
	}
	if (bp->QueueSkb == 0) {	/* no credits left: queue is full */

		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	bp->QueueSkb--;	/* one credit consumed */
	skb_queue_tail(&bp->SendSkbQueue, skb);
	send_queued_packets(netdev_priv(dev));
	/* Stop the stack early if that was the last credit. */
	if (bp->QueueSkb == 0) {
		netif_stop_queue(dev);
	}
	return NETDEV_TX_OK;

}
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
/*
 * send_queued_packets - push queued skbs to the adapter
 * @smc: board-private data
 *
 * Dequeues frames from the software send queue and hands them to the
 * hardware module until the queue is empty or the hardware runs out of
 * transmit resources (in which case the frame is put back at the head
 * of the queue for a later retry).  Each iteration runs under the
 * driver lock with IRQs disabled.
 */
static void send_queued_packets(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char fc;	/* FDDI frame control byte */
	int queue;		/* hardware tx queue: sync or async */
	struct s_smt_fp_txd *txd;	/* transmit descriptor */
	dma_addr_t dma_address;
	unsigned long Flags;

	int frame_status;	/* result flags from hwm_tx_init() */

	pr_debug("send queued packets\n");
	for (;;) {
		skb = skb_dequeue(&bp->SendSkbQueue);

		if (!skb) {
			pr_debug("queue empty\n");
			return;	/* nothing (more) to send */
		}

		spin_lock_irqsave(&bp->DriverLock, Flags);
		fc = skb->data[0];
		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
		/* ESS demo: for async LLC frames, force the sync bit to
		 * match the negotiated synchronous bandwidth state. */
		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {

			if (!smc->ess.sync_bw_available)
				fc &= ~FC_SYNC_BIT;

			else {

				if (smc->mib.fddiESSSynchTxMode) {

					fc |= FC_SYNC_BIT;
				}
			}
		}
#endif
		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);

		/* Neither loopback nor LAN transmit possible right now. */
		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {

			if ((frame_status & RING_DOWN) != 0) {

				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
					 bp->dev->name);
			}

			/* Requeue at the head so ordering is preserved;
			 * llc_restart_tx() will retry later. */
			skb_queue_head(&bp->SendSkbQueue, skb);
			spin_unlock_irqrestore(&bp->DriverLock, Flags);
			return;

		}

		bp->QueueSkb++;	/* return one credit to the stack */

		/* Rewrite the source address with our own if needed. */
		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);

		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);

		dma_address = pci_map_single(&bp->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);
		if (frame_status & LAN_TX) {
			/* Remember skb and mapping for tx-complete. */
			txd->txd_os.skb = skb;
			txd->txd_os.dma_addr = dma_address;
		}
		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
			    frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);

		/* Loopback-only frame: the hardware keeps nothing, so
		 * unmap and free immediately. */
		if (!(frame_status & LAN_TX)) {
			pci_unmap_single(&bp->pdev, dma_address,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
		}
		spin_unlock_irqrestore(&bp->DriverLock, Flags);
	}

	return;

}
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1202 {
1203 unsigned char SRBit;
1204
1205 if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0)
1206
1207 return;
1208 if ((unsigned short) frame[1 + 10] != 0)
1209 return;
1210 SRBit = frame[1 + 6] & 0x01;
1211 memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1212 frame[8] |= SRBit;
1213 }
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/*
 * ResetAdapter - stop, re-initialize and restart the adapter
 * @smc: board-private data
 *
 * Called with the driver lock held (from the ISR or from
 * skfp_ctl_set_mac_address()).  Stops the card, drains both hardware
 * queues, re-runs SMT initialization with the current dev_addr and
 * restores the multicast/promiscuous filter settings.
 */
static void ResetAdapter(struct s_smc *smc)
{

	pr_debug("[fddi: ResetAdapter]\n");

	/* Quiesce the hardware before touching its state. */
	card_stop(smc);

	/* Drop everything still in flight. */
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	/* Bring SMT back to defaults and online again, using the
	 * (possibly new) station address from the net device. */
	smt_reset_defaults(smc, 1);

	init_smt(smc, (smc->os.dev)->dev_addr);

	smt_online(smc, 1);
	STI_FBI();	/* re-enable board interrupts */

	/* Re-program the receive filter; lock is already held. */
	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
/*
 * llc_restart_tx - hardware-module callback: tx resources available again
 * @smc: board-private data
 *
 * Called by the hardware module with the driver lock held.  The lock is
 * dropped around send_queued_packets() because that function takes the
 * lock itself; afterwards the stack's transmit queue is restarted.
 */
void llc_restart_tx(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;

	pr_debug("[llc_restart_tx]\n");

	/* send_queued_packets() acquires DriverLock itself. */
	spin_unlock(&bp->DriverLock);
	send_queued_packets(smc);
	spin_lock(&bp->DriverLock);
	netif_start_queue(bp->dev);

}
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302 void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1303 {
1304 void *virt;
1305
1306 pr_debug("mac_drv_get_space (%d bytes), ", size);
1307 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1308
1309 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1310 printk("Unexpected SMT memory size requested: %d\n", size);
1311 return NULL;
1312 }
1313 smc->os.SharedMemHeap += size;
1314
1315 pr_debug("mac_drv_get_space end\n");
1316 pr_debug("virt addr: %lx\n", (ulong) virt);
1317 pr_debug("bus addr: %lx\n", (ulong)
1318 (smc->os.SharedMemDMA +
1319 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1320 return virt;
1321 }
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342 void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1343 {
1344
1345 char *virt;
1346
1347 pr_debug("mac_drv_get_desc_mem\n");
1348
1349
1350
1351 virt = mac_drv_get_space(smc, size);
1352
1353 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1354 size = size % 16;
1355
1356 pr_debug("Allocate %u bytes alignment gap ", size);
1357 pr_debug("for descriptor memory.\n");
1358
1359 if (!mac_drv_get_space(smc, size)) {
1360 printk("fddi: Unable to align descriptor memory.\n");
1361 return NULL;
1362 }
1363 return virt + size;
1364 }
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1381 {
1382 return smc->os.SharedMemDMA +
1383 ((char *) virt - (char *)smc->os.SharedMemAddr);
1384 }
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415 u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1416 {
1417 return smc->os.SharedMemDMA +
1418 ((char *) virt - (char *)smc->os.SharedMemAddr);
1419 }
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1444 {
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457 if (flag & DMA_WR) {
1458 skfddi_priv *bp = &smc->os;
1459 volatile struct s_smt_fp_rxd *r = &descr->r;
1460
1461
1462 if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1463 int MaxFrameSize = bp->MaxFrameSize;
1464
1465 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1466 MaxFrameSize, PCI_DMA_FROMDEVICE);
1467 r->rxd_os.dma_addr = 0;
1468 }
1469 }
1470 }
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1488 {
1489 struct sk_buff *skb;
1490
1491 pr_debug("entering mac_drv_tx_complete\n");
1492
1493
1494 if (!(skb = txd->txd_os.skb)) {
1495 pr_debug("TXD with no skb assigned.\n");
1496 return;
1497 }
1498 txd->txd_os.skb = NULL;
1499
1500
1501 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1502 skb->len, PCI_DMA_TODEVICE);
1503 txd->txd_os.dma_addr = 0;
1504
1505 smc->os.MacStat.gen.tx_packets++;
1506 smc->os.MacStat.gen.tx_bytes+=skb->len;
1507
1508
1509 dev_kfree_skb_irq(skb);
1510
1511 pr_debug("leaving mac_drv_tx_complete\n");
1512 }
1513
1514
1515
1516
1517
1518
1519
#ifdef DUMPPACKETS
/*
 * Debug helper: dump the first (at most) 64 bytes of a frame to the
 * kernel log, eight hex bytes per line.  Compiled only when
 * DUMPPACKETS is defined; otherwise calls expand to nothing via the
 * empty macro below.
 */
void dump_data(unsigned char *Data, int length)
{
	int i, j;
	unsigned char s[255], sh[10];
	if (length > 64) {
		length = 64;
	}
	printk(KERN_INFO "---Packet start---\n");
	/* Full groups of eight bytes, one log line each. */
	for (i = 0, j = 0; i < length / 8; i++, j += 8)
		printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
		       Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
		       Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
	/* Remaining (length % 8) bytes are collected into one line. */
	strcpy(s, "");
	for (i = 0; i < length % 8; i++) {
		sprintf(sh, "%02x ", Data[j + i]);
		strcat(s, sh);
	}
	printk(KERN_INFO "%s\n", s);
	printk(KERN_INFO "------------------\n");
}
#else
#define dump_data(data,len)
#endif
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1570 int frag_count, int len)
1571 {
1572 skfddi_priv *bp = &smc->os;
1573 struct sk_buff *skb;
1574 unsigned char *virt, *cp;
1575 unsigned short ri;
1576 u_int RifLength;
1577
1578 pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
1579 if (frag_count != 1) {
1580
1581 printk("fddi: Multi-fragment receive!\n");
1582 goto RequeueRxd;
1583
1584 }
1585 skb = rxd->rxd_os.skb;
1586 if (!skb) {
1587 pr_debug("No skb in rxd\n");
1588 smc->os.MacStat.gen.rx_errors++;
1589 goto RequeueRxd;
1590 }
1591 virt = skb->data;
1592
1593
1594
1595 dump_data(skb->data, len);
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612 if ((virt[1 + 6] & FDDI_RII) == 0)
1613 RifLength = 0;
1614 else {
1615 int n;
1616
1617 pr_debug("RIF found\n");
1618
1619 cp = virt + FDDI_MAC_HDR_LEN;
1620
1621 ri = ntohs(*((__be16 *) cp));
1622 RifLength = ri & FDDI_RCF_LEN_MASK;
1623 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1624 printk("fddi: Invalid RIF.\n");
1625 goto RequeueRxd;
1626
1627 }
1628 virt[1 + 6] &= ~FDDI_RII;
1629
1630
1631 virt = cp + RifLength;
1632 for (n = FDDI_MAC_HDR_LEN; n; n--)
1633 *--virt = *--cp;
1634
1635 skb_pull(skb, RifLength);
1636 len -= RifLength;
1637 RifLength = 0;
1638 }
1639
1640
1641 smc->os.MacStat.gen.rx_packets++;
1642
1643 smc->os.MacStat.gen.rx_bytes+=len;
1644
1645
1646 if (virt[1] & 0x01) {
1647
1648 smc->os.MacStat.gen.multicast++;
1649 }
1650
1651
1652 rxd->rxd_os.skb = NULL;
1653 skb_trim(skb, len);
1654 skb->protocol = fddi_type_trans(skb, bp->dev);
1655
1656 netif_rx(skb);
1657
1658 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1659 return;
1660
1661 RequeueRxd:
1662 pr_debug("Rx: re-queue RXD.\n");
1663 mac_drv_requeue_rxd(smc, rxd, frag_count);
1664 smc->os.MacStat.gen.rx_errors++;
1665
1666
1667 }
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
/*
 * Upcall from the hardware module: re-queue receive buffers.
 *
 * Walks frag_count source descriptors starting at rxd.  For each one,
 * its skb is moved to the current write position of the receive ring
 * (HWM_GET_CURR_RXD) and re-mapped for DMA.  If the source descriptor
 * has no skb, a fresh one is allocated; when allocation fails, the
 * driver's local scratch buffer is queued instead (with a NULL skb)
 * so that the ring never runs empty.
 */
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	if (frag_count != 1)
		/* This driver only queues single-fragment buffers. */
		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		/* Remember the next source before rxd is re-purposed as
		 * the destination (current ring write position). */
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {
			/* Source lost its skb; try to allocate a new one. */
			pr_debug("Requeue with no skb in rxd!\n");
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				/* 3 bytes headroom, then map for DMA. */
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = pci_map_single(&smc->os.pdev,
							v_addr,
							MaxFrameSize,
							PCI_DMA_FROMDEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				/* Out of memory: queue the pre-mapped local
				 * scratch buffer so the ring stays full. */
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			/* Re-use the skb attached to the source descriptor. */
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
}
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765 void mac_drv_fill_rxd(struct s_smc *smc)
1766 {
1767 int MaxFrameSize;
1768 unsigned char *v_addr;
1769 unsigned long b_addr;
1770 struct sk_buff *skb;
1771 volatile struct s_smt_fp_rxd *rxd;
1772
1773 pr_debug("entering mac_drv_fill_rxd\n");
1774
1775
1776
1777
1778 MaxFrameSize = smc->os.MaxFrameSize;
1779
1780 while (HWM_GET_RX_FREE(smc) > 0) {
1781 pr_debug(".\n");
1782
1783 rxd = HWM_GET_CURR_RXD(smc);
1784 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1785 if (skb) {
1786
1787 skb_reserve(skb, 3);
1788 skb_put(skb, MaxFrameSize);
1789 v_addr = skb->data;
1790 b_addr = pci_map_single(&smc->os.pdev,
1791 v_addr,
1792 MaxFrameSize,
1793 PCI_DMA_FROMDEVICE);
1794 rxd->rxd_os.dma_addr = b_addr;
1795 } else {
1796
1797
1798
1799
1800
1801 pr_debug("Queueing invalid buffer!\n");
1802 v_addr = smc->os.LocalRxBuffer;
1803 b_addr = smc->os.LocalRxBufferDMA;
1804 }
1805
1806 rxd->rxd_os.skb = skb;
1807
1808
1809 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1810 FIRST_FRAG | LAST_FRAG);
1811 }
1812 pr_debug("leaving mac_drv_fill_rxd\n");
1813 }
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1833 int frag_count)
1834 {
1835
1836 struct sk_buff *skb;
1837
1838 pr_debug("entering mac_drv_clear_rxd\n");
1839
1840 if (frag_count != 1)
1841
1842 printk("fddi: Multi-fragment clear!\n");
1843
1844 for (; frag_count > 0; frag_count--) {
1845 skb = rxd->rxd_os.skb;
1846 if (skb != NULL) {
1847 skfddi_priv *bp = &smc->os;
1848 int MaxFrameSize = bp->MaxFrameSize;
1849
1850 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1851 MaxFrameSize, PCI_DMA_FROMDEVICE);
1852
1853 dev_kfree_skb(skb);
1854 rxd->rxd_os.skb = NULL;
1855 }
1856 rxd = rxd->rxd_next;
1857
1858 }
1859 }
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889 int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1890 char *look_ahead, int la_len)
1891 {
1892 struct sk_buff *skb;
1893
1894 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1895
1896
1897
1898 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1899 pr_debug("fddi: Discard invalid local SMT frame\n");
1900 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1901 len, la_len, (unsigned long) look_ahead);
1902 return 0;
1903 }
1904 skb = alloc_skb(len + 3, GFP_ATOMIC);
1905 if (!skb) {
1906 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1907 return 0;
1908 }
1909 skb_reserve(skb, 3);
1910 skb_put(skb, len);
1911 skb_copy_to_linear_data(skb, look_ahead, len);
1912
1913
1914 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1915 netif_rx(skb);
1916
1917 return 0;
1918 }
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
/*
 * Upcall from SMT: poll the driver's timers.
 * Intentionally empty — this driver needs no timer polling here.
 */
void smt_timer_poll(struct s_smc *smc)
{
}
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954 void ring_status_indication(struct s_smc *smc, u_long status)
1955 {
1956 pr_debug("ring_status_indication( ");
1957 if (status & RS_RES15)
1958 pr_debug("RS_RES15 ");
1959 if (status & RS_HARDERROR)
1960 pr_debug("RS_HARDERROR ");
1961 if (status & RS_SOFTERROR)
1962 pr_debug("RS_SOFTERROR ");
1963 if (status & RS_BEACON)
1964 pr_debug("RS_BEACON ");
1965 if (status & RS_PATHTEST)
1966 pr_debug("RS_PATHTEST ");
1967 if (status & RS_SELFTEST)
1968 pr_debug("RS_SELFTEST ");
1969 if (status & RS_RES9)
1970 pr_debug("RS_RES9 ");
1971 if (status & RS_DISCONNECT)
1972 pr_debug("RS_DISCONNECT ");
1973 if (status & RS_RES7)
1974 pr_debug("RS_RES7 ");
1975 if (status & RS_DUPADDR)
1976 pr_debug("RS_DUPADDR ");
1977 if (status & RS_NORINGOP)
1978 pr_debug("RS_NORINGOP ");
1979 if (status & RS_VERSION)
1980 pr_debug("RS_VERSION ");
1981 if (status & RS_STUCKBYPASSS)
1982 pr_debug("RS_STUCKBYPASSS ");
1983 if (status & RS_EVENT)
1984 pr_debug("RS_EVENT ");
1985 if (status & RS_RINGOPCHANGE)
1986 pr_debug("RS_RINGOPCHANGE ");
1987 if (status & RS_RES0)
1988 pr_debug("RS_RES0 ");
1989 pr_debug("]\n");
1990 }
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
/*
 * Upcall from SMT: return the current system time, in jiffies.
 */
unsigned long smt_get_time(void)
{
	return jiffies;
}
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028 void smt_stat_counter(struct s_smc *smc, int stat)
2029 {
2030
2031
2032 pr_debug("smt_stat_counter\n");
2033 switch (stat) {
2034 case 0:
2035 pr_debug("Ring operational change.\n");
2036 break;
2037 case 1:
2038 pr_debug("Receive fifo overflow.\n");
2039 smc->os.MacStat.gen.rx_errors++;
2040 break;
2041 default:
2042 pr_debug("Unknown status (%d).\n", stat);
2043 break;
2044 }
2045 }
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
/*
 * Upcall from SMT: the CFM (configuration management) state machine
 * changed state.  Debug-only: logs the symbolic state name.
 */
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	const char *name;

	switch (c_state) {
	case SC0_ISOLATED:
		name = "SC0_ISOLATED";
		break;
	case SC1_WRAP_A:
		name = "SC1_WRAP_A";
		break;
	case SC2_WRAP_B:
		name = "SC2_WRAP_B";
		break;
	case SC4_THRU_A:
		name = "SC4_THRU_A";
		break;
	case SC5_THRU_B:
		name = "SC5_THRU_B";
		break;
	case SC7_WRAP_S:
		name = "SC7_WRAP_S";
		break;
	case SC9_C_WRAP_A:
		name = "SC9_C_WRAP_A";
		break;
	case SC10_C_WRAP_B:
		name = "SC10_C_WRAP_B";
		break;
	case SC11_C_WRAP_S:
		name = "SC11_C_WRAP_S";
		break;
	default:
		pr_debug("cfm_state_change: unknown %d\n", c_state);
		return;
	}
	pr_debug("cfm_state_change: %s\n", name);
#endif
}
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
/*
 * Upcall from SMT: the ECM (entity coordination management) state
 * machine changed state.  Debug-only: logs the symbolic state name.
 */
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	const char *name;

	switch (e_state) {
	case EC0_OUT:
		name = "EC0_OUT";
		break;
	case EC1_IN:
		name = "EC1_IN";
		break;
	case EC2_TRACE:
		name = "EC2_TRACE";
		break;
	case EC3_LEAVE:
		name = "EC3_LEAVE";
		break;
	case EC4_PATH_TEST:
		name = "EC4_PATH_TEST";
		break;
	case EC5_INSERT:
		name = "EC5_INSERT";
		break;
	case EC6_CHECK:
		name = "EC6_CHECK";
		break;
	case EC7_DEINSERT:
		name = "EC7_DEINSERT";
		break;
	default:
		name = "unknown";
		break;
	}
	pr_debug("ecm_state_change: %s\n", name);
#endif
}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
/*
 * Upcall from SMT: the RMT (ring management) state machine changed
 * state.  Debug-only: logs the symbolic state name plus a short
 * explanation.
 */
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	const char *name;

	switch (r_state) {
	case RM0_ISOLATED:
		name = "RM0_ISOLATED";
		break;
	case RM1_NON_OP:
		name = "RM1_NON_OP - not operational";
		break;
	case RM2_RING_OP:
		name = "RM2_RING_OP - ring operational";
		break;
	case RM3_DETECT:
		name = "RM3_DETECT - detect dupl addresses";
		break;
	case RM4_NON_OP_DUP:
		name = "RM4_NON_OP_DUP - dupl. addr detected";
		break;
	case RM5_RING_OP_DUP:
		name = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
		break;
	case RM6_DIRECTED:
		name = "RM6_DIRECTED - sending directed beacons";
		break;
	case RM7_TRACE:
		name = "RM7_TRACE - trace initiated";
		break;
	default:
		name = "unknown";
		break;
	}
	pr_debug("[rmt_state_change: %s]\n", name);
#endif
}
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
/*
 * Upcall from SMT: an adapter reset is required.  Only records the
 * request here; the actual reset is presumably performed later by the
 * driver when it sees ResetRequested set — TODO confirm against the
 * interrupt/reset paths.
 */
void drv_reset_indication(struct s_smc *smc)
{
	pr_debug("entering drv_reset_indication\n");

	smc->os.ResetRequested = TRUE;

}
2236
/* PCI driver registration: ties the supported device IDs to the
 * probe/remove entry points declared earlier in this file. */
static struct pci_driver skfddi_pci_driver = {
	.name		= "skfddi",
	.id_table	= skfddi_pci_tbl,
	.probe		= skfp_init_one,
	.remove		= skfp_remove_one,
};

/* Generates the module init/exit boilerplate for the driver above. */
module_pci_driver(skfddi_pci_driver);