This source file includes the following definitions:
- InReg
- OutReg
- wr
- or
- cl
- scc_discard_buffers
- scc_notify
- flush_rx_FIFO
- start_hunt
- scc_txint
- scc_exint
- scc_rxint
- scc_spint
- scc_isr_dispatch
- scc_isr
- set_brg
- set_speed
- init_brg
- init_channel
- scc_key_trx
- __scc_start_tx_timer
- scc_start_tx_timer
- scc_start_defer
- scc_start_maxkeyup
- scc_tx_done
- is_grouped
- t_dwait
- t_txdelay
- t_tail
- t_busy
- t_maxkeyup
- t_idle
- scc_init_timer
- scc_set_param
- scc_get_param
- scc_stop_calibrate
- scc_start_calibrate
- z8530_init
- scc_net_alloc
- scc_net_setup
- scc_net_open
- scc_net_close
- scc_net_rx
- scc_net_tx
- scc_net_ioctl
- scc_net_set_mac_address
- scc_net_get_stats
- scc_net_seq_idx
- scc_net_seq_start
- scc_net_seq_next
- scc_net_seq_stop
- scc_net_seq_show
- scc_init_driver
- scc_cleanup_driver
1 #define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
2
3 #define VERSION "3.0"
4
139 #undef SCC_LDELAY
140 #undef SCC_DONT_CHECK
141
142 #define SCC_MAXCHIPS 4
143 #define SCC_BUFSIZE 384
144 #undef SCC_DEBUG
145
146 #define SCC_DEFAULT_CLOCK 4915200
147
148
149
150
151 #include <linux/module.h>
152 #include <linux/errno.h>
153 #include <linux/signal.h>
154 #include <linux/timer.h>
155 #include <linux/interrupt.h>
156 #include <linux/ioport.h>
157 #include <linux/string.h>
158 #include <linux/in.h>
159 #include <linux/fcntl.h>
160 #include <linux/ptrace.h>
161 #include <linux/delay.h>
162 #include <linux/skbuff.h>
163 #include <linux/netdevice.h>
164 #include <linux/rtnetlink.h>
165 #include <linux/if_ether.h>
166 #include <linux/if_arp.h>
167 #include <linux/socket.h>
168 #include <linux/init.h>
169 #include <linux/scc.h>
170 #include <linux/ctype.h>
171 #include <linux/kernel.h>
172 #include <linux/proc_fs.h>
173 #include <linux/seq_file.h>
174 #include <linux/bitops.h>
175
176 #include <net/net_namespace.h>
177 #include <net/ax25.h>
178
179 #include <asm/irq.h>
180 #include <asm/io.h>
181 #include <linux/uaccess.h>
182
183 #include "z8530.h"
184
185 static const char banner[] __initconst = KERN_INFO \
186 "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
187
188 static void t_dwait(struct timer_list *t);
189 static void t_txdelay(struct timer_list *t);
190 static void t_tail(struct timer_list *t);
191 static void t_busy(struct timer_list *);
192 static void t_maxkeyup(struct timer_list *);
193 static void t_idle(struct timer_list *t);
194 static void scc_tx_done(struct scc_channel *);
195 static void scc_start_tx_timer(struct scc_channel *,
196 void (*)(struct timer_list *), unsigned long);
197 static void scc_start_maxkeyup(struct scc_channel *);
198 static void scc_start_defer(struct scc_channel *);
199
200 static void z8530_init(void);
201
202 static void init_channel(struct scc_channel *scc);
203 static void scc_key_trx (struct scc_channel *scc, char tx);
204 static void scc_init_timer(struct scc_channel *scc);
205
206 static int scc_net_alloc(const char *name, struct scc_channel *scc);
207 static void scc_net_setup(struct net_device *dev);
208 static int scc_net_open(struct net_device *dev);
209 static int scc_net_close(struct net_device *dev);
210 static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
211 static netdev_tx_t scc_net_tx(struct sk_buff *skb,
212 struct net_device *dev);
213 static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
214 static int scc_net_set_mac_address(struct net_device *dev, void *addr);
215 static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
216
217 static unsigned char SCC_DriverName[] = "scc";
218
219 static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
220
221 static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS];
222
223 static struct scc_ctrl {
224 io_port chan_A;
225 io_port chan_B;
226 int irq;
227 } SCC_ctrl[SCC_MAXCHIPS+1];
228
229 static unsigned char Driver_Initialized;
230 static int Nchips;
231 static io_port Vector_Latch;
232
233
234
235
236
237
238
239
240 static DEFINE_SPINLOCK(iolock);
241
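/*
 * Low-level Z8530 register access: write the register number to the
 * control port, then read or write the value.  The iolock spinlock
 * serialises this two-step access; SCC_LDELAY (see above) inserts a
 * settling delay for slow bus adapters when defined.
 */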
242 static inline unsigned char InReg(io_port port, unsigned char reg)
243 {
244 unsigned long flags;
245 unsigned char r;
246
247 spin_lock_irqsave(&iolock, flags);
248 #ifdef SCC_LDELAY
249 Outb(port, reg);
250 udelay(SCC_LDELAY);
251 r=Inb(port);
252 udelay(SCC_LDELAY);
253 #else
254 Outb(port, reg);
255 r=Inb(port);
256 #endif
257 spin_unlock_irqrestore(&iolock, flags);
258 return r;
259 }
260
261 static inline void OutReg(io_port port, unsigned char reg, unsigned char val)
262 {
263 unsigned long flags;
264
265 spin_lock_irqsave(&iolock, flags);
266 #ifdef SCC_LDELAY
267 Outb(port, reg); udelay(SCC_LDELAY);
268 Outb(port, val); udelay(SCC_LDELAY);
269 #else
270 Outb(port, reg);
271 Outb(port, val);
272 #endif
273 spin_unlock_irqrestore(&iolock, flags);
274 }
275
276 static inline void wr(struct scc_channel *scc, unsigned char reg,
277 unsigned char val)
278 {
279 OutReg(scc->ctrl, reg, (scc->wreg[reg] = val));
280 }
281
282 static inline void or(struct scc_channel *scc, unsigned char reg, unsigned char val)
283 {
284 OutReg(scc->ctrl, reg, (scc->wreg[reg] |= val));
285 }
286
287 static inline void cl(struct scc_channel *scc, unsigned char reg, unsigned char val)
288 {
289 OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
290 }
291
292
293
294
295
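/* Drop the frame currently being transmitted and flush the tx queue. */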
296 static inline void scc_discard_buffers(struct scc_channel *scc)
297 {
298 unsigned long flags;
299
300 spin_lock_irqsave(&scc->lock, flags);
301 if (scc->tx_buff != NULL)
302 {
303 dev_kfree_skb(scc->tx_buff);
304 scc->tx_buff = NULL;
305 }
306
307 while (!skb_queue_empty(&scc->tx_queue))
308 dev_kfree_skb(skb_dequeue(&scc->tx_queue));
309
310 spin_unlock_irqrestore(&scc->lock, flags);
311 }
312
313
314
315
316
317
318
319
320
321
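/*
 * In KISS_DUPLEX_OPTIMA mode, hardware events (DCD changes, "all frames
 * sent") are passed up the stack as a two-byte pseudo frame containing
 * PARAM_HWEVENT and the event code.
 */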
322 static inline void scc_notify(struct scc_channel *scc, int event)
323 {
324 struct sk_buff *skb;
325 char *bp;
326
327 if (scc->kiss.fulldup != KISS_DUPLEX_OPTIMA)
328 return;
329
330 skb = dev_alloc_skb(2);
331 if (skb != NULL)
332 {
333 bp = skb_put(skb, 2);
334 *bp++ = PARAM_HWEVENT;
335 *bp++ = event;
336 scc_net_rx(scc, skb);
337 } else
338 scc->stat.nospace++;
339 }
340
341 static inline void flush_rx_FIFO(struct scc_channel *scc)
342 {
343 int k;
344
345 for (k=0; k<3; k++)
346 Inb(scc->data);
347
348 if(scc->rx_buff != NULL)
349 {
350 scc->stat.rxerrs++;
351 dev_kfree_skb_irq(scc->rx_buff);
352 scc->rx_buff = NULL;
353 }
354 }
355
356 static void start_hunt(struct scc_channel *scc)
357 {
358 if ((scc->modem.clocksrc != CLK_EXTERNAL))
359 OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]);
360 or(scc,R3,ENT_HM|RxENABLE);
361 }
362
363
364
365
366
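/*
 * Transmit interrupt: push the next data byte into the transmitter.
 * When the current frame is exhausted, ABUNDER is cleared so the CRC
 * and closing flag can go out; the following interrupt fetches the next
 * frame from tx_queue, and an empty queue ends the keyup via
 * scc_tx_done().
 */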
367 static inline void scc_txint(struct scc_channel *scc)
368 {
369 struct sk_buff *skb;
370
371 scc->stat.txints++;
372 skb = scc->tx_buff;
373
374
375
376 if (skb == NULL)
377 {
378 skb = skb_dequeue(&scc->tx_queue);
379 scc->tx_buff = skb;
380 netif_wake_queue(scc->dev);
381
382 if (skb == NULL)
383 {
384 scc_tx_done(scc);
385 Outb(scc->ctrl, RES_Tx_P);
386 return;
387 }
388
389 if (skb->len == 0)
390 {
391 dev_kfree_skb_irq(skb);
392 scc->tx_buff = NULL;
393 scc_tx_done(scc);
394 Outb(scc->ctrl, RES_Tx_P);
395 return;
396 }
397
398 scc->stat.tx_state = TXS_ACTIVE;
399
400 OutReg(scc->ctrl, R0, RES_Tx_CRC);
401
402 or(scc,R10,ABUNDER);
403 Outb(scc->data,*skb->data);
404 skb_pull(skb, 1);
405
406 if (!scc->enhanced)
407 Outb(scc->ctrl,RES_EOM_L);
408 return;
409 }
410
411
412
413 if (skb->len == 0)
414 {
415 Outb(scc->ctrl, RES_Tx_P);
416 cl(scc, R10, ABUNDER);
417 dev_kfree_skb_irq(skb);
418 scc->tx_buff = NULL;
419 scc->stat.tx_state = TXS_NEWFRAME;
420 return;
421 }
422
423
424
425 Outb(scc->data,*skb->data);
426 skb_pull(skb, 1);
427 }
428
429
430
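/*
 * External/status interrupt: a break/abort flushes the receive FIFO,
 * DCD (or sync/hunt when softdcd is used) transitions start or stop the
 * receiver and are reported via scc_notify(), and a transmit underrun
 * drops the current frame and restarts transmission.
 */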
431 static inline void scc_exint(struct scc_channel *scc)
432 {
433 unsigned char status,changes,chg_and_stat;
434
435 scc->stat.exints++;
436
437 status = InReg(scc->ctrl,R0);
438 changes = status ^ scc->status;
439 chg_and_stat = changes & status;
440
441
442
443 if (chg_and_stat & BRK_ABRT)
444 flush_rx_FIFO(scc);
445
446
447
448 if ((changes & SYNC_HUNT) && scc->kiss.softdcd)
449 {
450 if (status & SYNC_HUNT)
451 {
452 scc->dcd = 0;
453 flush_rx_FIFO(scc);
454 if ((scc->modem.clocksrc != CLK_EXTERNAL))
455 OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]);
456 } else {
457 scc->dcd = 1;
458 }
459
460 scc_notify(scc, scc->dcd? HWEV_DCD_OFF:HWEV_DCD_ON);
461 }
462
463
464
465
466 if((changes & DCD) && !scc->kiss.softdcd)
467 {
468 if(status & DCD)
469 {
470 start_hunt(scc);
471 scc->dcd = 1;
472 } else {
473 cl(scc,R3,ENT_HM|RxENABLE);
474 flush_rx_FIFO(scc);
475 scc->dcd = 0;
476 }
477
478 scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
479 }
480
481 #ifdef notdef
482
483
484
485
486
487
488 if (chg_and_stat & CTS)
489 {
490 if (scc->kiss.txdelay == 0)
491 scc_start_tx_timer(scc, t_txdelay, 0);
492 }
493 #endif
494
495 if (scc->stat.tx_state == TXS_ACTIVE && (status & TxEOM))
496 {
497 scc->stat.tx_under++;
498 Outb(scc->ctrl, RES_EXT_INT);
499
500 if (scc->tx_buff != NULL)
501 {
502 dev_kfree_skb_irq(scc->tx_buff);
503 scc->tx_buff = NULL;
504 }
505
506 or(scc,R10,ABUNDER);
507 scc_start_tx_timer(scc, t_txdelay, 0);
508 }
509
510 scc->status = status;
511 Outb(scc->ctrl,RES_EXT_INT);
512 }
513
514
515
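/*
 * Receive interrupt: store the incoming byte in rx_buff, allocating a
 * fresh skb (with a leading zero byte) at the start of a frame.  Data
 * arriving while we are keyed up in half duplex, or when no buffer is
 * available, is discarded and the receiver put back into hunt mode.
 */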
516 static inline void scc_rxint(struct scc_channel *scc)
517 {
518 struct sk_buff *skb;
519
520 scc->stat.rxints++;
521
522 if((scc->wreg[5] & RTS) && scc->kiss.fulldup == KISS_DUPLEX_HALF)
523 {
524 Inb(scc->data);
525 or(scc,R3,ENT_HM);
526 return;
527 }
528
529 skb = scc->rx_buff;
530
531 if (skb == NULL)
532 {
533 skb = dev_alloc_skb(scc->stat.bufsize);
534 if (skb == NULL)
535 {
536 scc->dev_stat.rx_dropped++;
537 scc->stat.nospace++;
538 Inb(scc->data);
539 or(scc, R3, ENT_HM);
540 return;
541 }
542
543 scc->rx_buff = skb;
544 skb_put_u8(skb, 0);
545 }
546
547 if (skb->len >= scc->stat.bufsize)
548 {
549 #ifdef notdef
550 printk(KERN_DEBUG "z8530drv: oops, scc_rxint() received huge frame...\n");
551 #endif
552 dev_kfree_skb_irq(skb);
553 scc->rx_buff = NULL;
554 Inb(scc->data);
555 or(scc, R3, ENT_HM);
556 return;
557 }
558
559 skb_put_u8(skb, Inb(scc->data));
560 }
561
562
563
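/*
 * Special receive condition: an overrun drops the frame in progress; at
 * end-of-frame the skb is handed to the network layer if CRC and
 * residue code are good, otherwise it is counted as a receive error.
 */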
564 static inline void scc_spint(struct scc_channel *scc)
565 {
566 unsigned char status;
567 struct sk_buff *skb;
568
569 scc->stat.spints++;
570
571 status = InReg(scc->ctrl,R1);
572
573 Inb(scc->data);
574 skb = scc->rx_buff;
575
576 if(status & Rx_OVR)
577 {
578 scc->stat.rx_over++;
579 or(scc,R3,ENT_HM);
580
581 if (skb != NULL)
582 dev_kfree_skb_irq(skb);
583 scc->rx_buff = skb = NULL;
584 }
585
586 if(status & END_FR && skb != NULL)
587 {
588
589
590 if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
591 {
592
593 skb_trim(skb, skb->len-1);
594 scc_net_rx(scc, skb);
595 scc->rx_buff = NULL;
596 scc->stat.rxframes++;
597 } else {
598 dev_kfree_skb_irq(skb);
599 scc->rx_buff = NULL;
600 scc->stat.rxerrs++;
601 }
602 }
603
604 Outb(scc->ctrl,ERR_RES);
605 }
606
607
608
609
610 static void scc_isr_dispatch(struct scc_channel *scc, int vector)
611 {
612 spin_lock(&scc->lock);
613 switch (vector & VECTOR_MASK)
614 {
615 case TXINT: scc_txint(scc); break;
616 case EXINT: scc_exint(scc); break;
617 case RXINT: scc_rxint(scc); break;
618 case SPINT: scc_spint(scc); break;
619 }
620 spin_unlock(&scc->lock);
621 }
622
623
624
625
626
627
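/*
 * Interrupt entry point.  Boards with a vector latch let us read the
 * pending vector directly; otherwise every chip sharing the IRQ is
 * polled through RR3/RR2.  SCC_IRQTIMEOUT bounds both loops in case a
 * channel keeps requesting service.
 */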
628 #define SCC_IRQTIMEOUT 30000
629
630 static irqreturn_t scc_isr(int irq, void *dev_id)
631 {
632 int chip_irq = (long) dev_id;
633 unsigned char vector;
634 struct scc_channel *scc;
635 struct scc_ctrl *ctrl;
636 int k;
637
638 if (Vector_Latch)
639 {
640 for(k=0; k < SCC_IRQTIMEOUT; k++)
641 {
642 Outb(Vector_Latch, 0);
643
644
645 if((vector=Inb(Vector_Latch)) >= 16 * Nchips) break;
646 if (vector & 0x01) break;
647
648 scc=&SCC_Info[vector >> 3 ^ 0x01];
649 if (!scc->dev) break;
650
651 scc_isr_dispatch(scc, vector);
652
653 OutReg(scc->ctrl,R0,RES_H_IUS);
654 }
655
656 if (k == SCC_IRQTIMEOUT)
657 printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?\n");
658
659 return IRQ_HANDLED;
660 }
661
662
663
664
665
666 ctrl = SCC_ctrl;
667 while (ctrl->chan_A)
668 {
669 if (ctrl->irq != chip_irq)
670 {
671 ctrl++;
672 continue;
673 }
674
675 scc = NULL;
676 for (k = 0; InReg(ctrl->chan_A,R3) && k < SCC_IRQTIMEOUT; k++)
677 {
678 vector=InReg(ctrl->chan_B,R2);
679 if (vector & 0x01) break;
680
681 scc = &SCC_Info[vector >> 3 ^ 0x01];
682 if (!scc->dev) break;
683
684 scc_isr_dispatch(scc, vector);
685 }
686
687 if (k == SCC_IRQTIMEOUT)
688 {
689 printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?!\n");
690 break;
691 }
692
693
694
695
696
697
698
699
700 if (scc != NULL)
701 {
702 OutReg(scc->ctrl,R0,RES_H_IUS);
703 ctrl = SCC_ctrl;
704 } else
705 ctrl++;
706 }
707 return IRQ_HANDLED;
708 }
709
710
711
712
713
714
715
716
717
718
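/*
 * Baud rate generator helpers: set_brg() loads a new time constant into
 * WR12/WR13, set_speed() derives it from the configured speed and the
 * chip clock, and init_brg() selects the BRG as source for the DPLL.
 */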
719 static inline void set_brg(struct scc_channel *scc, unsigned int tc)
720 {
721 cl(scc,R14,BRENABL);
722 wr(scc,R12,tc & 255);
723 wr(scc,R13,tc >> 8);
724 or(scc,R14,BRENABL);
725 }
726
727 static inline void set_speed(struct scc_channel *scc)
728 {
729 unsigned long flags;
730 spin_lock_irqsave(&scc->lock, flags);
731
732 if (scc->modem.speed > 0)
733 set_brg(scc, (unsigned) (scc->clock / (scc->modem.speed * 64)) - 2);
734
735 spin_unlock_irqrestore(&scc->lock, flags);
736 }
737
738
739
740
741 static inline void init_brg(struct scc_channel *scc)
742 {
743 wr(scc, R14, BRSRC);
744 OutReg(scc->ctrl, R14, SSBR|scc->wreg[R14]);
745 OutReg(scc->ctrl, R14, SNRZI|scc->wreg[R14]);
746 }
747
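/*
 * Bring a channel into a defined state: SDLC mode, CRC generation, the
 * clocking scheme requested by the modem configuration (DPLL, divider
 * or external), ESCC extensions if present, then enable receiver and
 * interrupts.
 */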
793 static void init_channel(struct scc_channel *scc)
794 {
795 del_timer(&scc->tx_t);
796 del_timer(&scc->tx_wdog);
797
798 disable_irq(scc->irq);
799
800 wr(scc,R4,X1CLK|SDLC);
801 wr(scc,R1,0);
802 wr(scc,R3,Rx8|RxCRC_ENAB);
803 wr(scc,R5,Tx8|DTR|TxCRC_ENAB);
804 wr(scc,R6,0);
805 wr(scc,R7,FLAG);
806 wr(scc,R9,VIS);
807 wr(scc,R10,(scc->modem.nrz? NRZ : NRZI)|CRCPS|ABUNDER);
808 wr(scc,R14, 0);
809
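/* select the clocking scheme asked for by the modem configuration */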
837 switch(scc->modem.clocksrc)
838 {
839 case CLK_DPLL:
840 wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
841 init_brg(scc);
842 break;
843
844 case CLK_DIVIDER:
845 wr(scc, R11, ((scc->brand & BAYCOM)? TRxCDP : TRxCBR) | RCDPLL|TCRTxCP|TRxCOI);
846 init_brg(scc);
847 break;
848
849 case CLK_EXTERNAL:
850 wr(scc, R11, (scc->brand & BAYCOM)? RCTRxCP|TCRTxCP : RCRTxCP|TCTRxCP);
851 OutReg(scc->ctrl, R14, DISDPLL);
852 break;
853
854 }
855
856 set_speed(scc);
857
858 if(scc->enhanced)
859 {
860 or(scc,R15,SHDLCE|FIFOE);
861 wr(scc,R7,AUTOEOM);
862 }
863
864 if(scc->kiss.softdcd || (InReg(scc->ctrl,R0) & DCD))
865
866 {
867 start_hunt(scc);
868 }
869
870
871
872 wr(scc,R15, BRKIE|TxUIE|(scc->kiss.softdcd? SYNCIE:DCDIE));
873
874 Outb(scc->ctrl,RES_EXT_INT);
875 Outb(scc->ctrl,RES_EXT_INT);
876
877 or(scc,R1,INT_ALL_Rx|TxINT_ENAB|EXT_INT_ENAB);
878
879 scc->status = InReg(scc->ctrl,R0);
880
881 or(scc,R9,MIE);
882
883 scc_init_timer(scc);
884
885 enable_irq(scc->irq);
886 }
887
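/*
 * Key the transmitter (PTT) on or off.  On PRIMUS boards the option
 * register at ctrl+4 is rewritten with bit 7 following the PTT state.
 * With DPLL clocking the BRG time constant differs between transmit and
 * receive (divisor 2 vs. 64), so it is reprogrammed here; with
 * CONFIG_SCC_TRXECHO the receiver is muted while we transmit.
 */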
899 static void scc_key_trx(struct scc_channel *scc, char tx)
900 {
901 unsigned int time_const;
902
903 if (scc->brand & PRIMUS)
904 Outb(scc->ctrl + 4, scc->option | (tx? 0x80 : 0));
905
906 if (scc->modem.speed < 300)
907 scc->modem.speed = 1200;
908
909 time_const = (unsigned) (scc->clock / (scc->modem.speed * (tx? 2:64))) - 2;
910
911 disable_irq(scc->irq);
912
913 if (tx)
914 {
915 or(scc, R1, TxINT_ENAB);
916 or(scc, R15, TxUIE);
917 }
918
919 if (scc->modem.clocksrc == CLK_DPLL)
920 {
921 if (tx)
922 {
923 #ifdef CONFIG_SCC_TRXECHO
924 cl(scc, R3, RxENABLE|ENT_HM);
925 cl(scc, R15, DCDIE|SYNCIE);
926 #endif
927 set_brg(scc, time_const);
928
929
930 wr(scc, R11, RCDPLL|TCBR|TRxCOI|TRxCBR);
931
932
933 if (scc->kiss.tx_inhibit)
934 {
935 or(scc,R5, TxENAB);
936 scc->wreg[R5] |= RTS;
937 } else {
938 or(scc,R5,RTS|TxENAB);
939 }
940 } else {
941 cl(scc,R5,RTS|TxENAB);
942
943 set_brg(scc, time_const);
944
945
946 wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
947
948 #ifndef CONFIG_SCC_TRXECHO
949 if (scc->kiss.softdcd)
950 #endif
951 {
952 or(scc,R15, scc->kiss.softdcd? SYNCIE:DCDIE);
953 start_hunt(scc);
954 }
955 }
956 } else {
957 if (tx)
958 {
959 #ifdef CONFIG_SCC_TRXECHO
960 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
961 {
962 cl(scc, R3, RxENABLE);
963 cl(scc, R15, DCDIE|SYNCIE);
964 }
965 #endif
966
967 if (scc->kiss.tx_inhibit)
968 {
969 or(scc,R5, TxENAB);
970 scc->wreg[R5] |= RTS;
971 } else {
972 or(scc,R5,RTS|TxENAB);
973 }
974 } else {
975 cl(scc,R5,RTS|TxENAB);
976
977 if ((scc->kiss.fulldup == KISS_DUPLEX_HALF) &&
978 #ifndef CONFIG_SCC_TRXECHO
979 scc->kiss.softdcd)
980 #else
981 1)
982 #endif
983 {
984 or(scc, R15, scc->kiss.softdcd? SYNCIE:DCDIE);
985 start_hunt(scc);
986 }
987 }
988 }
989
990 enable_irq(scc->irq);
991 }
992
993
994
995
996 static void __scc_start_tx_timer(struct scc_channel *scc,
997 void (*handler)(struct timer_list *t),
998 unsigned long when)
999 {
1000 del_timer(&scc->tx_t);
1001
1002 if (when == 0)
1003 {
1004 handler(&scc->tx_t);
1005 } else
1006 if (when != TIMER_OFF)
1007 {
1008 scc->tx_t.function = handler;
1009 scc->tx_t.expires = jiffies + (when*HZ)/100;
1010 add_timer(&scc->tx_t);
1011 }
1012 }
1013
1014 static void scc_start_tx_timer(struct scc_channel *scc,
1015 void (*handler)(struct timer_list *t),
1016 unsigned long when)
1017 {
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&scc->lock, flags);
1021 __scc_start_tx_timer(scc, handler, when);
1022 spin_unlock_irqrestore(&scc->lock, flags);
1023 }
1024
1025 static void scc_start_defer(struct scc_channel *scc)
1026 {
1027 unsigned long flags;
1028
1029 spin_lock_irqsave(&scc->lock, flags);
1030 del_timer(&scc->tx_wdog);
1031
1032 if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
1033 {
1034 scc->tx_wdog.function = t_busy;
1035 scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
1036 add_timer(&scc->tx_wdog);
1037 }
1038 spin_unlock_irqrestore(&scc->lock, flags);
1039 }
1040
1041 static void scc_start_maxkeyup(struct scc_channel *scc)
1042 {
1043 unsigned long flags;
1044
1045 spin_lock_irqsave(&scc->lock, flags);
1046 del_timer(&scc->tx_wdog);
1047
1048 if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
1049 {
1050 scc->tx_wdog.function = t_maxkeyup;
1051 scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
1052 add_timer(&scc->tx_wdog);
1053 }
1054 spin_unlock_irqrestore(&scc->lock, flags);
1055 }
1056
1057
1058
1059
1060
1061
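/*
 * All queued frames have been sent: depending on the duplex mode stay
 * keyed and start the idle timer, notify the upper layer (optima), or
 * start the tail timer to release PTT.
 */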
1062 static void scc_tx_done(struct scc_channel *scc)
1063 {
1064
1065
1066
1067
1068 switch (scc->kiss.fulldup)
1069 {
1070 case KISS_DUPLEX_LINK:
1071 scc->stat.tx_state = TXS_IDLE2;
1072 if (scc->kiss.idletime != TIMER_OFF)
1073 scc_start_tx_timer(scc, t_idle,
1074 scc->kiss.idletime*100);
1075 break;
1076 case KISS_DUPLEX_OPTIMA:
1077 scc_notify(scc, HWEV_ALL_SENT);
1078 break;
1079 default:
1080 scc->stat.tx_state = TXS_BUSY;
1081 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1082 }
1083
1084 netif_wake_queue(scc->dev);
1085 }
1086
1087
1088 static unsigned char Rand = 17;
1089
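/*
 * Channels can be tied together with the KISS "group" parameter: a
 * channel counts as busy (TXGROUP) while another group member keys its
 * transmitter, or (RXGROUP) while another member sees DCD.
 */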
1090 static inline int is_grouped(struct scc_channel *scc)
1091 {
1092 int k;
1093 struct scc_channel *scc2;
1094 unsigned char grp1, grp2;
1095
1096 grp1 = scc->kiss.group;
1097
1098 for (k = 0; k < (Nchips * 2); k++)
1099 {
1100 scc2 = &SCC_Info[k];
1101 grp2 = scc2->kiss.group;
1102
1103 if (scc2 == scc || !(scc2->dev && grp2))
1104 continue;
1105
1106 if ((grp1 & 0x3f) == (grp2 & 0x3f))
1107 {
1108 if ( (grp1 & TXGROUP) && (scc2->wreg[R5] & RTS) )
1109 return 1;
1110
1111 if ( (grp1 & RXGROUP) && scc2->dcd )
1112 return 1;
1113 }
1114 }
1115 return 0;
1116 }
1117
1118
1119
1120
1121
1122
1123
1124
1125
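/*
 * Timer state machine.  t_dwait implements p-persistence CSMA for half
 * duplex: defer while DCD is active, the persistence dice roll fails or
 * the group is busy, otherwise key up and wait txdelay before sending.
 */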
1126 static void t_dwait(struct timer_list *t)
1127 {
1128 struct scc_channel *scc = from_timer(scc, t, tx_t);
1129
1130 if (scc->stat.tx_state == TXS_WAIT)
1131 {
1132 if (skb_queue_empty(&scc->tx_queue)) {
1133 scc->stat.tx_state = TXS_IDLE;
1134 netif_wake_queue(scc->dev);
1135 return;
1136 }
1137
1138 scc->stat.tx_state = TXS_BUSY;
1139 }
1140
1141 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
1142 {
1143 Rand = Rand * 17 + 31;
1144
1145 if (scc->dcd || (scc->kiss.persist) < Rand || (scc->kiss.group && is_grouped(scc)) )
1146 {
1147 scc_start_defer(scc);
1148 scc_start_tx_timer(scc, t_dwait, scc->kiss.slottime);
1149 return ;
1150 }
1151 }
1152
1153 if ( !(scc->wreg[R5] & RTS) )
1154 {
1155 scc_key_trx(scc, TX_ON);
1156 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1157 } else {
1158 scc_start_tx_timer(scc, t_txdelay, 0);
1159 }
1160 }
1161
1162
1163
1164
1165
1166
1167
1168 static void t_txdelay(struct timer_list *t)
1169 {
1170 struct scc_channel *scc = from_timer(scc, t, tx_t);
1171
1172 scc_start_maxkeyup(scc);
1173
1174 if (scc->tx_buff == NULL)
1175 {
1176 disable_irq(scc->irq);
1177 scc_txint(scc);
1178 enable_irq(scc->irq);
1179 }
1180 }
1181
1182
1183
1184
1185
1186
1187
1188
1189 static void t_tail(struct timer_list *t)
1190 {
1191 struct scc_channel *scc = from_timer(scc, t, tx_t);
1192 unsigned long flags;
1193
1194 spin_lock_irqsave(&scc->lock, flags);
1195 del_timer(&scc->tx_wdog);
1196 scc_key_trx(scc, TX_OFF);
1197 spin_unlock_irqrestore(&scc->lock, flags);
1198
1199 if (scc->stat.tx_state == TXS_TIMEOUT)
1200 {
1201 scc->stat.tx_state = TXS_WAIT;
1202 scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
1203 return;
1204 }
1205
1206 scc->stat.tx_state = TXS_IDLE;
1207 netif_wake_queue(scc->dev);
1208 }
1209
1210
1211
1212
1213
1214
1215
1216 static void t_busy(struct timer_list *t)
1217 {
1218 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1219
1220 del_timer(&scc->tx_t);
1221 netif_stop_queue(scc->dev);
1222
1223 scc_discard_buffers(scc);
1224 scc->stat.txerrs++;
1225 scc->stat.tx_state = TXS_IDLE;
1226
1227 netif_wake_queue(scc->dev);
1228 }
1229
1230
1231
1232
1233
1234
1235 static void t_maxkeyup(struct timer_list *t)
1236 {
1237 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1238 unsigned long flags;
1239
1240 spin_lock_irqsave(&scc->lock, flags);
1241
1242
1243
1244
1245
1246 netif_stop_queue(scc->dev);
1247 scc_discard_buffers(scc);
1248
1249 del_timer(&scc->tx_t);
1250
1251 cl(scc, R1, TxINT_ENAB);
1252 cl(scc, R15, TxUIE);
1253 OutReg(scc->ctrl, R0, RES_Tx_P);
1254
1255 spin_unlock_irqrestore(&scc->lock, flags);
1256
1257 scc->stat.txerrs++;
1258 scc->stat.tx_state = TXS_TIMEOUT;
1259 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1260 }
1261
1262
1263
1264
1265
1266
1267
1268
1269 static void t_idle(struct timer_list *t)
1270 {
1271 struct scc_channel *scc = from_timer(scc, t, tx_t);
1272
1273 del_timer(&scc->tx_wdog);
1274
1275 scc_key_trx(scc, TX_OFF);
1276 if(scc->kiss.mintime)
1277 scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
1278 scc->stat.tx_state = TXS_WAIT;
1279 }
1280
1281 static void scc_init_timer(struct scc_channel *scc)
1282 {
1283 unsigned long flags;
1284
1285 spin_lock_irqsave(&scc->lock, flags);
1286 scc->stat.tx_state = TXS_IDLE;
1287 spin_unlock_irqrestore(&scc->lock, flags);
1288 }
1289
1300 #define CAST(x) (unsigned long)(x)
1301
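/*
 * Set or query a single KISS parameter.  Values are the raw KISS
 * argument, except PARAM_SPEED where values below 256 are taken in
 * units of 100 baud.
 */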
1302 static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
1303 {
1304 switch (cmd)
1305 {
1306 case PARAM_TXDELAY: scc->kiss.txdelay=arg; break;
1307 case PARAM_PERSIST: scc->kiss.persist=arg; break;
1308 case PARAM_SLOTTIME: scc->kiss.slottime=arg; break;
1309 case PARAM_TXTAIL: scc->kiss.tailtime=arg; break;
1310 case PARAM_FULLDUP: scc->kiss.fulldup=arg; break;
1311 case PARAM_DTR: break;
1312 case PARAM_GROUP: scc->kiss.group=arg; break;
1313 case PARAM_IDLE: scc->kiss.idletime=arg; break;
1314 case PARAM_MIN: scc->kiss.mintime=arg; break;
1315 case PARAM_MAXKEY: scc->kiss.maxkeyup=arg; break;
1316 case PARAM_WAIT: scc->kiss.waittime=arg; break;
1317 case PARAM_MAXDEFER: scc->kiss.maxdefer=arg; break;
1318 case PARAM_TX: scc->kiss.tx_inhibit=arg; break;
1319
1320 case PARAM_SOFTDCD:
1321 scc->kiss.softdcd=arg;
1322 if (arg)
1323 {
1324 or(scc, R15, SYNCIE);
1325 cl(scc, R15, DCDIE);
1326 start_hunt(scc);
1327 } else {
1328 or(scc, R15, DCDIE);
1329 cl(scc, R15, SYNCIE);
1330 }
1331 break;
1332
1333 case PARAM_SPEED:
1334 if (arg < 256)
1335 scc->modem.speed=arg*100;
1336 else
1337 scc->modem.speed=arg;
1338
1339 if (scc->stat.tx_state == 0)
1340 set_speed(scc);
1341 break;
1342
1343 case PARAM_RTS:
1344 if ( !(scc->wreg[R5] & RTS) )
1345 {
1346 if (arg != TX_OFF) {
1347 scc_key_trx(scc, TX_ON);
1348 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1349 }
1350 } else {
1351 if (arg == TX_OFF)
1352 {
1353 scc->stat.tx_state = TXS_BUSY;
1354 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1355 }
1356 }
1357 break;
1358
1359 case PARAM_HWEVENT:
1360 scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
1361 break;
1362
1363 default: return -EINVAL;
1364 }
1365
1366 return 0;
1367 }
1368
1369
1370
1371 static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
1372 {
1373 switch (cmd)
1374 {
1375 case PARAM_TXDELAY: return CAST(scc->kiss.txdelay);
1376 case PARAM_PERSIST: return CAST(scc->kiss.persist);
1377 case PARAM_SLOTTIME: return CAST(scc->kiss.slottime);
1378 case PARAM_TXTAIL: return CAST(scc->kiss.tailtime);
1379 case PARAM_FULLDUP: return CAST(scc->kiss.fulldup);
1380 case PARAM_SOFTDCD: return CAST(scc->kiss.softdcd);
1381 case PARAM_DTR: return CAST((scc->wreg[R5] & DTR)? 1:0);
1382 case PARAM_RTS: return CAST((scc->wreg[R5] & RTS)? 1:0);
1383 case PARAM_SPEED: return CAST(scc->modem.speed);
1384 case PARAM_GROUP: return CAST(scc->kiss.group);
1385 case PARAM_IDLE: return CAST(scc->kiss.idletime);
1386 case PARAM_MIN: return CAST(scc->kiss.mintime);
1387 case PARAM_MAXKEY: return CAST(scc->kiss.maxkeyup);
1388 case PARAM_WAIT: return CAST(scc->kiss.waittime);
1389 case PARAM_MAXDEFER: return CAST(scc->kiss.maxdefer);
1390 case PARAM_TX: return CAST(scc->kiss.tx_inhibit);
1391 default: return NO_SUCH_PARAM;
1392 }
1393
1394 }
1395
1396 #undef CAST
1397
1398
1399
1400
1401
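/*
 * Transmitter calibration: load the requested bit pattern into WR6/WR7,
 * key the transmitter for 'duration' seconds, then restore the SDLC
 * flag and release PTT.
 */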
1402 static void scc_stop_calibrate(struct timer_list *t)
1403 {
1404 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1405 unsigned long flags;
1406
1407 spin_lock_irqsave(&scc->lock, flags);
1408 del_timer(&scc->tx_wdog);
1409 scc_key_trx(scc, TX_OFF);
1410 wr(scc, R6, 0);
1411 wr(scc, R7, FLAG);
1412 Outb(scc->ctrl,RES_EXT_INT);
1413 Outb(scc->ctrl,RES_EXT_INT);
1414
1415 netif_wake_queue(scc->dev);
1416 spin_unlock_irqrestore(&scc->lock, flags);
1417 }
1418
1419
1420 static void
1421 scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern)
1422 {
1423 unsigned long flags;
1424
1425 spin_lock_irqsave(&scc->lock, flags);
1426 netif_stop_queue(scc->dev);
1427 scc_discard_buffers(scc);
1428
1429 del_timer(&scc->tx_wdog);
1430
1431 scc->tx_wdog.function = scc_stop_calibrate;
1432 scc->tx_wdog.expires = jiffies + HZ*duration;
1433 add_timer(&scc->tx_wdog);
1434
1435
1436 wr(scc, R6, 0);
1437 wr(scc, R7, pattern);
1438
1439
1440
1441
1442
1443
1444 Outb(scc->ctrl,RES_EXT_INT);
1445 Outb(scc->ctrl,RES_EXT_INT);
1446
1447 scc_key_trx(scc, TX_ON);
1448 spin_unlock_irqrestore(&scc->lock, flags);
1449 }
1450
1451
1452
1453
1454
1455
1456
1457
1458
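/* Hardware reset and basic initialisation of every registered chip. */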
1459 static void z8530_init(void)
1460 {
1461 struct scc_channel *scc;
1462 int chip, k;
1463 unsigned long flags;
1464 char *flag;
1465
1466
1467 printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
1468
1469 flag=" ";
1470 for (k = 0; k < nr_irqs; k++)
1471 if (Ivec[k].used)
1472 {
1473 printk("%s%d", flag, k);
1474 flag=",";
1475 }
1476 printk("\n");
1477
1478
1479
1480 for (chip = 0; chip < Nchips; chip++)
1481 {
1482 scc=&SCC_Info[2*chip];
1483 if (!scc->ctrl) continue;
1484
1485
1486
1487 if(scc->brand & EAGLE)
1488 Outb(scc->special,0x08);
1489
1490 if(scc->brand & (PC100 | PRIMUS))
1491 Outb(scc->special,scc->option);
1492
1493
1494
1495
1496 spin_lock_irqsave(&scc->lock, flags);
1497
1498 Outb(scc->ctrl, 0);
1499 OutReg(scc->ctrl,R9,FHWRES);
1500 udelay(100);
1501 wr(scc, R2, chip*16);
1502 wr(scc, R9, VIS);
1503 spin_unlock_irqrestore(&scc->lock, flags);
1504 }
1505
1506
1507 Driver_Initialized = 1;
1508 }
1509
1510
1511
1512
1513
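/* Allocate, set up and register the network device for one channel. */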
1514 static int scc_net_alloc(const char *name, struct scc_channel *scc)
1515 {
1516 int err;
1517 struct net_device *dev;
1518
1519 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, scc_net_setup);
1520 if (!dev)
1521 return -ENOMEM;
1522
1523 dev->ml_priv = scc;
1524 scc->dev = dev;
1525 spin_lock_init(&scc->lock);
1526 timer_setup(&scc->tx_t, NULL, 0);
1527 timer_setup(&scc->tx_wdog, NULL, 0);
1528
1529 err = register_netdevice(dev);
1530 if (err) {
1531 printk(KERN_ERR "%s: can't register network device (%d)\n",
1532 name, err);
1533 free_netdev(dev);
1534 scc->dev = NULL;
1535 return err;
1536 }
1537
1538 return 0;
1539 }
1540
1541
1542
1543
1544
1545
1546
1547 static const struct net_device_ops scc_netdev_ops = {
1548 .ndo_open = scc_net_open,
1549 .ndo_stop = scc_net_close,
1550 .ndo_start_xmit = scc_net_tx,
1551 .ndo_set_mac_address = scc_net_set_mac_address,
1552 .ndo_get_stats = scc_net_get_stats,
1553 .ndo_do_ioctl = scc_net_ioctl,
1554 };
1555
1556
1557
1558 static void scc_net_setup(struct net_device *dev)
1559 {
1560 dev->tx_queue_len = 16;
1561
1562 dev->netdev_ops = &scc_netdev_ops;
1563 dev->header_ops = &ax25_header_ops;
1564
1565 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1566 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
1567
1568 dev->flags = 0;
1569
1570 dev->type = ARPHRD_AX25;
1571 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
1572 dev->mtu = AX25_DEF_PACLEN;
1573 dev->addr_len = AX25_ADDR_LEN;
1574
1575 }
1576
1577
1578
1579 static int scc_net_open(struct net_device *dev)
1580 {
1581 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1582
1583 if (!scc->init)
1584 return -EINVAL;
1585
1586 scc->tx_buff = NULL;
1587 skb_queue_head_init(&scc->tx_queue);
1588
1589 init_channel(scc);
1590
1591 netif_start_queue(dev);
1592 return 0;
1593 }
1594
1595
1596
1597 static int scc_net_close(struct net_device *dev)
1598 {
1599 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1600 unsigned long flags;
1601
1602 netif_stop_queue(dev);
1603
1604 spin_lock_irqsave(&scc->lock, flags);
1605 Outb(scc->ctrl,0);
1606 wr(scc,R1,0);
1607 wr(scc,R3,0);
1608 spin_unlock_irqrestore(&scc->lock, flags);
1609
1610 del_timer_sync(&scc->tx_t);
1611 del_timer_sync(&scc->tx_wdog);
1612
1613 scc_discard_buffers(scc);
1614
1615 return 0;
1616 }
1617
1618
1619
1620 static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
1621 {
1622 if (skb->len == 0) {
1623 dev_kfree_skb_irq(skb);
1624 return;
1625 }
1626
1627 scc->dev_stat.rx_packets++;
1628 scc->dev_stat.rx_bytes += skb->len;
1629
1630 skb->protocol = ax25_type_trans(skb, scc->dev);
1631
1632 netif_rx(skb);
1633 }
1634
1635
1636
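/*
 * Transmit a frame handed down by the AX.25 stack.  The first byte is
 * the KISS command: non-zero values update a parameter, zero means
 * data.  Data frames are queued (the oldest is dropped on overflow) and
 * transmission is kicked off if the channel is idle.
 */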
1637 static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
1638 {
1639 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1640 unsigned long flags;
1641 char kisscmd;
1642
1643 if (skb->protocol == htons(ETH_P_IP))
1644 return ax25_ip_xmit(skb);
1645
1646 if (skb->len > scc->stat.bufsize || skb->len < 2) {
1647 scc->dev_stat.tx_dropped++;
1648 dev_kfree_skb(skb);
1649 return NETDEV_TX_OK;
1650 }
1651
1652 scc->dev_stat.tx_packets++;
1653 scc->dev_stat.tx_bytes += skb->len;
1654 scc->stat.txframes++;
1655
1656 kisscmd = *skb->data & 0x1f;
1657 skb_pull(skb, 1);
1658
1659 if (kisscmd) {
1660 scc_set_param(scc, kisscmd, *skb->data);
1661 dev_kfree_skb(skb);
1662 return NETDEV_TX_OK;
1663 }
1664
1665 spin_lock_irqsave(&scc->lock, flags);
1666
1667 if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
1668 struct sk_buff *skb_del;
1669 skb_del = skb_dequeue(&scc->tx_queue);
1670 dev_kfree_skb(skb_del);
1671 }
1672 skb_queue_tail(&scc->tx_queue, skb);
1673 netif_trans_update(dev);
1674
1675
1676
1677
1678
1679
1680
1681
1682 if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
1683 scc->stat.tx_state = TXS_BUSY;
1684 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
1685 __scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
1686 else
1687 __scc_start_tx_timer(scc, t_dwait, 0);
1688 }
1689 spin_unlock_irqrestore(&scc->lock, flags);
1690 return NETDEV_TX_OK;
1691 }
1692
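/*
 * Driver ioctls (SIOCSCC*): before initialisation SIOCSCCCFG registers
 * a chip (with an optional hardware probe) and SIOCSCCINI starts the
 * driver; per channel, SIOCSCCCHANINI configures the modem and the
 * remaining commands read or write KISS parameters, statistics, memory
 * configuration and calibration.
 */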
1706 static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1707 {
1708 struct scc_kiss_cmd kiss_cmd;
1709 struct scc_mem_config memcfg;
1710 struct scc_hw_config hwcfg;
1711 struct scc_calibrate cal;
1712 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1713 int chan;
1714 unsigned char device_name[IFNAMSIZ];
1715 void __user *arg = ifr->ifr_data;
1716
1717
1718 if (!Driver_Initialized)
1719 {
1720 if (cmd == SIOCSCCCFG)
1721 {
1722 int found = 1;
1723
1724 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1725 if (!arg) return -EFAULT;
1726
1727 if (Nchips >= SCC_MAXCHIPS)
1728 return -EINVAL;
1729
1730 if (copy_from_user(&hwcfg, arg, sizeof(hwcfg)))
1731 return -EFAULT;
1732
1733 if (hwcfg.irq == 2) hwcfg.irq = 9;
1734
1735 if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
1736 return -EINVAL;
1737
1738 if (!Ivec[hwcfg.irq].used && hwcfg.irq)
1739 {
1740 if (request_irq(hwcfg.irq, scc_isr,
1741 0, "AX.25 SCC",
1742 (void *)(long) hwcfg.irq))
1743 printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
1744 else
1745 Ivec[hwcfg.irq].used = 1;
1746 }
1747
1748 if (hwcfg.vector_latch && !Vector_Latch) {
1749 if (!request_region(hwcfg.vector_latch, 1, "scc vector latch"))
1750 printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%lx, disabled.\n", hwcfg.vector_latch);
1751 else
1752 Vector_Latch = hwcfg.vector_latch;
1753 }
1754
1755 if (hwcfg.clock == 0)
1756 hwcfg.clock = SCC_DEFAULT_CLOCK;
1757
1758 #ifndef SCC_DONT_CHECK
1759
1760 if(request_region(hwcfg.ctrl_a, 1, "scc-probe"))
1761 {
1762 disable_irq(hwcfg.irq);
1763 Outb(hwcfg.ctrl_a, 0);
1764 OutReg(hwcfg.ctrl_a, R9, FHWRES);
1765 udelay(100);
1766 OutReg(hwcfg.ctrl_a,R13,0x55);
1767 udelay(5);
1768
1769 if (InReg(hwcfg.ctrl_a,R13) != 0x55)
1770 found = 0;
1771 enable_irq(hwcfg.irq);
1772 release_region(hwcfg.ctrl_a, 1);
1773 }
1774 else
1775 found = 0;
1776 #endif
1777
1778 if (found)
1779 {
1780 SCC_Info[2*Nchips ].ctrl = hwcfg.ctrl_a;
1781 SCC_Info[2*Nchips ].data = hwcfg.data_a;
1782 SCC_Info[2*Nchips ].irq = hwcfg.irq;
1783 SCC_Info[2*Nchips+1].ctrl = hwcfg.ctrl_b;
1784 SCC_Info[2*Nchips+1].data = hwcfg.data_b;
1785 SCC_Info[2*Nchips+1].irq = hwcfg.irq;
1786
1787 SCC_ctrl[Nchips].chan_A = hwcfg.ctrl_a;
1788 SCC_ctrl[Nchips].chan_B = hwcfg.ctrl_b;
1789 SCC_ctrl[Nchips].irq = hwcfg.irq;
1790 }
1791
1792
1793 for (chan = 0; chan < 2; chan++)
1794 {
1795 sprintf(device_name, "%s%i", SCC_DriverName, 2*Nchips+chan);
1796
1797 SCC_Info[2*Nchips+chan].special = hwcfg.special;
1798 SCC_Info[2*Nchips+chan].clock = hwcfg.clock;
1799 SCC_Info[2*Nchips+chan].brand = hwcfg.brand;
1800 SCC_Info[2*Nchips+chan].option = hwcfg.option;
1801 SCC_Info[2*Nchips+chan].enhanced = hwcfg.escc;
1802
1803 #ifdef SCC_DONT_CHECK
1804 printk(KERN_INFO "%s: data port = 0x%3.3x control port = 0x%3.3x\n",
1805 device_name,
1806 SCC_Info[2*Nchips+chan].data,
1807 SCC_Info[2*Nchips+chan].ctrl);
1808
1809 #else
1810 printk(KERN_INFO "%s: data port = 0x%3.3lx control port = 0x%3.3lx -- %s\n",
1811 device_name,
1812 chan? hwcfg.data_b : hwcfg.data_a,
1813 chan? hwcfg.ctrl_b : hwcfg.ctrl_a,
1814 found? "found" : "missing");
1815 #endif
1816
1817 if (found)
1818 {
1819 request_region(SCC_Info[2*Nchips+chan].ctrl, 1, "scc ctrl");
1820 request_region(SCC_Info[2*Nchips+chan].data, 1, "scc data");
1821 if (Nchips+chan != 0 &&
1822 scc_net_alloc(device_name,
1823 &SCC_Info[2*Nchips+chan]))
1824 return -EINVAL;
1825 }
1826 }
1827
1828 if (found) Nchips++;
1829
1830 return 0;
1831 }
1832
1833 if (cmd == SIOCSCCINI)
1834 {
1835 if (!capable(CAP_SYS_RAWIO))
1836 return -EPERM;
1837
1838 if (Nchips == 0)
1839 return -EINVAL;
1840
1841 z8530_init();
1842 return 0;
1843 }
1844
1845 return -EINVAL;
1846 }
1847
1848 if (!scc->init)
1849 {
1850 if (cmd == SIOCSCCCHANINI)
1851 {
1852 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1853 if (!arg) return -EINVAL;
1854
1855 scc->stat.bufsize = SCC_BUFSIZE;
1856
1857 if (copy_from_user(&scc->modem, arg, sizeof(struct scc_modem)))
1858 return -EINVAL;
1859
1860
1861
1862 if (scc->modem.speed < 4800)
1863 {
1864 scc->kiss.txdelay = 36;
1865 scc->kiss.persist = 42;
1866 scc->kiss.slottime = 16;
1867 scc->kiss.tailtime = 4;
1868 scc->kiss.fulldup = 0;
1869 scc->kiss.waittime = 50;
1870 scc->kiss.maxkeyup = 10;
1871 scc->kiss.mintime = 3;
1872 scc->kiss.idletime = 30;
1873 scc->kiss.maxdefer = 120;
1874 scc->kiss.softdcd = 0;
1875 } else {
1876 scc->kiss.txdelay = 10;
1877 scc->kiss.persist = 64;
1878 scc->kiss.slottime = 8;
1879 scc->kiss.tailtime = 1;
1880 scc->kiss.fulldup = 0;
1881 scc->kiss.waittime = 50;
1882 scc->kiss.maxkeyup = 7;
1883 scc->kiss.mintime = 3;
1884 scc->kiss.idletime = 30;
1885 scc->kiss.maxdefer = 120;
1886 scc->kiss.softdcd = 0;
1887 }
1888
1889 scc->tx_buff = NULL;
1890 skb_queue_head_init(&scc->tx_queue);
1891 scc->init = 1;
1892
1893 return 0;
1894 }
1895
1896 return -EINVAL;
1897 }
1898
1899 switch(cmd)
1900 {
1901 case SIOCSCCRESERVED:
1902 return -ENOIOCTLCMD;
1903
1904 case SIOCSCCSMEM:
1905 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1906 if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
1907 return -EINVAL;
1908 scc->stat.bufsize = memcfg.bufsize;
1909 return 0;
1910
1911 case SIOCSCCGSTAT:
1912 if (!arg || copy_to_user(arg, &scc->stat, sizeof(scc->stat)))
1913 return -EINVAL;
1914 return 0;
1915
1916 case SIOCSCCGKISS:
1917 if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
1918 return -EINVAL;
1919 kiss_cmd.param = scc_get_param(scc, kiss_cmd.command);
1920 if (copy_to_user(arg, &kiss_cmd, sizeof(kiss_cmd)))
1921 return -EINVAL;
1922 return 0;
1923
1924 case SIOCSCCSKISS:
1925 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1926 if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
1927 return -EINVAL;
1928 return scc_set_param(scc, kiss_cmd.command, kiss_cmd.param);
1929
1930 case SIOCSCCCAL:
1931 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1932 if (!arg || copy_from_user(&cal, arg, sizeof(cal)) || cal.time == 0)
1933 return -EINVAL;
1934
1935 scc_start_calibrate(scc, cal.time, cal.pattern);
1936 return 0;
1937
1938 default:
1939 return -ENOIOCTLCMD;
1940
1941 }
1942
1943 return -EINVAL;
1944 }
1945
1946
1947
1948 static int scc_net_set_mac_address(struct net_device *dev, void *addr)
1949 {
1950 struct sockaddr *sa = (struct sockaddr *) addr;
1951 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
1952 return 0;
1953 }
1954
1955
1956
1957 static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
1958 {
1959 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1960
1961 scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
1962 scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
1963 scc->dev_stat.rx_fifo_errors = scc->stat.rx_over;
1964 scc->dev_stat.tx_fifo_errors = scc->stat.tx_under;
1965
1966 return &scc->dev_stat;
1967 }
1968
1969
1970
1971
1972
1973 #ifdef CONFIG_PROC_FS
1974
1975 static inline struct scc_channel *scc_net_seq_idx(loff_t pos)
1976 {
1977 int k;
1978
1979 for (k = 0; k < Nchips*2; ++k) {
1980 if (!SCC_Info[k].init)
1981 continue;
1982 if (pos-- == 0)
1983 return &SCC_Info[k];
1984 }
1985 return NULL;
1986 }
1987
1988 static void *scc_net_seq_start(struct seq_file *seq, loff_t *pos)
1989 {
1990 return *pos ? scc_net_seq_idx(*pos - 1) : SEQ_START_TOKEN;
1991
1992 }
1993
1994 static void *scc_net_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1995 {
1996 unsigned k;
1997 struct scc_channel *scc = v;
1998 ++*pos;
1999
2000 for (k = (v == SEQ_START_TOKEN) ? 0 : (scc - SCC_Info)+1;
2001 k < Nchips*2; ++k) {
2002 if (SCC_Info[k].init)
2003 return &SCC_Info[k];
2004 }
2005 return NULL;
2006 }
2007
2008 static void scc_net_seq_stop(struct seq_file *seq, void *v)
2009 {
2010 }
2011
2012 static int scc_net_seq_show(struct seq_file *seq, void *v)
2013 {
2014 if (v == SEQ_START_TOKEN) {
2015 seq_puts(seq, "z8530drv-"VERSION"\n");
2016 } else if (!Driver_Initialized) {
2017 seq_puts(seq, "not initialized\n");
2018 } else if (!Nchips) {
2019 seq_puts(seq, "chips missing\n");
2020 } else {
2021 const struct scc_channel *scc = v;
2022 const struct scc_stat *stat = &scc->stat;
2023 const struct scc_kiss *kiss = &scc->kiss;
2024
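/*
 * one block per initialised channel: device name and hardware
 * parameters, modem settings, interrupt counters, rx/tx statistics
 * and the KISS parameters
 */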
2035 seq_printf(seq, "%s\t%3.3lx %3.3lx %d %lu %2.2x %d %3.3lx %3.3lx %d\n",
2036 scc->dev->name,
2037 scc->data, scc->ctrl, scc->irq, scc->clock, scc->brand,
2038 scc->enhanced, Vector_Latch, scc->special,
2039 scc->option);
2040 seq_printf(seq, "\t%lu %d %d %d %d\n",
2041 scc->modem.speed, scc->modem.nrz,
2042 scc->modem.clocksrc, kiss->softdcd,
2043 stat->bufsize);
2044 seq_printf(seq, "\t%lu %lu %lu %lu\n",
2045 stat->rxints, stat->txints, stat->exints, stat->spints);
2046 seq_printf(seq, "\t%lu %lu %d / %lu %lu %d / %d %d\n",
2047 stat->rxframes, stat->rxerrs, stat->rx_over,
2048 stat->txframes, stat->txerrs, stat->tx_under,
2049 stat->nospace, stat->tx_state);
2050
2051 #define K(x) kiss->x
2052 seq_printf(seq, "\t%d %d %d %d %d %d %d %d %d %d %d %d\n",
2053 K(txdelay), K(persist), K(slottime), K(tailtime),
2054 K(fulldup), K(waittime), K(mintime), K(maxkeyup),
2055 K(idletime), K(maxdefer), K(tx_inhibit), K(group));
2056 #undef K
2057 #ifdef SCC_DEBUG
2058 {
2059 int reg;
2060
2061 seq_printf(seq, "\tW ");
2062 for (reg = 0; reg < 16; reg++)
2063 seq_printf(seq, "%2.2x ", scc->wreg[reg]);
2064 seq_printf(seq, "\n");
2065
2066 seq_printf(seq, "\tR %2.2x %2.2x XX ", InReg(scc->ctrl,R0), InReg(scc->ctrl,R1));
2067 for (reg = 3; reg < 8; reg++)
2068 seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
2069 seq_printf(seq, "XX ");
2070 for (reg = 9; reg < 16; reg++)
2071 seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
2072 seq_printf(seq, "\n");
2073 }
2074 #endif
2075 seq_putc(seq, '\n');
2076 }
2077
2078 return 0;
2079 }
2080
2081 static const struct seq_operations scc_net_seq_ops = {
2082 .start = scc_net_seq_start,
2083 .next = scc_net_seq_next,
2084 .stop = scc_net_seq_stop,
2085 .show = scc_net_seq_show,
2086 };
2087 #endif
2088
2089
2090
2091
2092
2093
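/*
 * Module init/exit: register an initial device ("scc0") that receives
 * the configuration ioctls and create /proc/net/z8530drv; on removal
 * reset all chips, free IRQs and I/O regions, and unregister every
 * device.
 */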
2094 static int __init scc_init_driver (void)
2095 {
2096 char devname[IFNAMSIZ];
2097
2098 printk(banner);
2099
2100 sprintf(devname,"%s0", SCC_DriverName);
2101
2102 rtnl_lock();
2103 if (scc_net_alloc(devname, SCC_Info)) {
2104 rtnl_unlock();
2105 printk(KERN_ERR "z8530drv: cannot initialize module\n");
2106 return -EIO;
2107 }
2108 rtnl_unlock();
2109
2110 proc_create_seq("z8530drv", 0, init_net.proc_net, &scc_net_seq_ops);
2111
2112 return 0;
2113 }
2114
2115 static void __exit scc_cleanup_driver(void)
2116 {
2117 io_port ctrl;
2118 int k;
2119 struct scc_channel *scc;
2120 struct net_device *dev;
2121
2122 if (Nchips == 0 && (dev = SCC_Info[0].dev))
2123 {
2124 unregister_netdev(dev);
2125 free_netdev(dev);
2126 }
2127
2128
2129 local_irq_disable();
2130
2131 for (k = 0; k < Nchips; k++)
2132 if ( (ctrl = SCC_ctrl[k].chan_A) )
2133 {
2134 Outb(ctrl, 0);
2135 OutReg(ctrl,R9,FHWRES);
2136 udelay(50);
2137 }
2138
2139
2140 for (k = 0; k < nr_irqs ; k++)
2141 if (Ivec[k].used) free_irq(k, NULL);
2142
2143 local_irq_enable();
2144
2145
2146 for (k = 0; k < Nchips*2; k++)
2147 {
2148 scc = &SCC_Info[k];
2149 if (scc->ctrl)
2150 {
2151 release_region(scc->ctrl, 1);
2152 release_region(scc->data, 1);
2153 }
2154 if (scc->dev)
2155 {
2156 unregister_netdev(scc->dev);
2157 free_netdev(scc->dev);
2158 }
2159 }
2160
2161
2162 if (Vector_Latch)
2163 release_region(Vector_Latch, 1);
2164
2165 remove_proc_entry("z8530drv", init_net.proc_net);
2166 }
2167
2168 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
2169 MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
2170 MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio");
2171 MODULE_LICENSE("GPL");
2172 module_init(scc_init_driver);
2173 module_exit(scc_cleanup_driver);