This source file includes the following definitions.
- z8530_read_port
- z8530_write_port
- read_zsreg
- read_zsdata
- write_zsreg
- write_zsctrl
- write_zsdata
- z8530_flush_fifo
- z8530_rtsdtr
- z8530_rx
- z8530_tx
- z8530_status
- z8530_dma_rx
- z8530_dma_tx
- z8530_dma_status
- z8530_rx_clear
- z8530_tx_clear
- z8530_status_clear
- z8530_interrupt
- z8530_sync_open
- z8530_sync_close
- z8530_sync_dma_open
- z8530_sync_dma_close
- z8530_sync_txdma_open
- z8530_sync_txdma_close
- z8530_describe
- do_z8530_init
- z8530_init
- z8530_shutdown
- z8530_channel_load
- z8530_tx_begin
- z8530_tx_done
- z8530_null_rx
- z8530_rx_done
- spans_boundary
- z8530_queue_xmit
- z85230_init_driver
- z85230_cleanup_driver
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37
38 #include <linux/module.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/if_arp.h>
45 #include <linux/delay.h>
46 #include <linux/hdlc.h>
47 #include <linux/ioport.h>
48 #include <linux/init.h>
49 #include <linux/gfp.h>
50 #include <asm/dma.h>
51 #include <asm/io.h>
52 #define RT_LOCK
53 #define RT_UNLOCK
54 #include <linux/spinlock.h>
55
56 #include "z85230.h"
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75 static inline int z8530_read_port(unsigned long p)
76 {
77 u8 r=inb(Z8530_PORT_OF(p));
78 if(p&Z8530_PORT_SLEEP)
79 udelay(5);
80 return r;
81 }
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99 static inline void z8530_write_port(unsigned long p, u8 d)
100 {
101 outb(d,Z8530_PORT_OF(p));
102 if(p&Z8530_PORT_SLEEP)
103 udelay(5);
104 }
105
106
107
108 static void z8530_rx_done(struct z8530_channel *c);
109 static void z8530_tx_done(struct z8530_channel *c);
110
111
112
113
114
115
116
117
118
119
120
121
122
/*
 *	Read a Z8530 channel register. Register 0 can be read directly;
 *	any other register is selected by first writing its number to the
 *	control port, then reading the value back from the same port.
 */
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	/* R0 needs no pointer write; others do */
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}
129
130
131
132
133
134
135
136
137
138 static inline u8 read_zsdata(struct z8530_channel *c)
139 {
140 u8 r;
141 r=z8530_read_port(c->dataio);
142 return r;
143 }
144
145
146
147
148
149
150
151
152
153
154
155
156
/*
 *	Write a Z8530 channel register: select the register by writing its
 *	number to the control port (skipped for R0), then write the value.
 *	The two writes must not be reordered or split.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}
164
165
166
167
168
169
170
171
172
/*
 *	Write a value to the control port with no register select cycle;
 *	used for issuing R0 commands (RES_H_IUS, ERR_RES and friends).
 */
static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}
177
178
179
180
181
182
183
184
185
186
/*
 *	Push one byte out of the channel's data port (transmit data).
 */
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}
191
192
193
194
195
/*
 *	A register-load table that programs nothing: just the 255
 *	terminator understood by z8530_channel_load(). Used to park a
 *	channel that is not wired up.
 */
u8 z8530_dead_port[]=
{
	255   /* table terminator */
};

EXPORT_SYMBOL(z8530_dead_port);
202
203
204
205
206
207
208
209
210
211
212
/*
 *	Register-load table for SDLC (HDLC) operation on a Kilostream
 *	style link: (register, value) pairs terminated by 255, consumed
 *	by z8530_channel_load(). Register roles follow the Z8530 SCC
 *	programming model.
 */
u8 z8530_hdlc_kilostream[]=
{
	4, SYNC_ENAB|SDLC|X1CLK,	/* WR4: SDLC mode, x1 clock */
	2, 0,				/* WR2: no interrupt vector */
	1, 0,
	3, ENT_HM|RxCRC_ENAB|Rx8,	/* WR3: rx 8 bits/char, rx CRC on */
	5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,	/* WR5: tx 8 bits, CRC, RTS/DTR */
	9, 0,				/* WR9: interrupts off while loading */
	6, 0xFF,
	7, FLAG,			/* WR7: SDLC flag character */
	10, ABUNDER|NRZ|CRCPS,		/* WR10: NRZ, CRC preset, abort on underrun */
	11, TCTRxCP,			/* WR11: clock sourcing */
	14, DISDPLL,			/* WR14: DPLL disabled */
	15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,	/* WR15: external status ints */
	1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,	/* WR1: enable rx/tx/ext ints */
	9, NV|MIE|NORESET,		/* WR9: master interrupt enable */
	255				/* table terminator */
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);
233
234
235
236
237
/*
 *	As z8530_hdlc_kilostream, but for the Z85230: additionally
 *	programs WR23 (value 3), an 85230-only extension register.
 */
u8 z8530_hdlc_kilostream_85230[]=
{
	4, SYNC_ENAB|SDLC|X1CLK,	/* WR4: SDLC mode, x1 clock */
	2, 0,				/* WR2: no interrupt vector */
	1, 0,
	3, ENT_HM|RxCRC_ENAB|Rx8,	/* WR3: rx 8 bits/char, rx CRC on */
	5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,	/* WR5: tx 8 bits, CRC, RTS/DTR */
	9, 0,				/* WR9: interrupts off while loading */
	6, 0xFF,
	7, FLAG,			/* WR7: SDLC flag character */
	10, ABUNDER|NRZ|CRCPS,		/* WR10: NRZ, CRC preset, abort on underrun */
	11, TCTRxCP,			/* WR11: clock sourcing */
	14, DISDPLL,			/* WR14: DPLL disabled */
	15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,	/* WR15: external status ints */
	1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,	/* WR1: enable rx/tx/ext ints */
	9, NV|MIE|NORESET,		/* WR9: master interrupt enable */
	23, 3,				/* 85230-only extended register */

	255				/* table terminator */
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
260
261
262
263
264
265
266
267
268
269
270
271
272
273 static void z8530_flush_fifo(struct z8530_channel *c)
274 {
275 read_zsreg(c, R1);
276 read_zsreg(c, R1);
277 read_zsreg(c, R1);
278 read_zsreg(c, R1);
279 if(c->dev->type==Z85230)
280 {
281 read_zsreg(c, R1);
282 read_zsreg(c, R1);
283 read_zsreg(c, R1);
284 read_zsreg(c, R1);
285 }
286 }
287
288
289
290
291
292
293
294
295
296
297
298
299 static void z8530_rtsdtr(struct z8530_channel *c, int set)
300 {
301 if (set)
302 c->regs[5] |= (RTS | DTR);
303 else
304 c->regs[5] &= ~(RTS | DTR);
305 write_zsreg(c, R5, c->regs[5]);
306 }
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
/*
 *	z8530_rx - PIO receive interrupt handler
 *	@c: channel with pending receive data
 *
 *	Drain the SCC receive FIFO into the current skb buffer. Called
 *	from the interrupt path with the device lock held; the register
 *	access order here follows the chip's command protocol and must
 *	not be changed.
 */
static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* R0 bit 0: receive character available; stop when FIFO empty */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/* Store the byte only while there is room in the buffer;
		   excess bytes are read (to empty the FIFO) but dropped */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{
			/* End of frame seen: either hand it up or, on
			   overrun/CRC error, rewind and discard it */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Reset the write pointer to the start of the skb */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;

				}

			}
			else
			{
				/* Good frame: flip buffers and pass it on,
				   then reset the receive CRC generator */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}

	/* Clear the error latches and acknowledge the interrupt
	   (reset highest IUS) */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
394
395
396
397
398
399
400
401
402
403
404
405
/*
 *	z8530_tx - PIO transmit interrupt handler
 *	@c: channel ready for more transmit data
 *
 *	Feed bytes from the current transmit buffer into the SCC while
 *	its transmit buffer reports empty. When the frame is exhausted,
 *	finish it off and start the next one.
 */
static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* R0 bit 2: transmit buffer empty; bail out and wait for
		   the next tx interrupt if the chip is still busy */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;

		/* Push the next byte and acknowledge this interrupt */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);

		if(c->txcount==0)
		{
			/* Last byte queued: reset EOM latch and let the
			   CRC/closing flag go out (clear abort-on-underrun) */
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}

	/* Frame complete: clear the pending tx interrupt, report the
	   finished skb and kick off the next frame if any */
	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}
436
437
438
439
440
441
442
443
444
445
446
/*
 *	z8530_status - external/status interrupt handler (PIO mode)
 *	@chan: channel reporting a status change
 *
 *	Reads R0, computes which status bits changed since the last
 *	snapshot, handles transmit underrun (TxEOM) and DCD transitions,
 *	then acknowledges the interrupt.
 */
static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;	/* bits that changed */

	chan->status = status;

	if (status & TxEOM) {
		/* Transmit underrun/EOM: count it as a fifo error and
		   wrap up the current frame */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	/* Only act on DCD if it actually changed state */
	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			/* Carrier up: enable the receiver */
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			/* Carrier down: disable rx and drain the FIFO */
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}

	}
	/* Acknowledge the external status interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

/* Interrupt handler set for plain PIO synchronous operation */
struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};

EXPORT_SYMBOL(z8530_sync);
490
491
492
493
494
495
496
497
498
499
500
/*
 *	z8530_dma_rx - receive interrupt handler for DMA mode
 *	@chan: channel that interrupted
 *
 *	In DMA receive mode the interrupt signals end-of-frame rather
 *	than data-available: check R1 and complete the frame if END_FR
 *	is set. If receive DMA is not currently active, fall back to the
 *	PIO receive path.
 */
static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		/* Dummy reads of R7/R6 (values unused); NOTE(review):
		   presumably required to latch the residue/status —
		   keep the sequence as-is */
		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}
526
527
528
529
530
531
532
533
534
/*
 *	z8530_dma_tx - transmit interrupt handler for DMA mode
 *	@chan: channel that interrupted
 *
 *	A transmit interrupt should not occur while the DMA controller
 *	is feeding the chip; both branches warn and fall back to the PIO
 *	transmit path so data still flows.
 */
static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		/* Channel is not in tx DMA mode at all */
		pr_warn("Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur while DMA is active */
	pr_err("DMA tx - bogus event!\n");
	z8530_tx(chan);
}
547
548
549
550
551
552
553
554
555
556
557
/*
 *	z8530_dma_status - external/status interrupt handler, DMA mode
 *	@chan: channel reporting a status change
 *
 *	Like z8530_status() but TxEOM here means the DMA transfer has
 *	completed: shut the tx DMA channel down and complete the frame.
 *	DCD transitions are handled identically to the PIO path.
 */
static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;	/* bits that changed */

	chan->status=status;


	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			/* Frame has gone out: stop the tx DMA channel */
			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	/* Only act on DCD if it actually changed state */
	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	/* Acknowledge the external status interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

/* Handler set for full DMA operation (rx and tx via DMA) */
static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/* Handler set for tx-only DMA operation (rx stays PIO) */
static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
614
615
616
617
618
619
620
621
622
623
624
/*
 *	z8530_rx_clear - discard-mode receive handler
 *	@c: closed channel that interrupted
 *
 *	Used (via z8530_nop) when the channel is shut down: eat the
 *	pending byte, clear any end-of-frame CRC state and acknowledge
 *	the interrupt, throwing the data away.
 */
static void z8530_rx_clear(struct z8530_channel *c)
{
	/* Data and status byte read and discarded */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);

	/* Clear irq */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
643
644
645
646
647
648
649
650
651
652
/*
 *	z8530_tx_clear - discard-mode transmit handler
 *	@c: closed channel that interrupted
 *
 *	Clear the pending transmit interrupt and acknowledge it without
 *	sending anything.
 */
static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}
658
659
660
661
662
663
664
665
666
667
/*
 *	z8530_status_clear - discard-mode status handler
 *	@chan: closed channel that interrupted
 *
 *	Clear any latched error on transmit EOM and acknowledge the
 *	external status interrupt without further action.
 */
static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

/* Handler set for a closed channel: acknowledge and discard everything */
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};


EXPORT_SYMBOL(z8530_nop);
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702 irqreturn_t z8530_interrupt(int irq, void *dev_id)
703 {
704 struct z8530_dev *dev=dev_id;
705 u8 uninitialized_var(intr);
706 static volatile int locker=0;
707 int work=0;
708 struct z8530_irqhandler *irqs;
709
710 if(locker)
711 {
712 pr_err("IRQ re-enter\n");
713 return IRQ_NONE;
714 }
715 locker=1;
716
717 spin_lock(&dev->lock);
718
719 while(++work<5000)
720 {
721
722 intr = read_zsreg(&dev->chanA, R3);
723 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
724 break;
725
726
727
728
729
730
731
732 irqs=dev->chanA.irqs;
733
734 if(intr & (CHARxIP|CHATxIP|CHAEXT))
735 {
736 if(intr&CHARxIP)
737 irqs->rx(&dev->chanA);
738 if(intr&CHATxIP)
739 irqs->tx(&dev->chanA);
740 if(intr&CHAEXT)
741 irqs->status(&dev->chanA);
742 }
743
744 irqs=dev->chanB.irqs;
745
746 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
747 {
748 if(intr&CHBRxIP)
749 irqs->rx(&dev->chanB);
750 if(intr&CHBTxIP)
751 irqs->tx(&dev->chanB);
752 if(intr&CHBEXT)
753 irqs->status(&dev->chanB);
754 }
755 }
756 spin_unlock(&dev->lock);
757 if(work==5000)
758 pr_err("%s: interrupt jammed - abort(0x%X)!\n",
759 dev->name, intr);
760
761 locker=0;
762 return IRQ_HANDLED;
763 }
764
765 EXPORT_SYMBOL(z8530_interrupt);
766
/*
 *	Power-on shadow values for registers 0-15, copied into each
 *	channel's regs[] by do_z8530_init(). Only R12 gets a non-zero
 *	default (0x55).
 */
static const u8 reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};
774
775
776
777
778
779
780
781
782
783
784
/**
 *	z8530_sync_open - open a channel in PIO synchronous mode
 *	@dev: network device to attach
 *	@c: Z8530 channel to configure
 *
 *	Prime the receive flip buffers, raise the modem lines, enable
 *	transmit interrupts and the receiver, and install the PIO
 *	interrupt handler set. Returns 0.
 */
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;	/* headroom beyond the device MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* Calling twice allocates both skb and skb2 (the flip pair) */
	z8530_rx_done(c);
	z8530_rx_done(c);
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);
813
814
815
816
817
818
819
820
821
822
823 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
824 {
825 u8 chk;
826 unsigned long flags;
827
828 spin_lock_irqsave(c->lock, flags);
829 c->irqs = &z8530_nop;
830 c->max = 0;
831 c->sync = 0;
832
833 chk=read_zsreg(c,R0);
834 write_zsreg(c, R3, c->regs[R3]);
835 z8530_rtsdtr(c,0);
836
837 spin_unlock_irqrestore(c->lock, flags);
838 return 0;
839 }
840
841 EXPORT_SYMBOL(z8530_sync_close);
842
843
844
845
846
847
848
849
850
851
852
/**
 *	z8530_sync_dma_open - open a channel with rx and tx DMA
 *	@dev: network device to attach
 *	@c: Z8530 channel to configure
 *
 *	Allocate DMA-capable flip buffers, program the SCC for W/REQ and
 *	DTR/REQ driven DMA, set up both ISA DMA channels and install the
 *	DMA interrupt handler set. Returns 0 on success, -EMSGSIZE if the
 *	MTU will not fit in half a page, or -ENOBUFS on allocation failure.
 */
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;	/* headroom beyond the device MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/* DMA is not yet running on either direction */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Each flip buffer is half a page, so the MTU must fit;
	 *	WAN links run 1500 or less so this is normally fine.
	 */
	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		/* Unwind the rx buffer on failure */
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode on the chip
	 */
	spin_lock_irqsave(c->lock, cflags);

	/* Transmit side: DTR/REQ pin drives the DMA request */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Receive side: Wait/Ready function in request mode,
	 *	interrupt on rx errors only; enable last.
	 */
	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Program the ISA DMA controller: rx armed immediately,
	 *	tx configured but left disabled until a frame is queued.
	 */
	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	/* 0x10: 8237 auto-init bit — NOTE(review): confirm against the
	   platform's DMA mode definitions */
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Install the DMA interrupt handlers and open for business
	 */
	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);
966
967
968
969
970
971
972
973
974
975
976 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
977 {
978 u8 chk;
979 unsigned long flags;
980
981 c->irqs = &z8530_nop;
982 c->max = 0;
983 c->sync = 0;
984
985
986
987
988
989 flags=claim_dma_lock();
990 disable_dma(c->rxdma);
991 clear_dma_ff(c->rxdma);
992
993 c->rxdma_on = 0;
994
995 disable_dma(c->txdma);
996 clear_dma_ff(c->txdma);
997 release_dma_lock(flags);
998
999 c->txdma_on = 0;
1000 c->tx_dma_used = 0;
1001
1002 spin_lock_irqsave(c->lock, flags);
1003
1004
1005
1006
1007
1008 c->regs[R1]&= ~WT_RDY_ENAB;
1009 write_zsreg(c, R1, c->regs[R1]);
1010 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1011 c->regs[R1]|= INT_ALL_Rx;
1012 write_zsreg(c, R1, c->regs[R1]);
1013 c->regs[R14]&= ~DTRREQ;
1014 write_zsreg(c, R14, c->regs[R14]);
1015
1016 if(c->rx_buf[0])
1017 {
1018 free_page((unsigned long)c->rx_buf[0]);
1019 c->rx_buf[0]=NULL;
1020 }
1021 if(c->tx_dma_buf[0])
1022 {
1023 free_page((unsigned long)c->tx_dma_buf[0]);
1024 c->tx_dma_buf[0]=NULL;
1025 }
1026 chk=read_zsreg(c,R0);
1027 write_zsreg(c, R3, c->regs[R3]);
1028 z8530_rtsdtr(c,0);
1029
1030 spin_unlock_irqrestore(c->lock, flags);
1031
1032 return 0;
1033 }
1034
1035 EXPORT_SYMBOL(z8530_sync_dma_close);
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1048 {
1049 unsigned long cflags, dflags;
1050
1051 printk("Opening sync interface for TX-DMA\n");
1052 c->sync = 1;
1053 c->mtu = dev->mtu+64;
1054 c->count = 0;
1055 c->skb = NULL;
1056 c->skb2 = NULL;
1057
1058
1059
1060
1061
1062
1063
1064 if(c->mtu > PAGE_SIZE/2)
1065 return -EMSGSIZE;
1066
1067 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1068 if(c->tx_dma_buf[0]==NULL)
1069 return -ENOBUFS;
1070
1071 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1072
1073
1074 spin_lock_irqsave(c->lock, cflags);
1075
1076
1077
1078
1079
1080 z8530_rx_done(c);
1081 z8530_rx_done(c);
1082
1083
1084
1085
1086
1087 c->rxdma_on = 0;
1088 c->txdma_on = 0;
1089
1090 c->tx_dma_used=0;
1091 c->dma_num=0;
1092 c->dma_ready=1;
1093 c->dma_tx = 1;
1094
1095
1096
1097
1098
1099
1100
1101
1102 c->regs[R14]|= DTRREQ;
1103 write_zsreg(c, R14, c->regs[R14]);
1104
1105 c->regs[R1]&= ~TxINT_ENAB;
1106 write_zsreg(c, R1, c->regs[R1]);
1107
1108
1109
1110
1111
1112 dflags = claim_dma_lock();
1113
1114 disable_dma(c->txdma);
1115 clear_dma_ff(c->txdma);
1116 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1117 disable_dma(c->txdma);
1118
1119 release_dma_lock(dflags);
1120
1121
1122
1123
1124
1125 c->rxdma_on = 0;
1126 c->txdma_on = 1;
1127 c->tx_dma_used = 1;
1128
1129 c->irqs = &z8530_txdma_sync;
1130 z8530_rtsdtr(c,1);
1131 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1132 spin_unlock_irqrestore(c->lock, cflags);
1133
1134 return 0;
1135 }
1136
1137 EXPORT_SYMBOL(z8530_sync_txdma_open);
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1149 {
1150 unsigned long dflags, cflags;
1151 u8 chk;
1152
1153
1154 spin_lock_irqsave(c->lock, cflags);
1155
1156 c->irqs = &z8530_nop;
1157 c->max = 0;
1158 c->sync = 0;
1159
1160
1161
1162
1163
1164 dflags = claim_dma_lock();
1165
1166 disable_dma(c->txdma);
1167 clear_dma_ff(c->txdma);
1168 c->txdma_on = 0;
1169 c->tx_dma_used = 0;
1170
1171 release_dma_lock(dflags);
1172
1173
1174
1175
1176
1177 c->regs[R1]&= ~WT_RDY_ENAB;
1178 write_zsreg(c, R1, c->regs[R1]);
1179 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1180 c->regs[R1]|= INT_ALL_Rx;
1181 write_zsreg(c, R1, c->regs[R1]);
1182 c->regs[R14]&= ~DTRREQ;
1183 write_zsreg(c, R14, c->regs[R14]);
1184
1185 if(c->tx_dma_buf[0])
1186 {
1187 free_page((unsigned long)c->tx_dma_buf[0]);
1188 c->tx_dma_buf[0]=NULL;
1189 }
1190 chk=read_zsreg(c,R0);
1191 write_zsreg(c, R3, c->regs[R3]);
1192 z8530_rtsdtr(c,0);
1193
1194 spin_unlock_irqrestore(c->lock, cflags);
1195 return 0;
1196 }
1197
1198
1199 EXPORT_SYMBOL(z8530_sync_txdma_close);
1200
1201
1202
1203
1204
1205
1206
/* Printable chip names, indexed by the Z8530/Z85C30/Z85230 type enum
   stored in dev->type; used by z8530_describe(). */
static const char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
/**
 *	z8530_describe - log a one-line description of a found device
 *	@dev: detected Z8530 device
 *	@mapping: text describing the access method (e.g. I/O space)
 *	@io: the port cookie; only the raw port bits are printed
 */
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);
1235
1236
1237
1238
1239
/*
 *	do_z8530_init - reset and probe a Z8530 device
 *	@dev: device to initialise
 *
 *	Hardware-reset the chip, verify it responds by writing/reading
 *	test patterns through R12, then distinguish Z8530 / Z85C30 /
 *	Z85230 via the enhancement bit in R15. Fills in the shadow
 *	register arrays. Returns 0 on success or -ENODEV if no chip
 *	answers. Caller holds the device lock.
 */
static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* Park both channels on the discard handlers */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	dev->chanA.dcdcheck=DCD;
	dev->chanB.dcdcheck=DCD;

	/* Force hardware reset (R9 = 0xC0) and let it settle */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);

	/* Presence check: R12 must read back the patterns we write */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if(read_zsreg(&dev->chanA, R12)!=0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if(read_zsreg(&dev->chanA, R12)!=0x55)
		return -ENODEV;

	dev->type=Z8530;

	/*
	 *	Try to set the enhancement bit in R15; a plain Z8530
	 *	ignores it, the CMOS parts latch it.
	 */
	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 *	If the bit stuck, this is at least a Z85C30; the 85230
	 *	is then identified by its deeper tx FIFO (buffer still
	 *	reported empty after a write to R8).
	 */
	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
		/* NOTE(review): R8 write probes the FIFO depth; exact
		   discrimination relies on chip timing — keep order */
		write_zsreg(&dev->chanA, R8, 0);
		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
			dev->type = Z85230;
		else
			dev->type = Z85C30;
	}

	/*
	 *	Back to non-enhanced mode for normal operation
	 */
	write_zsreg(&dev->chanA, R15, 0);

	/*
	 *	Seed the shadow register copies for both channels
	 */
	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init ,16);

	return 0;
}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
/**
 *	z8530_init - initialise and probe a Z8530 device
 *	@dev: device to set up
 *
 *	Set up the shared channel lock, then run the reset/probe
 *	sequence under it. Returns 0 on success or -ENODEV from
 *	do_z8530_init().
 */
int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Both channels share the one device lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}


EXPORT_SYMBOL(z8530_init);
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
/**
 *	z8530_shutdown - shut down a Z8530 device
 *	@dev: device to stop
 *
 *	Park both channels on the discard handlers and hardware-reset
 *	the chip (R9 = 0xC0). Returns 0.
 */
int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* Let the reset settle */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
/**
 *	z8530_channel_load - load a channel from a register table
 *	@c: channel to program
 *	@rtable: (register, value) pairs terminated by 255, e.g.
 *		 z8530_hdlc_kilostream
 *
 *	Program each listed register and mirror the value into the
 *	shadow array, then reset the channel's software receive state
 *	and enable the receiver. Register numbers above 0x0F are written
 *	with the R15 point-high bit toggled around the access.
 *	Returns 0.
 */
int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		/* Extended registers: set the point-high bit in R15 */
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		/* NOTE(review): shadow is indexed by the raw reg number
		   (e.g. 23); assumes c->regs[] is sized for the extended
		   range — confirm against z85230.h */
		c->regs[reg]=*rtable++;
	}
	c->rx_function=z8530_null_rx;	/* discard until the user hooks rx */
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	/* Snapshot the status so later handlers can detect changes */
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
/*
 *	z8530_tx_begin - start transmitting the queued frame
 *	@c: channel to kick
 *
 *	If no frame is in flight, promote tx_next_skb to tx_skb and
 *	start it — by programming the tx DMA channel in DMA mode, or by
 *	stuffing the FIFO in PIO mode (the tx interrupt then drains the
 *	rest). Called with the channel lock held from z8530_tx_done()
 *	and z8530_queue_xmit(). Always re-wakes the queue at the end.
 */
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	/* A frame is already being sent; leave it alone */
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Nothing queued: idle the transmitter */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/* A residue after shutdown means the last DMA
			   transfer did not complete: count it as dropped */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;

		if(c->dma_tx)
		{
			/*
			 *	FIXME-style caveat preserved: the DMA set-up
			 *	sequence below is order sensitive.
			 */
			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	Pre-85230 parts need the CRC generator and
			 *	EOM latch reset by hand before the transfer
			 */
			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			/* Abort on underrun off while DMA feeds the chip */
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{
			/* PIO: restore R10, reset the tx CRC, then stuff
			   the FIFO until it fills; the tx interrupt takes
			   over from there */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}

	/* The next-frame slot is free again either way */
	netif_wake_queue(c->netdevice);
}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523 static void z8530_tx_done(struct z8530_channel *c)
1524 {
1525 struct sk_buff *skb;
1526
1527
1528 if (c->tx_skb == NULL)
1529 return;
1530
1531 skb = c->tx_skb;
1532 c->tx_skb = NULL;
1533 z8530_tx_begin(c);
1534 c->netdevice->stats.tx_packets++;
1535 c->netdevice->stats.tx_bytes += skb->len;
1536 dev_consume_skb_irq(skb);
1537 }
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
/**
 *	z8530_null_rx - default receive callback
 *	@c: channel the frame arrived on
 *	@skb: received frame
 *
 *	Discards the frame. Installed by z8530_channel_load() until the
 *	user of the channel hooks a real rx_function.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
/*
 *	z8530_rx_done - complete a received frame and re-arm reception
 *	@c: channel that finished a frame
 *
 *	DMA path: stop the rx DMA channel, copy the completed flip
 *	buffer into a fresh skb and restart DMA on the other buffer.
 *	PIO path: swap skb/skb2 so reception continues immediately,
 *	then allocate a replacement buffer. In both cases the completed
 *	frame is handed to c->rx_function. Also used at open time (with
 *	no frame pending) to populate the skb/skb2 pair.
 */
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	if(c->rxdma_on)
	{
		/*
		 *	DMA receive: figure out which flip buffer just
		 *	completed and whether the other one is free
		 */
		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Stop the DMA channel and compute the received
		 *	length from the residue
		 */
		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* shouldn't occur; clamp defensively */
		c->dma_ready=0;

		/*
		 *	If the other flip buffer is free, restart DMA
		 *	into it right away
		 */
		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);

			/* NOTE(review): command issued via an R0 write
			   rather than write_zsctrl — keep as-is */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Both buffers busy: frames are arriving faster
			   than we can drain them */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");

		release_dma_lock(flags);

		/*
		 *	Copy the frame out of the flip buffer into a
		 *	fresh skb for the stack
		 */
		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	PIO receive: swap in the standby buffer (skb2) so
		 *	the interrupt handler can keep receiving while we
		 *	allocate a replacement
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			/* No standby buffer: receive stalls until one
			   is allocated below */
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		/* Refill the standby slot */
		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			netdev_warn(c->netdevice, "memory squeeze\n");
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}

	/*
	 *	Hand the completed frame (if we have one) upstream
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		netdev_err(c->netdevice, "Lost a frame\n");
	}
}
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703 static inline int spans_boundary(struct sk_buff *skb)
1704 {
1705 unsigned long a=(unsigned long)skb->data;
1706 a^=(a+skb->len);
1707 if(a&0x00010000)
1708 return 1;
1709 return 0;
1710 }
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
/**
 *	z8530_queue_xmit - queue a frame for transmission
 *	@c: channel to send on
 *	@skb: frame to send
 *
 *	Single-slot transmit queue: if tx_next_skb is occupied, return
 *	NETDEV_TX_BUSY. In DMA mode, frames that the ISA DMA controller
 *	cannot reach (above 16MB or crossing a 64K boundary) are copied
 *	into a bounce buffer first. Finally kick the transmitter.
 */
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
		return NETDEV_TX_BUSY;


	/*
	 *	ISA DMA constraints: 24-bit addressing and no 64K
	 *	boundary crossing, so bounce when needed
	 */
	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 *	Copy the frame into one of the two bounce
		 *	buffers; they are used alternately
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* flip for next time */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);
1769
1770
1771
1772
/* Banner printed once at module load */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

/* Module init: announce ourselves; devices register separately */
static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);
1782
/* Module exit: nothing to tear down at the driver-core level */
static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");