/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it's very likely that the same bug
 *     will also be in pc87108.c since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the SSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb(iobase+SSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase+SSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

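/*
 * When CONFIG_USE_W977_PNP is defined the driver configures the chip
 * itself through the W977 Extended Function (PnP) registers: the IR
 * device is set up and activated in w83977af_probe() and deactivated
 * again in w83977af_close().
 */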
#define CONFIG_USE_W977_PNP        /* Currently needed */
#define PIO_MAX_SPEED       115200

static char *driver_name = "w83977af_ir";
static int  qos_mtt_bits = 0x07;   /* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t  w83977af_hard_xmit(struct sk_buff *skb,
				       struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init w83977af_init(void)
{
	int i;

	for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};

/*
 * Function w83977af_open (i, iobase, irq, dma)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		pr_debug("%s(), can't get iobase of 0x%03x\n",
			 __func__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */

	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
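	/* e.g. a window of seven 2048 byte frames: (2048 + 6) * 7 + 6 = 14384 */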
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops	= &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		net_err_ratelimited("%s(), register_netdevice() failed!\n",
				    __func__);
		goto err_out3;
	}
	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	pr_debug("%s(), Releasing Region %03x\n",
		 __func__ , self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

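/*
 * Function w83977af_probe (iobase, irq, dma)
 *
 *    Configure and activate the IR device through the W977 Extended
 *    Function registers and check the Advanced IR ID. Returns 0 if a
 *    chip was found at one of the known Extended Function bases, and
 *    -1 otherwise.
 */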
static int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i=0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* Should be 0x1? */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver      (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
					     version);

			return 0;
		} else {
			/* Try next extended function register address */
			pr_debug("%s(), Wrong chip version\n", __func__);
		}
	}
	return -1;
}

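/*
 * Function w83977af_change_speed (self, speed)
 *
 *    Change the speed of the device
 *
 */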
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

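	/*
	 * The ABLL values written below act as divisors of a 115200 baud
	 * base rate: 115200/12 = 9600, /6 = 19200, /3 = 38400, /2 = 57600
	 * and /1 = 115200.
	 */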
	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		pr_debug("%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		pr_debug("%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		pr_debug("%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		ir_mode = HCR_FIR;
		pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
		 (int)skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
		pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
		if (mtt)
			udelay(mtt);

		/* Enable DMA interrupt */
		switch_bank(iobase, SET0);
		outb(ICR_EDMAI, iobase+ICR);
		w83977af_dma_write(self, iobase);
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return NETDEV_TX_OK;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;

	pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel  */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the transmit FIFO with the current frame using PIO. Returns
 *    the number of bytes that were actually written.
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);

		fifo_size -= 17;
		pr_debug("%s(), %d bytes left in tx fifo\n",
			 __func__ , fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
		 __func__ , fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	pr_debug("%s(%ld)\n", __func__ , jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		pr_debug("%s(), Transmit underrun!\n", __func__);

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->netdev->stats.tx_packets++;

	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_ARCH_NETWINDER
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	pr_debug("%s\n", __func__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

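	/*
	 * On the NetWinder the DMA controller is programmed by hand under
	 * the instance lock and only enabled once HCR_EN_DMA has been set
	 * below; on other platforms irda_setup_dma() does the whole job.
	 */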
#ifdef CONFIG_ARCH_NETWINDER
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_ARCH_NETWINDER
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	pr_debug("%s\n", __func__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
				udelay(80); /* Should be enough!? */
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/*  Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	pr_debug("%s(), isr=%#x\n", __func__ , isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed */
		if (self->new_speed) {
			pr_debug("%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

1031 
1032 /*
1033  * Function w83977af_interrupt (irq, dev_id, regs)
1034  *
1035  *    An interrupt from the chip has arrived. Time to do some work
1036  *
1037  */
w83977af_interrupt(int irq,void * dev_id)1038 static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1039 {
1040 	struct net_device *dev = dev_id;
1041 	struct w83977af_ir *self;
1042 	__u8 set, icr, isr;
1043 	int iobase;
1044 
1045 	self = netdev_priv(dev);
1046 
1047 	iobase = self->io.fir_base;
1048 
1049 	/* Save current bank */
1050 	set = inb(iobase+SSR);
1051 	switch_bank(iobase, SET0);
1052 
1053 	icr = inb(iobase+ICR);
1054 	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
1055 
1056 	outb(0, iobase+ICR); /* Disable interrupts */
1057 
1058 	if (isr) {
1059 		/* Dispatch interrupt handler for the current speed */
1060 		if (self->io.speed > PIO_MAX_SPEED )
1061 			icr = w83977af_fir_interrupt(self, isr);
1062 		else
1063 			icr = w83977af_sir_interrupt(self, isr);
1064 	}
1065 
1066 	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
1067 	outb(set, iobase+SSR);    /* Restore bank register */
1068 	return IRQ_RETVAL(isr);
1069 }
1070 
/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
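
/*
 * Typical module load, assuming the defaults above match the Super I/O
 * configuration (adjust io= and irq= to the actual board setup):
 *
 *     modprobe w83977af_ir io=0x180 irq=11
 */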

/*
 * Function init_module (void)
 *
 *
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *
 *
 */
module_exit(w83977af_cleanup);