Lines matching refs: self
88 static int w83977af_close(struct w83977af_ir *self);
90 static int w83977af_dma_receive(struct w83977af_ir *self);
91 static int w83977af_dma_receive_complete(struct w83977af_ir *self);
95 static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
96 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
97 static int w83977af_is_receiving(struct w83977af_ir *self);
153 struct w83977af_ir *self; in w83977af_open() local
178 self = netdev_priv(dev); in w83977af_open()
179 spin_lock_init(&self->lock); in w83977af_open()
183 self->io.fir_base = iobase; in w83977af_open()
184 self->io.irq = irq; in w83977af_open()
185 self->io.fir_ext = CHIP_IO_EXTENT; in w83977af_open()
186 self->io.dma = dma; in w83977af_open()
187 self->io.fifo_size = 32; in w83977af_open()
190 irda_init_max_qos_capabilies(&self->qos); in w83977af_open()
195 self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 | in w83977af_open()
199 self->qos.min_turn_time.bits = qos_mtt_bits; in w83977af_open()
200 irda_qos_bits_to_value(&self->qos); in w83977af_open()
203 self->rx_buff.truesize = 14384; in w83977af_open()
204 self->tx_buff.truesize = 4000; in w83977af_open()
207 self->rx_buff.head = in w83977af_open()
208 dma_zalloc_coherent(NULL, self->rx_buff.truesize, in w83977af_open()
209 &self->rx_buff_dma, GFP_KERNEL); in w83977af_open()
210 if (self->rx_buff.head == NULL) { in w83977af_open()
215 self->tx_buff.head = in w83977af_open()
216 dma_zalloc_coherent(NULL, self->tx_buff.truesize, in w83977af_open()
217 &self->tx_buff_dma, GFP_KERNEL); in w83977af_open()
218 if (self->tx_buff.head == NULL) { in w83977af_open()
223 self->rx_buff.in_frame = FALSE; in w83977af_open()
224 self->rx_buff.state = OUTSIDE_FRAME; in w83977af_open()
225 self->tx_buff.data = self->tx_buff.head; in w83977af_open()
226 self->rx_buff.data = self->rx_buff.head; in w83977af_open()
227 self->netdev = dev; in w83977af_open()
240 dev_self[i] = self; in w83977af_open()
244 dma_free_coherent(NULL, self->tx_buff.truesize, in w83977af_open()
245 self->tx_buff.head, self->tx_buff_dma); in w83977af_open()
247 dma_free_coherent(NULL, self->rx_buff.truesize, in w83977af_open()
248 self->rx_buff.head, self->rx_buff_dma); in w83977af_open()
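The w83977af_open() entries above show the paired rx/tx buffer setup: two DMA-coherent buffers are allocated, and the error path frees whichever one succeeded. A minimal sketch of that pairing follows, assuming a real struct device rather than the NULL the listing passes (modern kernels reject a NULL dev, and dma_zalloc_coherent() has since been folded into dma_alloc_coherent(), which now returns zeroed memory); example_alloc_buffers is a hypothetical helper name, the real driver inlines this in w83977af_open():

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        static int example_alloc_buffers(struct device *dev,
                                         struct w83977af_ir *self)
        {
                /* Coherent rx buffer; dma_alloc_coherent() zeroes it. */
                self->rx_buff.head = dma_alloc_coherent(dev, self->rx_buff.truesize,
                                                        &self->rx_buff_dma, GFP_KERNEL);
                if (!self->rx_buff.head)
                        return -ENOMEM;

                self->tx_buff.head = dma_alloc_coherent(dev, self->tx_buff.truesize,
                                                        &self->tx_buff_dma, GFP_KERNEL);
                if (!self->tx_buff.head) {
                        /* Unwind the rx allocation on partial failure. */
                        dma_free_coherent(dev, self->rx_buff.truesize,
                                          self->rx_buff.head, self->rx_buff_dma);
                        return -ENOMEM;
                }
                return 0;
        }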
262 static int w83977af_close(struct w83977af_ir *self) in w83977af_close() argument
266 iobase = self->io.fir_base; in w83977af_close()
281 unregister_netdev(self->netdev); in w83977af_close()
285 __func__, self->io.fir_base); in w83977af_close()
286 release_region(self->io.fir_base, self->io.fir_ext); in w83977af_close()
288 if (self->tx_buff.head) in w83977af_close()
289 dma_free_coherent(NULL, self->tx_buff.truesize, in w83977af_close()
290 self->tx_buff.head, self->tx_buff_dma); in w83977af_close()
292 if (self->rx_buff.head) in w83977af_close()
293 dma_free_coherent(NULL, self->rx_buff.truesize, in w83977af_close()
294 self->rx_buff.head, self->rx_buff_dma); in w83977af_close()
296 free_netdev(self->netdev); in w83977af_close()
399 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed) in w83977af_change_speed() argument
405 iobase = self->io.fir_base; in w83977af_change_speed()
408 self->io.speed = speed; in w83977af_change_speed()
459 netif_wake_queue(self->netdev); in w83977af_change_speed()
465 w83977af_dma_receive(self); in w83977af_change_speed()
482 struct w83977af_ir *self; in w83977af_hard_xmit() local
488 self = netdev_priv(dev); in w83977af_hard_xmit()
490 iobase = self->io.fir_base; in w83977af_hard_xmit()
500 if ((speed != self->io.speed) && (speed != -1)) { in w83977af_hard_xmit()
503 w83977af_change_speed(self, speed); in w83977af_hard_xmit()
507 self->new_speed = speed; in w83977af_hard_xmit()
514 if (self->io.speed > PIO_MAX_SPEED) { in w83977af_hard_xmit()
515 self->tx_buff.data = self->tx_buff.head; in w83977af_hard_xmit()
516 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len); in w83977af_hard_xmit()
517 self->tx_buff.len = skb->len; in w83977af_hard_xmit()
527 w83977af_dma_write(self, iobase); in w83977af_hard_xmit()
529 self->tx_buff.data = self->tx_buff.head; in w83977af_hard_xmit()
530 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, in w83977af_hard_xmit()
531 self->tx_buff.truesize); in w83977af_hard_xmit()
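The hard_xmit entries trace the transmit decision: a per-skb speed request is applied immediately for a zero-length (pure speed-change) frame, otherwise recorded in self->new_speed and applied once the frame drains (see the new_speed handling in the completion paths below); above PIO_MAX_SPEED the payload is copied raw for DMA, while at SIR speeds it is async-wrapped for PIO. A sketch of that shape, assuming the old net/irda helpers (irda_get_next_speed(), async_wrap_skb()) from the since-removed IrDA stack:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static netdev_tx_t example_hard_xmit(struct sk_buff *skb,
                                             struct net_device *dev)
        {
                struct w83977af_ir *self = netdev_priv(dev);
                __u32 speed = irda_get_next_speed(skb);

                netif_stop_queue(dev);  /* one frame in flight at a time */

                if (speed != self->io.speed && speed != -1) {
                        if (!skb->len) {        /* pure speed-change request */
                                w83977af_change_speed(self, speed);
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }
                        self->new_speed = speed;        /* defer until sent */
                }

                self->tx_buff.data = self->tx_buff.head;
                if (self->io.speed > PIO_MAX_SPEED) {
                        /* FIR: raw copy; the hardware frames it, DMA sends it */
                        skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
                        self->tx_buff.len = skb->len;
                } else {
                        /* SIR: wrap with BOF/EOF and CRC for PIO transmission */
                        self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                                           self->tx_buff.truesize);
                }
                dev_kfree_skb(skb);
                /* The real driver then starts DMA or enables the TX interrupt;
                 * the completion handler calls netif_wake_queue(). */
                return NETDEV_TX_OK;
        }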
551 static void w83977af_dma_write(struct w83977af_ir *self, int iobase) in w83977af_dma_write() argument
554 pr_debug("%s(), len=%d\n", __func__, self->tx_buff.len); in w83977af_dma_write()
566 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, in w83977af_dma_write()
568 self->io.direction = IO_XMIT; in w83977af_dma_write()
623 static void w83977af_dma_xmit_complete(struct w83977af_ir *self) in w83977af_dma_xmit_complete() argument
630 IRDA_ASSERT(self != NULL, return;); in w83977af_dma_xmit_complete()
632 iobase = self->io.fir_base; in w83977af_dma_xmit_complete()
645 self->netdev->stats.tx_errors++; in w83977af_dma_xmit_complete()
646 self->netdev->stats.tx_fifo_errors++; in w83977af_dma_xmit_complete()
651 self->netdev->stats.tx_packets++; in w83977af_dma_xmit_complete()
654 if (self->new_speed) { in w83977af_dma_xmit_complete()
655 w83977af_change_speed(self, self->new_speed); in w83977af_dma_xmit_complete()
656 self->new_speed = 0; in w83977af_dma_xmit_complete()
661 netif_wake_queue(self->netdev); in w83977af_dma_xmit_complete()
674 static int w83977af_dma_receive(struct w83977af_ir *self) in w83977af_dma_receive() argument
682 IRDA_ASSERT(self != NULL, return -1;); in w83977af_dma_receive()
686 iobase = self->io.fir_base; in w83977af_dma_receive()
700 self->io.direction = IO_RECV; in w83977af_dma_receive()
701 self->rx_buff.data = self->rx_buff.head; in w83977af_dma_receive()
704 spin_lock_irqsave(&self->lock, flags); in w83977af_dma_receive()
706 disable_dma(self->io.dma); in w83977af_dma_receive()
707 clear_dma_ff(self->io.dma); in w83977af_dma_receive()
708 set_dma_mode(self->io.dma, DMA_MODE_READ); in w83977af_dma_receive()
709 set_dma_addr(self->io.dma, self->rx_buff_dma); in w83977af_dma_receive()
710 set_dma_count(self->io.dma, self->rx_buff.truesize); in w83977af_dma_receive()
712 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, in w83977af_dma_receive()
722 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0; in w83977af_dma_receive()
729 enable_dma(self->io.dma); in w83977af_dma_receive()
730 spin_unlock_irqrestore(&self->lock, flags); in w83977af_dma_receive()
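The receive setup brackets a classic ISA DMA reprogramming sequence with the driver spinlock. Sketched on its own below with the kernel's ISA DMA API; note the canonical form also takes the global 8237 lock via claim_dma_lock(), which is what the old irda_setup_dma() helper seen at line 712 wrapped:

        #include <asm/dma.h>

        static void example_start_rx_dma(struct w83977af_ir *self)
        {
                unsigned long flags = claim_dma_lock();

                disable_dma(self->io.dma);
                clear_dma_ff(self->io.dma);     /* reset the address flip-flop */
                set_dma_mode(self->io.dma, DMA_MODE_READ);      /* device -> memory */
                set_dma_addr(self->io.dma, self->rx_buff_dma);
                set_dma_count(self->io.dma, self->rx_buff.truesize);
                enable_dma(self->io.dma);

                release_dma_lock(flags);
        }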
746 static int w83977af_dma_receive_complete(struct w83977af_ir *self) in w83977af_dma_receive_complete() argument
757 st_fifo = &self->st_fifo; in w83977af_dma_receive_complete()
759 iobase = self->io.fir_base; in w83977af_dma_receive_complete()
764 iobase = self->io.fir_base; in w83977af_dma_receive_complete()
789 self->netdev->stats.rx_errors += len; in w83977af_dma_receive_complete()
792 self->netdev->stats.rx_errors++; in w83977af_dma_receive_complete()
794 self->rx_buff.data += len; in w83977af_dma_receive_complete()
797 self->netdev->stats.rx_length_errors++; in w83977af_dma_receive_complete()
800 self->netdev->stats.rx_frame_errors++; in w83977af_dma_receive_complete()
803 self->netdev->stats.rx_crc_errors++; in w83977af_dma_receive_complete()
807 self->netdev->stats.rx_fifo_errors++; in w83977af_dma_receive_complete()
810 self->netdev->stats.rx_fifo_errors++; in w83977af_dma_receive_complete()
833 if (self->io.speed < 4000000) { in w83977af_dma_receive_complete()
836 self->rx_buff.data, in w83977af_dma_receive_complete()
841 self->rx_buff.data, in w83977af_dma_receive_complete()
846 self->rx_buff.data += len; in w83977af_dma_receive_complete()
847 self->netdev->stats.rx_packets++; in w83977af_dma_receive_complete()
849 skb->dev = self->netdev; in w83977af_dma_receive_complete()
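At the end of the FIR receive-complete path a finished frame is copied out of the coherent buffer into a fresh skb and handed up the stack; the speed check at line 833 decides how many trailing CRC bytes to drop (16-bit CRC below 4 Mb/s, 32-bit at 4 Mb/s). A hedged sketch of the delivery step, assuming len already has the CRC stripped and example_deliver_frame is a hypothetical helper:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <linux/if_ether.h>

        static void example_deliver_frame(struct w83977af_ir *self, int len)
        {
                struct sk_buff *skb = dev_alloc_skb(len + 1);

                if (!skb) {
                        self->netdev->stats.rx_dropped++;
                        return;
                }
                skb_reserve(skb, 1);    /* align the IrLAP header */
                memcpy(skb_put(skb, len), self->rx_buff.data, len);

                skb->dev = self->netdev;
                skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                self->netdev->stats.rx_packets++;
                netif_rx(skb);
        }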
867 static void w83977af_pio_receive(struct w83977af_ir *self) in w83977af_pio_receive() argument
872 IRDA_ASSERT(self != NULL, return;); in w83977af_pio_receive()
874 iobase = self->io.fir_base; in w83977af_pio_receive()
879 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, in w83977af_pio_receive()
890 static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) in w83977af_sir_interrupt() argument
899 iobase = self->io.fir_base; in w83977af_sir_interrupt()
903 actual = w83977af_pio_write(self->io.fir_base, in w83977af_sir_interrupt()
904 self->tx_buff.data, in w83977af_sir_interrupt()
905 self->tx_buff.len, in w83977af_sir_interrupt()
906 self->io.fifo_size); in w83977af_sir_interrupt()
908 self->tx_buff.data += actual; in w83977af_sir_interrupt()
909 self->tx_buff.len -= actual; in w83977af_sir_interrupt()
911 self->io.direction = IO_XMIT; in w83977af_sir_interrupt()
914 if (self->tx_buff.len > 0) { in w83977af_sir_interrupt()
922 self->netdev->stats.tx_packets++; in w83977af_sir_interrupt()
925 netif_wake_queue(self->netdev); in w83977af_sir_interrupt()
932 if (self->new_speed) { in w83977af_sir_interrupt()
934 w83977af_change_speed(self, self->new_speed); in w83977af_sir_interrupt()
935 self->new_speed = 0; in w83977af_sir_interrupt()
939 self->io.direction = IO_RECV; in w83977af_sir_interrupt()
945 w83977af_pio_receive(self); in w83977af_sir_interrupt()
959 static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr) in w83977af_fir_interrupt() argument
965 iobase = self->io.fir_base; in w83977af_fir_interrupt()
970 if (w83977af_dma_receive_complete(self)) { in w83977af_fir_interrupt()
999 if (self->io.direction == IO_XMIT) { in w83977af_fir_interrupt()
1000 w83977af_dma_write(self, iobase); in w83977af_fir_interrupt()
1005 w83977af_dma_receive_complete(self); in w83977af_fir_interrupt()
1012 w83977af_dma_xmit_complete(self); in w83977af_fir_interrupt()
1021 w83977af_dma_receive(self); in w83977af_fir_interrupt()
1041 struct w83977af_ir *self; in w83977af_interrupt() local
1045 self = netdev_priv(dev); in w83977af_interrupt()
1047 iobase = self->io.fir_base; in w83977af_interrupt()
1060 if (self->io.speed > PIO_MAX_SPEED) in w83977af_interrupt()
1061 icr = w83977af_fir_interrupt(self, isr); in w83977af_interrupt()
1063 icr = w83977af_sir_interrupt(self, isr); in w83977af_interrupt()
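The top-level handler recovers the driver state from the netdev and routes by current speed: above PIO_MAX_SPEED the FIR (DMA) handler runs, otherwise the SIR (PIO) one. A dispatch sketch, where ISR and ICR stand for the chip's interrupt status/control register offsets from the driver's header (assumed names):

        #include <linux/interrupt.h>
        #include <linux/io.h>

        static irqreturn_t example_interrupt(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id;
                struct w83977af_ir *self = netdev_priv(dev);
                int iobase = self->io.fir_base;
                __u8 isr, icr;

                icr = inb(iobase + ICR);
                isr = inb(iobase + ISR) & icr;  /* only enabled sources */
                outb(0, iobase + ICR);          /* mask while handling */

                if (isr)
                        icr = (self->io.speed > PIO_MAX_SPEED)
                                ? w83977af_fir_interrupt(self, isr)
                                : w83977af_sir_interrupt(self, isr);

                outb(icr, iobase + ICR);        /* re-arm with handler's mask */
                return IRQ_RETVAL(isr);
        }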
1077 static int w83977af_is_receiving(struct w83977af_ir *self) in w83977af_is_receiving() argument
1083 IRDA_ASSERT(self != NULL, return FALSE;); in w83977af_is_receiving()
1085 if (self->io.speed > 115200) { in w83977af_is_receiving()
1086 iobase = self->io.fir_base; in w83977af_is_receiving()
1097 status = (self->rx_buff.state != OUTSIDE_FRAME); in w83977af_is_receiving()
1110 struct w83977af_ir *self; in w83977af_net_open() local
1117 self = netdev_priv(dev); in w83977af_net_open()
1119 IRDA_ASSERT(self != NULL, return 0;); in w83977af_net_open()
1121 iobase = self->io.fir_base; in w83977af_net_open()
1123 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name, in w83977af_net_open()
1131 if (request_dma(self->io.dma, dev->name)) { in w83977af_net_open()
1132 free_irq(self->io.irq, dev); in w83977af_net_open()
1141 if (self->io.speed > 115200) { in w83977af_net_open()
1143 w83977af_dma_receive(self); in w83977af_net_open()
1154 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base); in w83977af_net_open()
1160 self->irlap = irlap_open(dev, &self->qos, hwname); in w83977af_net_open()
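net_open() takes its resources in order and unwinds on failure: the IRQ first, then the ISA DMA channel (releasing the IRQ if that fails), and finally the IrLAP registration. A sketch of that ordering, assuming the old net/irda headers for irlap_open():

        #include <linux/interrupt.h>
        #include <asm/dma.h>

        static int example_net_open(struct net_device *dev)
        {
                struct w83977af_ir *self = netdev_priv(dev);
                char hwname[32];
                int err;

                err = request_irq(self->io.irq, w83977af_interrupt, 0,
                                  dev->name, dev);
                if (err)
                        return err;

                if (request_dma(self->io.dma, dev->name)) {
                        free_irq(self->io.irq, dev);    /* unwind the IRQ */
                        return -EAGAIN;
                }

                /* Register with IrLAP; hwname identifies the port to the stack. */
                snprintf(hwname, sizeof(hwname), "w83977af @ 0x%03x",
                         self->io.fir_base);
                self->irlap = irlap_open(dev, &self->qos, hwname);

                netif_start_queue(dev);
                return 0;
        }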
1173 struct w83977af_ir *self; in w83977af_net_close() local
1179 self = netdev_priv(dev); in w83977af_net_close()
1181 IRDA_ASSERT(self != NULL, return 0;); in w83977af_net_close()
1183 iobase = self->io.fir_base; in w83977af_net_close()
1189 if (self->irlap) in w83977af_net_close()
1190 irlap_close(self->irlap); in w83977af_net_close()
1191 self->irlap = NULL; in w83977af_net_close()
1193 disable_dma(self->io.dma); in w83977af_net_close()
1202 free_irq(self->io.irq, dev); in w83977af_net_close()
1203 free_dma(self->io.dma); in w83977af_net_close()
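net_close() reverses that order: stop the queue, close the IrLAP instance, quiesce the DMA channel, then release the IRQ and the DMA resources. Sketch under the same old net/irda API assumption:

        static int example_net_close(struct net_device *dev)
        {
                struct w83977af_ir *self = netdev_priv(dev);

                netif_stop_queue(dev);

                if (self->irlap)
                        irlap_close(self->irlap);
                self->irlap = NULL;

                disable_dma(self->io.dma);      /* quiesce before releasing */
                free_irq(self->io.irq, dev);
                free_dma(self->io.dma);
                return 0;
        }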
1220 struct w83977af_ir *self; in w83977af_net_ioctl() local
1226 self = netdev_priv(dev); in w83977af_net_ioctl()
1228 IRDA_ASSERT(self != NULL, return -1;); in w83977af_net_ioctl()
1232 spin_lock_irqsave(&self->lock, flags); in w83977af_net_ioctl()
1240 w83977af_change_speed(self, irq->ifr_baudrate); in w83977af_net_ioctl()
1247 irda_device_set_media_busy(self->netdev, TRUE); in w83977af_net_ioctl()
1250 irq->ifr_receiving = w83977af_is_receiving(self); in w83977af_net_ioctl()
1256 spin_unlock_irqrestore(&self->lock, flags); in w83977af_net_ioctl()