Lines matching refs:self

81 static int via_ircc_dma_receive(struct via_ircc_cb *self);
82 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
88 static void via_hw_init(struct via_ircc_cb *self);
89 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
91 static int via_ircc_is_receiving(struct via_ircc_cb *self);
100 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
101 static void hwreset(struct via_ircc_cb *self);
102 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
103 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
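
Every prototype above threads the same per-adapter control block, struct via_ircc_cb *self, instead of global state; that is what makes the locking and the deferred speed change below work per device. A minimal sketch of the block, limited to fields visible in this listing (the real struct carries many more members; chipio_t and iobuf_t come from the old kernel IrDA stack):

    /* Sketch only: per-device state handed to every routine as "self". */
    struct via_ircc_cb {
        struct net_device *netdev;      /* attached network interface */
        spinlock_t lock;                /* serializes xmit/ioctl vs. the ISR */
        chipio_t io;                    /* I/O base, IRQ, DMA channels, speed */
        iobuf_t rx_buff, tx_buff;       /* DMA bounce buffers */
        dma_addr_t rx_buff_dma, tx_buff_dma;
        __u32 new_speed;                /* speed change deferred to TX complete */
        int chip_id;                    /* 0x3076 or 0x3096, per the listing */
    };
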
285 struct via_ircc_cb *self; in via_ircc_open() local
293 self = netdev_priv(dev); in via_ircc_open()
294 self->netdev = dev; in via_ircc_open()
295 spin_lock_init(&self->lock); in via_ircc_open()
297 pci_set_drvdata(pdev, self); in via_ircc_open()
300 self->io.cfg_base = info->cfg_base; in via_ircc_open()
301 self->io.fir_base = info->fir_base; in via_ircc_open()
302 self->io.irq = info->irq; in via_ircc_open()
303 self->io.fir_ext = CHIP_IO_EXTENT; in via_ircc_open()
304 self->io.dma = info->dma; in via_ircc_open()
305 self->io.dma2 = info->dma2; in via_ircc_open()
306 self->io.fifo_size = 32; in via_ircc_open()
307 self->chip_id = id; in via_ircc_open()
308 self->st_fifo.len = 0; in via_ircc_open()
309 self->RxDataReady = 0; in via_ircc_open()
312 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { in via_ircc_open()
314 __func__, self->io.fir_base); in via_ircc_open()
320 irda_init_max_qos_capabilies(&self->qos); in via_ircc_open()
324 dongle_id = via_ircc_read_dongle_id(self->io.fir_base); in via_ircc_open()
325 self->io.dongle_id = dongle_id; in via_ircc_open()
329 switch( self->io.dongle_id ){ in via_ircc_open()
331 self->qos.baud_rate.bits = in via_ircc_open()
336 self->qos.baud_rate.bits = in via_ircc_open()
348 self->qos.min_turn_time.bits = qos_mtt_bits; in via_ircc_open()
349 irda_qos_bits_to_value(&self->qos); in via_ircc_open()
352 self->rx_buff.truesize = 14384 + 2048; in via_ircc_open()
353 self->tx_buff.truesize = 14384 + 2048; in via_ircc_open()
356 self->rx_buff.head = in via_ircc_open()
357 dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize, in via_ircc_open()
358 &self->rx_buff_dma, GFP_KERNEL); in via_ircc_open()
359 if (self->rx_buff.head == NULL) { in via_ircc_open()
364 self->tx_buff.head = in via_ircc_open()
365 dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize, in via_ircc_open()
366 &self->tx_buff_dma, GFP_KERNEL); in via_ircc_open()
367 if (self->tx_buff.head == NULL) { in via_ircc_open()
372 self->rx_buff.in_frame = FALSE; in via_ircc_open()
373 self->rx_buff.state = OUTSIDE_FRAME; in via_ircc_open()
374 self->tx_buff.data = self->tx_buff.head; in via_ircc_open()
375 self->rx_buff.data = self->rx_buff.head; in via_ircc_open()
378 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_open()
379 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_open()
393 self->io.speed = 9600; in via_ircc_open()
394 via_hw_init(self); in via_ircc_open()
397 dma_free_coherent(&pdev->dev, self->tx_buff.truesize, in via_ircc_open()
398 self->tx_buff.head, self->tx_buff_dma); in via_ircc_open()
400 dma_free_coherent(&pdev->dev, self->rx_buff.truesize, in via_ircc_open()
401 self->rx_buff.head, self->rx_buff_dma); in via_ircc_open()
403 release_region(self->io.fir_base, self->io.fir_ext); in via_ircc_open()
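
via_ircc_open() pairs every acquisition with a reverse-order release: the I/O region is requested first, the RX and TX bounce buffers are then allocated coherently, and each later failure unwinds whatever was already taken. A sketch of that shape reassembled from the fragments above (label names and return codes are illustrative; dma_zalloc_coherent was the zeroing variant of dma_alloc_coherent in kernels of this era):

    /* Sketch: acquire in order, unwind in reverse on failure. */
    if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name))
        return -ENODEV;

    self->rx_buff.head = dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
                                             &self->rx_buff_dma, GFP_KERNEL);
    if (!self->rx_buff.head)
        goto err_out2;                  /* only the region to give back */

    self->tx_buff.head = dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
                                             &self->tx_buff_dma, GFP_KERNEL);
    if (!self->tx_buff.head)
        goto err_out3;                  /* RX buffer first, then the region */
    ...
    err_out4:
        dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
    err_out3:
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
    err_out2:
        release_region(self->io.fir_base, self->io.fir_ext);
        return -ENOMEM;
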
417 struct via_ircc_cb *self = pci_get_drvdata(pdev); in via_remove_one() local
420 iobase = self->io.fir_base; in via_remove_one()
424 unregister_netdev(self->netdev); in via_remove_one()
428 __func__, self->io.fir_base); in via_remove_one()
429 release_region(self->io.fir_base, self->io.fir_ext); in via_remove_one()
430 if (self->tx_buff.head) in via_remove_one()
431 dma_free_coherent(&pdev->dev, self->tx_buff.truesize, in via_remove_one()
432 self->tx_buff.head, self->tx_buff_dma); in via_remove_one()
433 if (self->rx_buff.head) in via_remove_one()
434 dma_free_coherent(&pdev->dev, self->rx_buff.truesize, in via_remove_one()
435 self->rx_buff.head, self->rx_buff_dma); in via_remove_one()
437 free_netdev(self->netdev); in via_remove_one()
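
via_remove_one() releases in reverse order of acquisition: unregister the netdev first so no new I/O arrives, then the I/O region, then the coherent buffers under NULL guards (open can fail partway), with free_netdev() last because self lives in the netdev's private area:

    /* Sketch: teardown mirrors the open path. */
    unregister_netdev(self->netdev);
    release_region(self->io.fir_base, self->io.fir_ext);
    if (self->tx_buff.head)
        dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
    if (self->rx_buff.head)
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
    free_netdev(self->netdev);
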
449 static void via_hw_init(struct via_ircc_cb *self) in via_hw_init() argument
451 int iobase = self->io.fir_base; in via_hw_init()
489 self->io.speed = 9600; in via_hw_init()
490 self->st_fifo.len = 0; in via_hw_init()
492 via_ircc_change_dongle_speed(iobase, self->io.speed, in via_hw_init()
493 self->io.dongle_id); in via_hw_init()
659 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed) in via_ircc_change_speed() argument
661 struct net_device *dev = self->netdev; in via_ircc_change_speed()
665 iobase = self->io.fir_base; in via_ircc_change_speed()
667 self->io.speed = speed; in via_ircc_change_speed()
720 via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); in via_ircc_change_speed()
744 via_ircc_dma_receive(self); in via_ircc_change_speed()
761 struct via_ircc_cb *self; in via_ircc_hard_xmit_sir() local
766 self = netdev_priv(dev); in via_ircc_hard_xmit_sir()
767 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;); in via_ircc_hard_xmit_sir()
768 iobase = self->io.fir_base; in via_ircc_hard_xmit_sir()
773 if ((speed != self->io.speed) && (speed != -1)) { in via_ircc_hard_xmit_sir()
776 via_ircc_change_speed(self, speed); in via_ircc_hard_xmit_sir()
781 self->new_speed = speed; in via_ircc_hard_xmit_sir()
791 spin_lock_irqsave(&self->lock, flags); in via_ircc_hard_xmit_sir()
792 self->tx_buff.data = self->tx_buff.head; in via_ircc_hard_xmit_sir()
793 self->tx_buff.len = in via_ircc_hard_xmit_sir()
794 async_wrap_skb(skb, self->tx_buff.data, in via_ircc_hard_xmit_sir()
795 self->tx_buff.truesize); in via_ircc_hard_xmit_sir()
797 dev->stats.tx_bytes += self->tx_buff.len; in via_ircc_hard_xmit_sir()
799 SetBaudRate(iobase, self->io.speed); in via_ircc_hard_xmit_sir()
817 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, in via_ircc_hard_xmit_sir()
820 SetSendByte(iobase, self->tx_buff.len); in via_ircc_hard_xmit_sir()
825 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_hard_xmit_sir()
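
In SIR mode each frame is byte-stuffed into the single TX bounce buffer and handed to the legacy DMA engine under the control block's lock. The fragments above reassemble roughly as follows (SetBaudRate, SetSendByte and DMA_TX_MODE are the driver's own macros; the final skb free and return are assumed from the usual hard_xmit contract):

    /* Sketch: wrap, account, program DMA, then consume the skb. */
    spin_lock_irqsave(&self->lock, flags);
    self->tx_buff.data = self->tx_buff.head;
    self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                       self->tx_buff.truesize);
    dev->stats.tx_bytes += self->tx_buff.len;

    SetBaudRate(iobase, self->io.speed);
    irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                   DMA_TX_MODE);
    SetSendByte(iobase, self->tx_buff.len);
    spin_unlock_irqrestore(&self->lock, flags);
    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
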
833 struct via_ircc_cb *self; in via_ircc_hard_xmit_fir() local
838 self = netdev_priv(dev); in via_ircc_hard_xmit_fir()
839 iobase = self->io.fir_base; in via_ircc_hard_xmit_fir()
841 if (self->st_fifo.len) in via_ircc_hard_xmit_fir()
843 if (self->chip_id == 0x3076) in via_ircc_hard_xmit_fir()
849 if ((speed != self->io.speed) && (speed != -1)) { in via_ircc_hard_xmit_fir()
851 via_ircc_change_speed(self, speed); in via_ircc_hard_xmit_fir()
856 self->new_speed = speed; in via_ircc_hard_xmit_fir()
858 spin_lock_irqsave(&self->lock, flags); in via_ircc_hard_xmit_fir()
859 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; in via_ircc_hard_xmit_fir()
860 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; in via_ircc_hard_xmit_fir()
862 self->tx_fifo.tail += skb->len; in via_ircc_hard_xmit_fir()
865 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); in via_ircc_hard_xmit_fir()
866 self->tx_fifo.len++; in via_ircc_hard_xmit_fir()
867 self->tx_fifo.free++; in via_ircc_hard_xmit_fir()
869 via_ircc_dma_xmit(self, iobase); in via_ircc_hard_xmit_fir()
874 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_hard_xmit_fir()
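
The FIR path instead appends frames to a software FIFO laid out linearly over tx_buff: queue[free] records each frame's start and length, tail advances past it, and len/free count queued frames and the next open slot (skb_copy_from_linear_data is the copy whose arguments appear at line 865). A sketch of that bookkeeping:

    /* Sketch: enqueue one frame into the linear TX FIFO, then kick DMA. */
    spin_lock_irqsave(&self->lock, flags);
    self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
    self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
    self->tx_fifo.tail += skb->len;      /* next frame lands after this one */
    skb_copy_from_linear_data(skb,
            self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
    self->tx_fifo.len++;                 /* frames awaiting the engine */
    self->tx_fifo.free++;                /* next open slot */
    via_ircc_dma_xmit(self, iobase);
    spin_unlock_irqrestore(&self->lock, flags);
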
879 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase) in via_ircc_dma_xmit() argument
882 self->io.direction = IO_XMIT; in via_ircc_dma_xmit()
894 irda_setup_dma(self->io.dma, in via_ircc_dma_xmit()
895 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - in via_ircc_dma_xmit()
896 self->tx_buff.head) + self->tx_buff_dma, in via_ircc_dma_xmit()
897 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); in via_ircc_dma_xmit()
899 __func__, self->tx_fifo.ptr, in via_ircc_dma_xmit()
900 self->tx_fifo.queue[self->tx_fifo.ptr].len, in via_ircc_dma_xmit()
901 self->tx_fifo.len); in via_ircc_dma_xmit()
903 SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len); in via_ircc_dma_xmit()
917 static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) in via_ircc_dma_xmit_complete() argument
922 iobase = self->io.fir_base; in via_ircc_dma_xmit_complete()
929 self->netdev->stats.tx_errors++; in via_ircc_dma_xmit_complete()
930 self->netdev->stats.tx_fifo_errors++; in via_ircc_dma_xmit_complete()
931 hwreset(self); in via_ircc_dma_xmit_complete()
934 self->netdev->stats.tx_packets++; in via_ircc_dma_xmit_complete()
939 if (self->new_speed) { in via_ircc_dma_xmit_complete()
940 via_ircc_change_speed(self, self->new_speed); in via_ircc_dma_xmit_complete()
941 self->new_speed = 0; in via_ircc_dma_xmit_complete()
946 if (self->tx_fifo.len) { in via_ircc_dma_xmit_complete()
947 self->tx_fifo.len--; in via_ircc_dma_xmit_complete()
948 self->tx_fifo.ptr++; in via_ircc_dma_xmit_complete()
953 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); in via_ircc_dma_xmit_complete()
963 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_dma_xmit_complete()
964 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_dma_xmit_complete()
971 netif_wake_queue(self->netdev); in via_ircc_dma_xmit_complete()
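
On TX completion the handler accounts the result, applies any speed change deferred while traffic was in flight, consumes one FIFO slot (rewinding the indices once drained), and wakes the stack's queue. A condensed sketch; tx_error is a hypothetical stand-in for the status-register check the listing elides:

    /* Sketch: post-TX housekeeping. */
    if (tx_error) {
        self->netdev->stats.tx_errors++;
        self->netdev->stats.tx_fifo_errors++;
        hwreset(self);
    } else {
        self->netdev->stats.tx_packets++;
    }
    if (self->new_speed) {               /* requested mid-traffic */
        via_ircc_change_speed(self, self->new_speed);
        self->new_speed = 0;
    }
    if (self->tx_fifo.len) {             /* consume the completed slot */
        self->tx_fifo.len--;
        self->tx_fifo.ptr++;
    } else {                             /* drained: rewind the FIFO */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
    }
    netif_wake_queue(self->netdev);
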
982 static int via_ircc_dma_receive(struct via_ircc_cb *self) in via_ircc_dma_receive() argument
986 iobase = self->io.fir_base; in via_ircc_dma_receive()
988 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_dma_receive()
989 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_dma_receive()
990 self->RxDataReady = 0; in via_ircc_dma_receive()
991 self->io.direction = IO_RECV; in via_ircc_dma_receive()
992 self->rx_buff.data = self->rx_buff.head; in via_ircc_dma_receive()
993 self->st_fifo.len = self->st_fifo.pending_bytes = 0; in via_ircc_dma_receive()
994 self->st_fifo.tail = self->st_fifo.head = 0; in via_ircc_dma_receive()
1009 irda_setup_dma(self->io.dma2, self->rx_buff_dma, in via_ircc_dma_receive()
1010 self->rx_buff.truesize, DMA_RX_MODE); in via_ircc_dma_receive()
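
Arming receive resets both software FIFOs and points DMA channel dma2 at the entire RX bounce buffer; the hardware may deposit several frames there before the driver drains them, which is why st_fifo tracks per-frame lengths separately. Condensed from the fragments above:

    /* Sketch: reset state, then arm RX DMA over the whole buffer. */
    self->RxDataReady = 0;
    self->io.direction = IO_RECV;
    self->rx_buff.data = self->rx_buff.head;
    self->st_fifo.len = self->st_fifo.pending_bytes = 0;
    self->st_fifo.tail = self->st_fifo.head = 0;
    irda_setup_dma(self->io.dma2, self->rx_buff_dma,
                   self->rx_buff.truesize, DMA_RX_MODE);
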
1024 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, in via_ircc_dma_receive_complete() argument
1032 iobase = self->io.fir_base; in via_ircc_dma_receive_complete()
1033 st_fifo = &self->st_fifo; in via_ircc_dma_receive_complete()
1035 if (self->io.speed < 4000000) { //Speed below FIR in via_ircc_dma_receive_complete()
1036 len = GetRecvByte(iobase, self); in via_ircc_dma_receive_complete()
1043 if (self->chip_id == 0x3076) { in via_ircc_dma_receive_complete()
1045 skb->data[i] = self->rx_buff.data[i * 2]; in via_ircc_dma_receive_complete()
1047 if (self->chip_id == 0x3096) { in via_ircc_dma_receive_complete()
1050 self->rx_buff.data[i]; in via_ircc_dma_receive_complete()
1054 self->rx_buff.data += len; in via_ircc_dma_receive_complete()
1055 self->netdev->stats.rx_bytes += len; in via_ircc_dma_receive_complete()
1056 self->netdev->stats.rx_packets++; in via_ircc_dma_receive_complete()
1057 skb->dev = self->netdev; in via_ircc_dma_receive_complete()
1065 len = GetRecvByte(iobase, self); in via_ircc_dma_receive_complete()
1070 __func__, len, RxCurCount(iobase, self), in via_ircc_dma_receive_complete()
1071 self->RxLastCount); in via_ircc_dma_receive_complete()
1072 hwreset(self); in via_ircc_dma_receive_complete()
1077 st_fifo->len, len - 4, RxCurCount(iobase, self)); in via_ircc_dma_receive_complete()
1086 self->RxDataReady = 0; in via_ircc_dma_receive_complete()
1115 (self->rx_buff.data == NULL) || (len < 6)) { in via_ircc_dma_receive_complete()
1116 self->netdev->stats.rx_dropped++; in via_ircc_dma_receive_complete()
1123 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); in via_ircc_dma_receive_complete()
1125 len - 4, self->rx_buff.data); in via_ircc_dma_receive_complete()
1128 self->rx_buff.data += len; in via_ircc_dma_receive_complete()
1129 self->netdev->stats.rx_bytes += len; in via_ircc_dma_receive_complete()
1130 self->netdev->stats.rx_packets++; in via_ircc_dma_receive_complete()
1131 skb->dev = self->netdev; in via_ircc_dma_receive_complete()
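
Below FIR speed the two chip revisions present received bytes differently: 0x3076 leaves each byte in every other buffer slot, while 0x3096 delivers a plain byte stream, so the copy into the skb must deinterleave on the former. A sketch of that branch, with the loop bounds assumed (the listing shows only the copy expressions):

    /* Sketch: chip-dependent copy out of the RX bounce buffer. */
    if (self->chip_id == 0x3076) {
        for (i = 0; i < len - 2; i++)    /* bounds assumed */
            skb->data[i] = self->rx_buff.data[i * 2];
    } else if (self->chip_id == 0x3096) {
        for (i = 0; i < len - 2; i++)
            skb->data[i] = self->rx_buff.data[i];
    }
    self->rx_buff.data += len;           /* advance past the raw frame */
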
1145 static int upload_rxdata(struct via_ircc_cb *self, int iobase) in upload_rxdata() argument
1150 st_fifo = &self->st_fifo; in upload_rxdata()
1152 len = GetRecvByte(iobase, self); in upload_rxdata()
1157 self->netdev->stats.rx_dropped++; in upload_rxdata()
1163 self->netdev->stats.rx_dropped++; in upload_rxdata()
1168 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1); in upload_rxdata()
1174 self->rx_buff.data += len; in upload_rxdata()
1175 self->netdev->stats.rx_bytes += len; in upload_rxdata()
1176 self->netdev->stats.rx_packets++; in upload_rxdata()
1177 skb->dev = self->netdev; in upload_rxdata()
1195 static int RxTimerHandler(struct via_ircc_cb *self, int iobase) in RxTimerHandler() argument
1202 st_fifo = &self->st_fifo; in RxTimerHandler()
1204 if (CkRxRecv(iobase, self)) { in RxTimerHandler()
1206 self->RetryCount = 0; in RxTimerHandler()
1208 self->RxDataReady++; in RxTimerHandler()
1211 self->RetryCount++; in RxTimerHandler()
1213 if ((self->RetryCount >= 1) || in RxTimerHandler()
1214 ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) || in RxTimerHandler()
1231 (self->rx_buff.data == NULL) || (len < 6)) { in RxTimerHandler()
1232 self->netdev->stats.rx_dropped++; in RxTimerHandler()
1237 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); in RxTimerHandler()
1243 self->rx_buff.data += len; in RxTimerHandler()
1244 self->netdev->stats.rx_bytes += len; in RxTimerHandler()
1245 self->netdev->stats.rx_packets++; in RxTimerHandler()
1246 skb->dev = self->netdev; in RxTimerHandler()
1251 self->RetryCount = 0; in RxTimerHandler()
1261 (RxCurCount(iobase, self) != self->RxLastCount)) { in RxTimerHandler()
1262 upload_rxdata(self, iobase); in RxTimerHandler()
1263 if (irda_device_txqueue_empty(self->netdev)) in RxTimerHandler()
1264 via_ircc_dma_receive(self); in RxTimerHandler()
1284 struct via_ircc_cb *self = netdev_priv(dev); in via_ircc_interrupt() local
1288 iobase = self->io.fir_base; in via_ircc_interrupt()
1289 spin_lock(&self->lock); in via_ircc_interrupt()
1300 self->EventFlag.TimeOut++; in via_ircc_interrupt()
1302 if (self->io.direction == IO_XMIT) { in via_ircc_interrupt()
1303 via_ircc_dma_xmit(self, iobase); in via_ircc_interrupt()
1305 if (self->io.direction == IO_RECV) { in via_ircc_interrupt()
1309 if (self->RxDataReady > 30) { in via_ircc_interrupt()
1310 hwreset(self); in via_ircc_interrupt()
1311 if (irda_device_txqueue_empty(self->netdev)) { in via_ircc_interrupt()
1312 via_ircc_dma_receive(self); in via_ircc_interrupt()
1315 RxTimerHandler(self, iobase); in via_ircc_interrupt()
1330 self->EventFlag.EOMessage++; // read and will auto clean in via_ircc_interrupt()
1331 if (via_ircc_dma_xmit_complete(self)) { in via_ircc_interrupt()
1333 (self->netdev)) { in via_ircc_interrupt()
1334 via_ircc_dma_receive(self); in via_ircc_interrupt()
1337 self->EventFlag.Unknown++; in via_ircc_interrupt()
1359 if (via_ircc_dma_receive_complete(self, iobase)) { in via_ircc_interrupt()
1361 via_ircc_dma_receive(self); in via_ircc_interrupt()
1367 RxCurCount(iobase, self), self->RxLastCount); in via_ircc_interrupt()
1375 hwreset(self); //F01 in via_ircc_interrupt()
1377 via_ircc_dma_receive(self); in via_ircc_interrupt()
1381 spin_unlock(&self->lock); in via_ircc_interrupt()
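
The handler runs in hardirq context, so the plain spin_lock here pairs with the spin_lock_irqsave taken on the process-context transmit and ioctl paths; inside, it dispatches on self->io.direction and the event counters. A skeleton of that shape, with status decoding elided:

    /* Sketch: ISR skeleton. */
    static irqreturn_t via_ircc_interrupt(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        struct via_ircc_cb *self = netdev_priv(dev);
        int iobase = self->io.fir_base;

        spin_lock(&self->lock);
        /* read status; on timeout re-kick TX or poll RX, on end-of-message
         * complete TX and re-arm RX, on RX-done drain the frame(s) */
        spin_unlock(&self->lock);
        return IRQ_HANDLED;
    }
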
1385 static void hwreset(struct via_ircc_cb *self) in hwreset() argument
1388 iobase = self->io.fir_base; in hwreset()
1411 via_ircc_change_speed(self, self->io.speed); in hwreset()
1413 self->st_fifo.len = 0; in hwreset()
1422 static int via_ircc_is_receiving(struct via_ircc_cb *self) in via_ircc_is_receiving() argument
1427 IRDA_ASSERT(self != NULL, return FALSE;); in via_ircc_is_receiving()
1429 iobase = self->io.fir_base; in via_ircc_is_receiving()
1430 if (CkRxRecv(iobase, self)) in via_ircc_is_receiving()
1447 struct via_ircc_cb *self; in via_ircc_net_open() local
1452 self = netdev_priv(dev); in via_ircc_net_open()
1454 IRDA_ASSERT(self != NULL, return 0;); in via_ircc_net_open()
1455 iobase = self->io.fir_base; in via_ircc_net_open()
1456 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) { in via_ircc_net_open()
1458 driver_name, self->io.irq); in via_ircc_net_open()
1465 if (request_dma(self->io.dma, dev->name)) { in via_ircc_net_open()
1467 driver_name, self->io.dma); in via_ircc_net_open()
1468 free_irq(self->io.irq, dev); in via_ircc_net_open()
1471 if (self->io.dma2 != self->io.dma) { in via_ircc_net_open()
1472 if (request_dma(self->io.dma2, dev->name)) { in via_ircc_net_open()
1474 driver_name, self->io.dma2); in via_ircc_net_open()
1475 free_irq(self->io.irq, dev); in via_ircc_net_open()
1476 free_dma(self->io.dma); in via_ircc_net_open()
1488 via_ircc_dma_receive(self); in via_ircc_net_open()
1498 self->irlap = irlap_open(dev, &self->qos, hwname); in via_ircc_net_open()
1500 self->RxLastCount = 0; in via_ircc_net_open()
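
via_ircc_net_open() acquires the IRQ first and then one or two DMA channels (dma2 may alias dma), rolling back in reverse on each failure; the error codes here are assumed:

    /* Sketch: ordered acquisition with reverse-order rollback. */
    if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev))
        return -EAGAIN;
    if (request_dma(self->io.dma, dev->name)) {
        free_irq(self->io.irq, dev);
        return -EAGAIN;
    }
    if (self->io.dma2 != self->io.dma &&
        request_dma(self->io.dma2, dev->name)) {
        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);
        return -EAGAIN;
    }
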
1513 struct via_ircc_cb *self; in via_ircc_net_close() local
1517 self = netdev_priv(dev); in via_ircc_net_close()
1518 IRDA_ASSERT(self != NULL, return 0;); in via_ircc_net_close()
1523 if (self->irlap) in via_ircc_net_close()
1524 irlap_close(self->irlap); in via_ircc_net_close()
1525 self->irlap = NULL; in via_ircc_net_close()
1526 iobase = self->io.fir_base; in via_ircc_net_close()
1529 DisableDmaChannel(self->io.dma); in via_ircc_net_close()
1533 free_irq(self->io.irq, dev); in via_ircc_net_close()
1534 free_dma(self->io.dma); in via_ircc_net_close()
1535 if (self->io.dma2 != self->io.dma) in via_ircc_net_close()
1536 free_dma(self->io.dma2); in via_ircc_net_close()
1551 struct via_ircc_cb *self; in via_ircc_net_ioctl() local
1556 self = netdev_priv(dev); in via_ircc_net_ioctl()
1557 IRDA_ASSERT(self != NULL, return -1;); in via_ircc_net_ioctl()
1561 spin_lock_irqsave(&self->lock, flags); in via_ircc_net_ioctl()
1568 via_ircc_change_speed(self, irq->ifr_baudrate); in via_ircc_net_ioctl()
1575 irda_device_set_media_busy(self->netdev, TRUE); in via_ircc_net_ioctl()
1578 irq->ifr_receiving = via_ircc_is_receiving(self); in via_ircc_net_ioctl()
1584 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_net_ioctl()