Lines Matching refs:self (all references to the `self` control-block pointer in the Linux nsc-ircc IrDA driver)

172 static int  nsc_ircc_close(struct nsc_ircc_cb *self);
174 static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
175 static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
176 static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
182 static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
183 static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
184 static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
366 struct nsc_ircc_cb *self; in nsc_ircc_open() local
396 self = netdev_priv(dev); in nsc_ircc_open()
397 self->netdev = dev; in nsc_ircc_open()
398 spin_lock_init(&self->lock); in nsc_ircc_open()
401 dev_self[chip_index] = self; in nsc_ircc_open()
402 self->index = chip_index; in nsc_ircc_open()
405 self->io.cfg_base = info->cfg_base; in nsc_ircc_open()
406 self->io.fir_base = info->fir_base; in nsc_ircc_open()
407 self->io.irq = info->irq; in nsc_ircc_open()
408 self->io.fir_ext = CHIP_IO_EXTENT; in nsc_ircc_open()
409 self->io.dma = info->dma; in nsc_ircc_open()
410 self->io.fifo_size = 32; in nsc_ircc_open()
413 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); in nsc_ircc_open()
416 __func__, self->io.fir_base); in nsc_ircc_open()
422 irda_init_max_qos_capabilies(&self->qos); in nsc_ircc_open()
425 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| in nsc_ircc_open()
428 self->qos.min_turn_time.bits = qos_mtt_bits; in nsc_ircc_open()
429 irda_qos_bits_to_value(&self->qos); in nsc_ircc_open()
432 self->rx_buff.truesize = 14384; in nsc_ircc_open()
433 self->tx_buff.truesize = 14384; in nsc_ircc_open()
436 self->rx_buff.head = in nsc_ircc_open()
437 dma_zalloc_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_open()
438 &self->rx_buff_dma, GFP_KERNEL); in nsc_ircc_open()
439 if (self->rx_buff.head == NULL) { in nsc_ircc_open()
445 self->tx_buff.head = in nsc_ircc_open()
446 dma_zalloc_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_open()
447 &self->tx_buff_dma, GFP_KERNEL); in nsc_ircc_open()
448 if (self->tx_buff.head == NULL) { in nsc_ircc_open()
453 self->rx_buff.in_frame = FALSE; in nsc_ircc_open()
454 self->rx_buff.state = OUTSIDE_FRAME; in nsc_ircc_open()
455 self->tx_buff.data = self->tx_buff.head; in nsc_ircc_open()
456 self->rx_buff.data = self->rx_buff.head; in nsc_ircc_open()
459 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_open()
460 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_open()
476 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); in nsc_ircc_open()
485 self->io.dongle_id = dongle_id; in nsc_ircc_open()
486 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); in nsc_ircc_open()
488 self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME, in nsc_ircc_open()
489 self->index, NULL, 0); in nsc_ircc_open()
490 if (IS_ERR(self->pldev)) { in nsc_ircc_open()
491 err = PTR_ERR(self->pldev); in nsc_ircc_open()
494 platform_set_drvdata(self->pldev, self); in nsc_ircc_open()
501 dma_free_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_open()
502 self->tx_buff.head, self->tx_buff_dma); in nsc_ircc_open()
504 dma_free_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_open()
505 self->rx_buff.head, self->rx_buff_dma); in nsc_ircc_open()
507 release_region(self->io.fir_base, self->io.fir_ext); in nsc_ircc_open()
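
The open path above allocates both DMA-coherent buffers and unwinds them in reverse order on failure (lines 501-507). A minimal sketch of that allocate-and-unwind pattern, assuming the same field layout; note the driver's dma_zalloc_coherent(NULL, ...) calls predate the modern DMA API, which requires a real struct device and zeroes the buffer in dma_alloc_coherent() itself:

/* Sketch only: mirrors the rx/tx buffer setup in nsc_ircc_open().
 * "dev" replaces the NULL device of the original, as the current
 * DMA API requires a device for the coherent mapping.
 */
static int example_alloc_buffers(struct nsc_ircc_cb *self, struct device *dev)
{
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 14384;

	self->rx_buff.head = dma_alloc_coherent(dev, self->rx_buff.truesize,
						&self->rx_buff_dma, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->tx_buff.head = dma_alloc_coherent(dev, self->tx_buff.truesize,
						&self->tx_buff_dma, GFP_KERNEL);
	if (!self->tx_buff.head) {
		/* Unwind in reverse order of allocation. */
		dma_free_coherent(dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
		return -ENOMEM;
	}

	self->rx_buff.data = self->rx_buff.head;
	self->tx_buff.data = self->tx_buff.head;
	return 0;
}

nsc_ircc_close() (below) frees the same pair with dma_free_coherent() after unregistering the netdev, mirroring this order.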
520 static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) in nsc_ircc_close() argument
524 IRDA_ASSERT(self != NULL, return -1;); in nsc_ircc_close()
526 iobase = self->io.fir_base; in nsc_ircc_close()
528 platform_device_unregister(self->pldev); in nsc_ircc_close()
531 unregister_netdev(self->netdev); in nsc_ircc_close()
535 __func__, self->io.fir_base); in nsc_ircc_close()
536 release_region(self->io.fir_base, self->io.fir_ext); in nsc_ircc_close()
538 if (self->tx_buff.head) in nsc_ircc_close()
539 dma_free_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_close()
540 self->tx_buff.head, self->tx_buff_dma); in nsc_ircc_close()
542 if (self->rx_buff.head) in nsc_ircc_close()
543 dma_free_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_close()
544 self->rx_buff.head, self->rx_buff_dma); in nsc_ircc_close()
546 dev_self[self->index] = NULL; in nsc_ircc_close()
547 free_netdev(self->netdev); in nsc_ircc_close()
1254 static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) in nsc_ircc_change_speed() argument
1256 struct net_device *dev = self->netdev; in nsc_ircc_change_speed()
1264 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_change_speed()
1266 iobase = self->io.fir_base; in nsc_ircc_change_speed()
1269 self->io.speed = speed; in nsc_ircc_change_speed()
1317 nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); in nsc_ircc_change_speed()
1340 nsc_ircc_dma_receive(self); in nsc_ircc_change_speed()
1365 struct nsc_ircc_cb *self; in nsc_ircc_hard_xmit_sir() local
1371 self = netdev_priv(dev); in nsc_ircc_hard_xmit_sir()
1373 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;); in nsc_ircc_hard_xmit_sir()
1375 iobase = self->io.fir_base; in nsc_ircc_hard_xmit_sir()
1380 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
1384 if ((speed != self->io.speed) && (speed != -1)) { in nsc_ircc_hard_xmit_sir()
1391 if (self->io.direction == IO_RECV) { in nsc_ircc_hard_xmit_sir()
1392 nsc_ircc_change_speed(self, speed); in nsc_ircc_hard_xmit_sir()
1397 self->new_speed = speed; in nsc_ircc_hard_xmit_sir()
1403 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
1407 self->new_speed = speed; in nsc_ircc_hard_xmit_sir()
1413 self->tx_buff.data = self->tx_buff.head; in nsc_ircc_hard_xmit_sir()
1415 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, in nsc_ircc_hard_xmit_sir()
1416 self->tx_buff.truesize); in nsc_ircc_hard_xmit_sir()
1418 dev->stats.tx_bytes += self->tx_buff.len; in nsc_ircc_hard_xmit_sir()
1428 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
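
nsc_ircc_hard_xmit_sir() cannot reprogram the UART while bytes are still draining, so a speed change is applied immediately only when the chip sits idle in receive; otherwise it is parked in new_speed for the TX-complete interrupt to pick up. A condensed sketch of that gate, assuming the fields shown above:

/* Sketch: speed-change gating from the SIR xmit path. */
static void example_sir_speed_gate(struct nsc_ircc_cb *self, __s32 speed)
{
	if (speed != self->io.speed && speed != -1) {
		if (self->io.direction == IO_RECV) {
			/* Receiver idle: safe to reprogram now. */
			nsc_ircc_change_speed(self, speed);
		} else {
			/* TX in flight: defer; the TXEMP interrupt in
			 * nsc_ircc_sir_interrupt() applies it later. */
			self->new_speed = speed;
		}
	}
}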
1438 struct nsc_ircc_cb *self; in nsc_ircc_hard_xmit_fir() local
1445 self = netdev_priv(dev); in nsc_ircc_hard_xmit_fir()
1446 iobase = self->io.fir_base; in nsc_ircc_hard_xmit_fir()
1451 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
1455 if ((speed != self->io.speed) && (speed != -1)) { in nsc_ircc_hard_xmit_fir()
1460 if (self->tx_fifo.len == 0) { in nsc_ircc_hard_xmit_fir()
1461 nsc_ircc_change_speed(self, speed); in nsc_ircc_hard_xmit_fir()
1464 self->new_speed = speed; in nsc_ircc_hard_xmit_fir()
1474 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
1479 self->new_speed = speed; in nsc_ircc_hard_xmit_fir()
1487 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; in nsc_ircc_hard_xmit_fir()
1488 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; in nsc_ircc_hard_xmit_fir()
1489 self->tx_fifo.tail += skb->len; in nsc_ircc_hard_xmit_fir()
1493 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, in nsc_ircc_hard_xmit_fir()
1495 self->tx_fifo.len++; in nsc_ircc_hard_xmit_fir()
1496 self->tx_fifo.free++; in nsc_ircc_hard_xmit_fir()
1499 if (self->tx_fifo.len == 1) { in nsc_ircc_hard_xmit_fir()
1504 diff = ktime_us_delta(ktime_get(), self->stamp); in nsc_ircc_hard_xmit_fir()
1528 self->io.direction = IO_XMIT; in nsc_ircc_hard_xmit_fir()
1545 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_hard_xmit_fir()
1550 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) in nsc_ircc_hard_xmit_fir()
1551 netif_wake_queue(self->netdev); in nsc_ircc_hard_xmit_fir()
1557 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
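
The FIR path batches frames: each skb is copied back-to-back into the coherent tx_buff and described by a (start, len) slot in tx_fifo, so several frames can go out per media turnaround. A sketch of the enqueue step under the same assumptions:

/* Sketch: enqueue one frame into the multi-frame TX window. Must be
 * called with self->lock held, as in nsc_ircc_hard_xmit_fir().
 */
static void example_fir_enqueue(struct nsc_ircc_cb *self, struct sk_buff *skb)
{
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
	self->tx_fifo.tail += skb->len;

	skb_copy_from_linear_data(skb,
			self->tx_fifo.queue[self->tx_fifo.free].start,
			skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	/* Re-open the queue only while window slots remain and no speed
	 * change is pending (cf. the wake test at line 1550 above). */
	if (self->tx_fifo.free < MAX_TX_WINDOW && self->new_speed == 0)
		netif_wake_queue(self->netdev);
}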
1569 static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase) in nsc_ircc_dma_xmit() argument
1580 self->io.direction = IO_XMIT; in nsc_ircc_dma_xmit()
1586 irda_setup_dma(self->io.dma, in nsc_ircc_dma_xmit()
1587 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - in nsc_ircc_dma_xmit()
1588 self->tx_buff.head) + self->tx_buff_dma, in nsc_ircc_dma_xmit()
1589 self->tx_fifo.queue[self->tx_fifo.ptr].len, in nsc_ircc_dma_xmit()
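
The irda_setup_dma() call above converts a pointer inside the coherent buffer into a bus address by offsetting the mapping's DMA handle: because tx_buff.head corresponds to tx_buff_dma, any pointer p in the buffer maps to tx_buff_dma + (p - tx_buff.head). The same arithmetic in isolation:

/* Sketch: bus address of one queued frame in the coherent buffer. */
static dma_addr_t example_frame_dma_addr(struct nsc_ircc_cb *self, int slot)
{
	__u8 *frame = self->tx_fifo.queue[slot].start;

	return self->tx_buff_dma + (frame - self->tx_buff.head);
}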
1646 static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self) in nsc_ircc_dma_xmit_complete() argument
1652 iobase = self->io.fir_base; in nsc_ircc_dma_xmit_complete()
1663 self->netdev->stats.tx_errors++; in nsc_ircc_dma_xmit_complete()
1664 self->netdev->stats.tx_fifo_errors++; in nsc_ircc_dma_xmit_complete()
1669 self->netdev->stats.tx_packets++; in nsc_ircc_dma_xmit_complete()
1673 self->tx_fifo.ptr++; in nsc_ircc_dma_xmit_complete()
1674 self->tx_fifo.len--; in nsc_ircc_dma_xmit_complete()
1677 if (self->tx_fifo.len) { in nsc_ircc_dma_xmit_complete()
1678 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_dma_xmit_complete()
1684 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_dma_xmit_complete()
1685 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_dma_xmit_complete()
1690 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) { in nsc_ircc_dma_xmit_complete()
1693 netif_wake_queue(self->netdev); in nsc_ircc_dma_xmit_complete()
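
TX-complete consumes one window slot per interrupt and either chains the next frame straight into DMA or rewinds the window for reuse. A sketch of that bookkeeping, assuming the fields above:

/* Sketch: per-frame TX-complete accounting, as in
 * nsc_ircc_dma_xmit_complete(). Error counting is elided.
 */
static void example_tx_complete(struct nsc_ircc_cb *self, int iobase)
{
	self->netdev->stats.tx_packets++;
	self->tx_fifo.ptr++;
	self->tx_fifo.len--;

	if (self->tx_fifo.len) {
		nsc_ircc_dma_xmit(self, iobase);	/* chain next frame */
	} else {
		/* Window drained: rewind all slots to the buffer start. */
		self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
		self->tx_fifo.tail = self->tx_buff.head;
	}

	if (self->tx_fifo.free < MAX_TX_WINDOW && self->new_speed == 0)
		netif_wake_queue(self->netdev);
}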
1709 static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self) in nsc_ircc_dma_receive() argument
1714 iobase = self->io.fir_base; in nsc_ircc_dma_receive()
1717 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_dma_receive()
1718 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_dma_receive()
1731 self->io.direction = IO_RECV; in nsc_ircc_dma_receive()
1732 self->rx_buff.data = self->rx_buff.head; in nsc_ircc_dma_receive()
1738 self->st_fifo.len = self->st_fifo.pending_bytes = 0; in nsc_ircc_dma_receive()
1739 self->st_fifo.tail = self->st_fifo.head = 0; in nsc_ircc_dma_receive()
1741 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, in nsc_ircc_dma_receive()
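
Arming a receive resets both FIFOs (the single DMA channel is shared between directions) and points the channel at the whole rx buffer; frames are then carved out of it on completion. A sketch, assuming DMA_RX_MODE from the driver's own header:

/* Sketch: arm a fresh DMA receive, as in nsc_ircc_dma_receive(). */
static void example_arm_rx(struct nsc_ircc_cb *self)
{
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	irda_setup_dma(self->io.dma, self->rx_buff_dma,
		       self->rx_buff.truesize, DMA_RX_MODE);
}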
1761 static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) in nsc_ircc_dma_receive_complete() argument
1769 st_fifo = &self->st_fifo; in nsc_ircc_dma_receive_complete()
1804 self->netdev->stats.rx_errors += len; in nsc_ircc_dma_receive_complete()
1807 self->netdev->stats.rx_errors++; in nsc_ircc_dma_receive_complete()
1809 self->rx_buff.data += len; in nsc_ircc_dma_receive_complete()
1812 self->netdev->stats.rx_length_errors++; in nsc_ircc_dma_receive_complete()
1815 self->netdev->stats.rx_frame_errors++; in nsc_ircc_dma_receive_complete()
1818 self->netdev->stats.rx_crc_errors++; in nsc_ircc_dma_receive_complete()
1822 self->netdev->stats.rx_fifo_errors++; in nsc_ircc_dma_receive_complete()
1825 self->netdev->stats.rx_fifo_errors++; in nsc_ircc_dma_receive_complete()
1834 if (st_fifo->pending_bytes < self->io.fifo_size) { in nsc_ircc_dma_receive_complete()
1867 self->stamp = ktime_get(); in nsc_ircc_dma_receive_complete()
1871 self->netdev->stats.rx_dropped++; in nsc_ircc_dma_receive_complete()
1883 if (self->io.speed < 4000000) { in nsc_ircc_dma_receive_complete()
1886 self->rx_buff.data, in nsc_ircc_dma_receive_complete()
1891 self->rx_buff.data, in nsc_ircc_dma_receive_complete()
1896 self->rx_buff.data += len; in nsc_ircc_dma_receive_complete()
1897 self->netdev->stats.rx_bytes += len; in nsc_ircc_dma_receive_complete()
1898 self->netdev->stats.rx_packets++; in nsc_ircc_dma_receive_complete()
1900 skb->dev = self->netdev; in nsc_ircc_dma_receive_complete()
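
On a good frame the driver carves it out of rx_buff into a fresh skb, stripping the hardware CRC: 16 bits below 4 Mb/s (MIR), 32 bits at 4 Mb/s (FIR), which is the speed test at line 1883. A sketch of the delivery step:

/* Sketch: hand one DMA'd frame to the stack, as in
 * nsc_ircc_dma_receive_complete(). Status-FIFO handling is elided.
 */
static void example_deliver(struct nsc_ircc_cb *self, int len)
{
	int crc = self->io.speed < 4000000 ? 2 : 4;
	struct sk_buff *skb = dev_alloc_skb(len + 1);

	if (!skb) {
		self->netdev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 1);			/* IrDA header alignment */
	skb_put(skb, len - crc);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - crc);

	self->rx_buff.data += len;		/* consume from the ring */
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;

	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
}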
1918 static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self) in nsc_ircc_pio_receive() argument
1923 iobase = self->io.fir_base; in nsc_ircc_pio_receive()
1928 async_unwrap_char(self->netdev, &self->netdev->stats, in nsc_ircc_pio_receive()
1929 &self->rx_buff, byte); in nsc_ircc_pio_receive()
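
SIR receive is pure PIO: every byte pulled from the UART FIFO goes through the IrDA async wrapper, which unstuffs and reassembles frames in rx_buff and delivers them itself. The loop around the call above is essentially (LSR/LSR_RXDA are the driver's own register definitions):

	do {
		byte = inb(iobase + RXD);
		async_unwrap_char(self->netdev, &self->netdev->stats,
				  &self->rx_buff, byte);
	} while (inb(iobase + LSR) & LSR_RXDA);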
1939 static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir) in nsc_ircc_sir_interrupt() argument
1946 actual = nsc_ircc_pio_write(self->io.fir_base, in nsc_ircc_sir_interrupt()
1947 self->tx_buff.data, in nsc_ircc_sir_interrupt()
1948 self->tx_buff.len, in nsc_ircc_sir_interrupt()
1949 self->io.fifo_size); in nsc_ircc_sir_interrupt()
1950 self->tx_buff.data += actual; in nsc_ircc_sir_interrupt()
1951 self->tx_buff.len -= actual; in nsc_ircc_sir_interrupt()
1953 self->io.direction = IO_XMIT; in nsc_ircc_sir_interrupt()
1956 if (self->tx_buff.len > 0) in nsc_ircc_sir_interrupt()
1957 self->ier = IER_TXLDL_IE; in nsc_ircc_sir_interrupt()
1960 self->netdev->stats.tx_packets++; in nsc_ircc_sir_interrupt()
1961 netif_wake_queue(self->netdev); in nsc_ircc_sir_interrupt()
1962 self->ier = IER_TXEMP_IE; in nsc_ircc_sir_interrupt()
1969 self->io.direction = IO_RECV; in nsc_ircc_sir_interrupt()
1970 self->ier = IER_RXHDL_IE; in nsc_ircc_sir_interrupt()
1974 if (self->new_speed) { in nsc_ircc_sir_interrupt()
1976 self->ier = nsc_ircc_change_speed(self, in nsc_ircc_sir_interrupt()
1977 self->new_speed); in nsc_ircc_sir_interrupt()
1978 self->new_speed = 0; in nsc_ircc_sir_interrupt()
1979 netif_wake_queue(self->netdev); in nsc_ircc_sir_interrupt()
1982 if (self->io.speed > 115200) { in nsc_ircc_sir_interrupt()
1991 nsc_ircc_pio_receive(self); in nsc_ircc_sir_interrupt()
1994 self->ier = IER_RXHDL_IE; in nsc_ircc_sir_interrupt()
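
The SIR interrupt drains the transmit side by PIO: each TX-FIFO-low interrupt copies up to fifo_size more wrapped bytes, and once tx_buff is empty the driver switches to the TX-empty interrupt so it can turn the link around or apply a deferred speed change. Condensed:

/* Sketch: SIR PIO transmit drain, as in nsc_ircc_sir_interrupt(). */
static void example_sir_tx_drain(struct nsc_ircc_cb *self)
{
	int actual = nsc_ircc_pio_write(self->io.fir_base,
					self->tx_buff.data,
					self->tx_buff.len,
					self->io.fifo_size);
	self->tx_buff.data += actual;
	self->tx_buff.len -= actual;
	self->io.direction = IO_XMIT;

	if (self->tx_buff.len > 0) {
		self->ier = IER_TXLDL_IE;	/* more bytes to feed */
	} else {
		self->netdev->stats.tx_packets++;
		netif_wake_queue(self->netdev);
		self->ier = IER_TXEMP_IE;	/* wait for FIFO to empty */
	}
}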
2004 static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase, in nsc_ircc_fir_interrupt() argument
2014 if (nsc_ircc_dma_receive_complete(self, iobase)) { in nsc_ircc_fir_interrupt()
2016 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2018 self->ier = IER_SFIF_IE | IER_TMR_IE; in nsc_ircc_fir_interrupt()
2030 if (self->io.direction == IO_XMIT) { in nsc_ircc_fir_interrupt()
2031 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_fir_interrupt()
2034 self->ier = IER_DMA_IE; in nsc_ircc_fir_interrupt()
2037 if (nsc_ircc_dma_receive_complete(self, iobase)) { in nsc_ircc_fir_interrupt()
2038 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2040 self->ier = IER_SFIF_IE | IER_TMR_IE; in nsc_ircc_fir_interrupt()
2045 if (nsc_ircc_dma_xmit_complete(self)) { in nsc_ircc_fir_interrupt()
2046 if (self->new_speed != 0) { in nsc_ircc_fir_interrupt()
2050 self->ier = IER_TXEMP_IE; in nsc_ircc_fir_interrupt()
2054 if (irda_device_txqueue_empty(self->netdev)) { in nsc_ircc_fir_interrupt()
2056 nsc_ircc_dma_receive(self); in nsc_ircc_fir_interrupt()
2057 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2064 self->ier = IER_DMA_IE; in nsc_ircc_fir_interrupt()
2069 self->ier = nsc_ircc_change_speed(self, self->new_speed); in nsc_ircc_fir_interrupt()
2070 self->new_speed = 0; in nsc_ircc_fir_interrupt()
2071 netif_wake_queue(self->netdev); in nsc_ircc_fir_interrupt()
2087 struct nsc_ircc_cb *self; in nsc_ircc_interrupt() local
2091 self = netdev_priv(dev); in nsc_ircc_interrupt()
2093 spin_lock(&self->lock); in nsc_ircc_interrupt()
2095 iobase = self->io.fir_base; in nsc_ircc_interrupt()
2100 self->ier = inb(iobase+IER); in nsc_ircc_interrupt()
2101 eir = inb(iobase+EIR) & self->ier; /* Keep only the enabled ones */ in nsc_ircc_interrupt()
2107 if (self->io.speed > 115200) in nsc_ircc_interrupt()
2108 nsc_ircc_fir_interrupt(self, iobase, eir); in nsc_ircc_interrupt()
2110 nsc_ircc_sir_interrupt(self, eir); in nsc_ircc_interrupt()
2113 outb(self->ier, iobase+IER); /* Restore interrupts */ in nsc_ircc_interrupt()
2116 spin_unlock(&self->lock); in nsc_ircc_interrupt()
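
Note the locking asymmetry: the ISR takes a plain spin_lock() because hard-IRQ context already runs with the line masked, while the xmit, ioctl and PM paths take the same lock with spin_lock_irqsave(). The handler's overall shape (register-bank save/restore elided):

/* Sketch: top-level ISR dispatch, as in nsc_ircc_interrupt(). */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nsc_ircc_cb *self = netdev_priv(dev);
	int iobase = self->io.fir_base;
	__u8 eir;

	spin_lock(&self->lock);

	self->ier = inb(iobase + IER);
	eir = inb(iobase + EIR) & self->ier;	/* keep only enabled sources */
	if (eir) {
		if (self->io.speed > 115200)
			nsc_ircc_fir_interrupt(self, iobase, eir);
		else
			nsc_ircc_sir_interrupt(self, eir);
	}
	outb(self->ier, iobase + IER);		/* restore interrupt mask */

	spin_unlock(&self->lock);
	return IRQ_RETVAL(eir);
}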
2126 static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self) in nsc_ircc_is_receiving() argument
2133 IRDA_ASSERT(self != NULL, return FALSE;); in nsc_ircc_is_receiving()
2135 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_is_receiving()
2137 if (self->io.speed > 115200) { in nsc_ircc_is_receiving()
2138 iobase = self->io.fir_base; in nsc_ircc_is_receiving()
2149 status = (self->rx_buff.state != OUTSIDE_FRAME); in nsc_ircc_is_receiving()
2151 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_is_receiving()
2164 struct nsc_ircc_cb *self; in nsc_ircc_net_open() local
2171 self = netdev_priv(dev); in nsc_ircc_net_open()
2173 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_net_open()
2175 iobase = self->io.fir_base; in nsc_ircc_net_open()
2177 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { in nsc_ircc_net_open()
2179 driver_name, self->io.irq); in nsc_ircc_net_open()
2186 if (request_dma(self->io.dma, dev->name)) { in nsc_ircc_net_open()
2188 driver_name, self->io.dma); in nsc_ircc_net_open()
2189 free_irq(self->io.irq, dev); in nsc_ircc_net_open()
2207 sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base); in nsc_ircc_net_open()
2213 self->irlap = irlap_open(dev, &self->qos, hwname); in nsc_ircc_net_open()
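
nsc_ircc_net_open() acquires the IRQ before the DMA channel and releases the IRQ again if the DMA request fails, so a failed open holds nothing. The skeleton of that ordering:

/* Sketch: resource ordering from nsc_ircc_net_open(); the irlap_open()
 * attachment and chip setup are elided.
 */
static int example_net_open(struct net_device *dev)
{
	struct nsc_ircc_cb *self = netdev_priv(dev);

	if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev))
		return -EAGAIN;

	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);	/* unwind the IRQ */
		return -EAGAIN;
	}

	netif_start_queue(dev);
	return 0;
}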
2226 struct nsc_ircc_cb *self; in nsc_ircc_net_close() local
2233 self = netdev_priv(dev); in nsc_ircc_net_close()
2234 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_net_close()
2240 if (self->irlap) in nsc_ircc_net_close()
2241 irlap_close(self->irlap); in nsc_ircc_net_close()
2242 self->irlap = NULL; in nsc_ircc_net_close()
2244 iobase = self->io.fir_base; in nsc_ircc_net_close()
2246 disable_dma(self->io.dma); in nsc_ircc_net_close()
2255 free_irq(self->io.irq, dev); in nsc_ircc_net_close()
2256 free_dma(self->io.dma); in nsc_ircc_net_close()
2273 struct nsc_ircc_cb *self; in nsc_ircc_net_ioctl() local
2279 self = netdev_priv(dev); in nsc_ircc_net_ioctl()
2281 IRDA_ASSERT(self != NULL, return -1;); in nsc_ircc_net_ioctl()
2291 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_net_ioctl()
2292 nsc_ircc_change_speed(self, irq->ifr_baudrate); in nsc_ircc_net_ioctl()
2293 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_net_ioctl()
2300 irda_device_set_media_busy(self->netdev, TRUE); in nsc_ircc_net_ioctl()
2304 irq->ifr_receiving = nsc_ircc_is_receiving(self); in nsc_ircc_net_ioctl()
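
The private ioctls map onto three operations; only the speed change touches hardware and therefore takes the lock, and the two state-changing commands are gated on CAP_NET_ADMIN. A sketch of the dispatch:

/* Sketch: private-ioctl dispatch, as in nsc_ircc_net_ioctl(). */
static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *)rq;
	struct nsc_ircc_cb *self = netdev_priv(dev);
	unsigned long flags;

	switch (cmd) {
	case SIOCSBANDWIDTH:		/* set link speed */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&self->lock, flags);
		nsc_ircc_change_speed(self, irq->ifr_baudrate);
		spin_unlock_irqrestore(&self->lock, flags);
		break;
	case SIOCSMEDIABUSY:		/* mark media busy */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING:		/* poll RX state */
		irq->ifr_receiving = nsc_ircc_is_receiving(self);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}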
2314 struct nsc_ircc_cb *self = platform_get_drvdata(dev); in nsc_ircc_suspend() local
2317 int iobase = self->io.fir_base; in nsc_ircc_suspend()
2319 if (self->io.suspended) in nsc_ircc_suspend()
2325 if (netif_running(self->netdev)) { in nsc_ircc_suspend()
2326 netif_device_detach(self->netdev); in nsc_ircc_suspend()
2327 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_suspend()
2338 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_suspend()
2339 free_irq(self->io.irq, self->netdev); in nsc_ircc_suspend()
2340 disable_dma(self->io.dma); in nsc_ircc_suspend()
2342 self->io.suspended = 1; in nsc_ircc_suspend()
2350 struct nsc_ircc_cb *self = platform_get_drvdata(dev); in nsc_ircc_resume() local
2353 if (!self->io.suspended) in nsc_ircc_resume()
2359 nsc_ircc_setup(&self->io); in nsc_ircc_resume()
2360 nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); in nsc_ircc_resume()
2362 if (netif_running(self->netdev)) { in nsc_ircc_resume()
2363 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, in nsc_ircc_resume()
2364 self->netdev->name, self->netdev)) { in nsc_ircc_resume()
2366 driver_name, self->io.irq); in nsc_ircc_resume()
2372 unregister_netdevice(self->netdev); in nsc_ircc_resume()
2374 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_resume()
2375 nsc_ircc_change_speed(self, self->io.speed); in nsc_ircc_resume()
2376 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_resume()
2377 netif_device_attach(self->netdev); in nsc_ircc_resume()
2381 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_resume()
2382 nsc_ircc_change_speed(self, 9600); in nsc_ircc_resume()
2383 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_resume()
2385 self->io.suspended = 0; in nsc_ircc_resume()
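
Resume is the mirror of suspend: the chip loses its state across power-down, so the register setup and dongle init are redone, the IRQ is re-requested, and the last programmed speed is restored under the lock before the netdev is reattached. Condensed:

/* Sketch: the resume half of the PM pair, as in nsc_ircc_resume(). */
static int example_resume(struct platform_device *pdev)
{
	struct nsc_ircc_cb *self = platform_get_drvdata(pdev);
	unsigned long flags;

	if (!self->io.suspended)
		return 0;

	nsc_ircc_setup(&self->io);
	nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);

	if (netif_running(self->netdev)) {
		if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
				self->netdev->name, self->netdev))
			return -EAGAIN;	/* original unregisters the netdev */

		spin_lock_irqsave(&self->lock, flags);
		nsc_ircc_change_speed(self, self->io.speed);
		spin_unlock_irqrestore(&self->lock, flags);
		netif_device_attach(self->netdev);
	} else {
		/* Interface down: still reset the chip to 9600 baud. */
		spin_lock_irqsave(&self->lock, flags);
		nsc_ircc_change_speed(self, 9600);
		spin_unlock_irqrestore(&self->lock, flags);
	}

	self->io.suspended = 0;
	return 0;
}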