Lines matching refs: self (Blackfin SIR IrDA driver, drivers/net/irda/bfin_sir.c)
Each entry gives the source line number, the matching line, and the enclosing function; declaration sites are marked "local".

168 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_is_receiving() local
169 struct bfin_sir_port *port = self->sir_port; in bfin_sir_is_receiving()
173 return self->rx_buff.state != OUTSIDE_FRAME; in bfin_sir_is_receiving()
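
These three lines show the accessor pattern used throughout the driver: alloc_irdadev() reserves a private bfin_sir_self area inside the net_device, netdev_priv() recovers it, and the UART state hangs off self->sir_port. A minimal reconstruction of the surrounding function, as a sketch: only the three listed lines are confirmed by the listing, and the IER guard is an assumption modeled on typical Blackfin UART drivers.

        static int bfin_sir_is_receiving(struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);  /* line 168 */
                struct bfin_sir_port *port = self->sir_port;    /* line 169 */

                if (!(SIR_UART_GET_IER(port) & ERBFI))          /* assumed guard */
                        return 0;
                return self->rx_buff.state != OUTSIDE_FRAME;    /* line 173 */
        }
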
180 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_tx_chars() local
181 struct bfin_sir_port *port = self->sir_port; in bfin_sir_tx_chars()
183 if (self->tx_buff.len != 0) { in bfin_sir_tx_chars()
184 chr = *(self->tx_buff.data); in bfin_sir_tx_chars()
186 self->tx_buff.data++; in bfin_sir_tx_chars()
187 self->tx_buff.len--; in bfin_sir_tx_chars()
189 self->stats.tx_packets++; in bfin_sir_tx_chars()
190 self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head; in bfin_sir_tx_chars()
191 if (self->newspeed) { in bfin_sir_tx_chars()
192 bfin_sir_set_speed(port, self->newspeed); in bfin_sir_tx_chars()
193 self->speed = self->newspeed; in bfin_sir_tx_chars()
194 self->newspeed = 0; in bfin_sir_tx_chars()
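
Lines 183-194 are the two halves of the PIO transmit path: while bytes remain, one character goes out and the cursor advances; once the wrapped frame is drained, the byte count is derived from pointer arithmetic (data - head) and any speed change requested mid-traffic is applied only now, after the frame has left the wire. A hedged sketch; the write macro and the stop/wake epilogue are assumptions, not part of the listing.

        static void bfin_sir_tx_chars(struct net_device *dev)
        {
                unsigned int chr;
                struct bfin_sir_self *self = netdev_priv(dev);
                struct bfin_sir_port *port = self->sir_port;

                if (self->tx_buff.len != 0) {
                        chr = *(self->tx_buff.data);
                        SIR_UART_PUT_CHAR(port, chr);           /* assumed write macro */
                        self->tx_buff.data++;
                        self->tx_buff.len--;
                } else {
                        self->stats.tx_packets++;
                        self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
                        /* Apply a queued baud-rate change only between frames. */
                        if (self->newspeed) {
                                bfin_sir_set_speed(port, self->newspeed);
                                self->speed = self->newspeed;
                                self->newspeed = 0;
                        }
                        bfin_sir_stop_tx(port);                 /* assumed */
                        netif_wake_queue(dev);                  /* assumed */
                }
        }
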
205 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_rx_chars() local
206 struct bfin_sir_port *port = self->sir_port; in bfin_sir_rx_chars()
211 async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); in bfin_sir_rx_chars()
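
Line 211 is the heart of the SIR receive path: each raw UART byte is fed to the IrDA core's async_unwrap_char(), which strips the async wrapping (BOF/EOF markers, byte stuffing) and delivers complete frames upward while updating self->stats and self->rx_buff. A sketch; the LSR clear and character-read macros are assumptions modeled on typical Blackfin UART code.

        static void bfin_sir_rx_chars(struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);
                struct bfin_sir_port *port = self->sir_port;
                unsigned char ch;

                SIR_UART_CLEAR_LSR(port);                       /* assumed */
                ch = SIR_UART_GET_CHAR(port);                   /* assumed */
                /* IrDA core unwraps the byte stream into frames (line 211). */
                async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
                dev->last_rx = jiffies;
        }
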
218 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_rx_int() local
219 struct bfin_sir_port *port = self->sir_port; in bfin_sir_rx_int()
221 spin_lock(&self->lock); in bfin_sir_rx_int()
224 spin_unlock(&self->lock); in bfin_sir_rx_int()
232 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_tx_int() local
233 struct bfin_sir_port *port = self->sir_port; in bfin_sir_tx_int()
235 spin_lock(&self->lock); in bfin_sir_tx_int()
238 spin_unlock(&self->lock); in bfin_sir_tx_int()
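
Both UART interrupt handlers (lines 221/224 and 235/238) take self->lock with plain spin_lock() rather than spin_lock_irqsave(), which is safe because they already run in hard-IRQ context with local interrupts disabled. A sketch of the TX side; the line-status test is an assumption.

        static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id;
                struct bfin_sir_self *self = netdev_priv(dev);
                struct bfin_sir_port *port = self->sir_port;

                spin_lock(&self->lock);                 /* plain lock: hard-IRQ context */
                if (SIR_UART_GET_LSR(port) & THRE)      /* assumed THR-empty test */
                        bfin_sir_tx_chars(dev);
                spin_unlock(&self->lock);

                return IRQ_HANDLED;
        }
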
247 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_dma_tx_chars() local
248 struct bfin_sir_port *port = self->sir_port; in bfin_sir_dma_tx_chars()
254 if (self->tx_buff.len == 0) { in bfin_sir_dma_tx_chars()
255 self->stats.tx_packets++; in bfin_sir_dma_tx_chars()
256 if (self->newspeed) { in bfin_sir_dma_tx_chars()
257 bfin_sir_set_speed(port, self->newspeed); in bfin_sir_dma_tx_chars()
258 self->speed = self->newspeed; in bfin_sir_dma_tx_chars()
259 self->newspeed = 0; in bfin_sir_dma_tx_chars()
267 blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data), in bfin_sir_dma_tx_chars()
268 (unsigned long)(self->tx_buff.data+self->tx_buff.len)); in bfin_sir_dma_tx_chars()
274 (unsigned long)(self->tx_buff.data)); in bfin_sir_dma_tx_chars()
275 set_dma_x_count(port->tx_dma_channel, self->tx_buff.len); in bfin_sir_dma_tx_chars()
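
The DMA transmit setup (lines 247-275) first handles the drained-buffer case just like the PIO path (counters, deferred speed change), then arms the hardware. Because the DMA engine bypasses the CPU cache, the wrapped frame is flushed from the data cache (lines 267-268) before the channel is pointed at the buffer and given its length (lines 274-275). A trimmed sketch of that tail; the stride and enable calls follow the standard Blackfin DMA API but are assumptions here.

        /* The TX buffer is read by DMA, so make RAM coherent first. */
        blackfin_dcache_flush_range((unsigned long)self->tx_buff.data,
                        (unsigned long)(self->tx_buff.data + self->tx_buff.len));

        set_dma_start_addr(port->tx_dma_channel,
                        (unsigned long)self->tx_buff.data);        /* line 274 */
        set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);  /* line 275 */
        set_dma_x_modify(port->tx_dma_channel, 1);                 /* assumed: byte stride */
        enable_dma(port->tx_dma_channel);                          /* assumed */
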
283 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_dma_tx_int() local
284 struct bfin_sir_port *port = self->sir_port; in bfin_sir_dma_tx_int()
286 spin_lock(&self->lock); in bfin_sir_dma_tx_int()
291 self->stats.tx_packets++; in bfin_sir_dma_tx_int()
292 self->stats.tx_bytes += self->tx_buff.len; in bfin_sir_dma_tx_int()
293 self->tx_buff.len = 0; in bfin_sir_dma_tx_int()
294 if (self->newspeed) { in bfin_sir_dma_tx_int()
295 bfin_sir_set_speed(port, self->newspeed); in bfin_sir_dma_tx_int()
296 self->speed = self->newspeed; in bfin_sir_dma_tx_int()
297 self->newspeed = 0; in bfin_sir_dma_tx_int()
304 spin_unlock(&self->lock); in bfin_sir_dma_tx_int()
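
On TX-DMA completion (lines 283-304) the whole frame is accounted in one step: contrast line 292 (tx_bytes += self->tx_buff.len) with the PIO path's pointer arithmetic at line 190, after which len is zeroed so a stale value cannot be counted twice; the pending newspeed is applied here for the same reason as in the PIO path, the frame is fully on the wire. A sketch of the completion branch; the irqstat calls are assumptions from the Blackfin DMA API.

        spin_lock(&self->lock);
        if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {  /* assumed */
                clear_dma_irqstat(port->tx_dma_channel);                /* assumed */

                self->stats.tx_packets++;
                self->stats.tx_bytes += self->tx_buff.len;  /* whole frame at once */
                self->tx_buff.len = 0;
                if (self->newspeed) {
                        bfin_sir_set_speed(port, self->newspeed);
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                netif_wake_queue(dev);                      /* assumed */
        }
        spin_unlock(&self->lock);
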
311 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_dma_rx_chars() local
312 struct bfin_sir_port *port = self->sir_port; in bfin_sir_dma_rx_chars()
318 async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]); in bfin_sir_dma_rx_chars()
323 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_rx_dma_timeout() local
324 struct bfin_sir_port *port = self->sir_port; in bfin_sir_rx_dma_timeout()
328 spin_lock_irqsave(&self->lock, flags); in bfin_sir_rx_dma_timeout()
340 spin_unlock_irqrestore(&self->lock, flags); in bfin_sir_rx_dma_timeout()
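
The DMA receive side drains port->rx_dma_buf byte by byte through the same async_unwrap_char() as the PIO path (line 318). Note the locking difference: bfin_sir_rx_dma_timeout() runs from a timer, so it must use spin_lock_irqsave() (lines 328/340), whereas the DMA RX interrupt at lines 350/363 can use the plain variant. A sketch of the drain loop; the head/tail bounds are inferred from the buffer fields in the listing, not confirmed by it.

        static void bfin_sir_dma_rx_chars(struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);
                struct bfin_sir_port *port = self->sir_port;
                int i;

                /* Hand every byte the DMA engine has landed to the IrDA core. */
                for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
                        async_unwrap_char(dev, &self->stats, &self->rx_buff,
                                          port->rx_dma_buf.buf[i]);
        }
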
346 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_dma_rx_int() local
347 struct bfin_sir_port *port = self->sir_port; in bfin_sir_dma_rx_int()
350 spin_lock(&self->lock); in bfin_sir_dma_rx_int()
363 spin_unlock(&self->lock); in bfin_sir_dma_rx_int()
457 struct bfin_sir_self *self; in bfin_sir_suspend() local
464 self = netdev_priv(dev); in bfin_sir_suspend()
465 if (self->open) { in bfin_sir_suspend()
466 flush_work(&self->work); in bfin_sir_suspend()
467 bfin_sir_shutdown(self->sir_port, dev); in bfin_sir_suspend()
477 struct bfin_sir_self *self; in bfin_sir_resume() local
485 self = netdev_priv(dev); in bfin_sir_resume()
486 port = self->sir_port; in bfin_sir_resume()
487 if (self->open) { in bfin_sir_resume()
488 if (self->newspeed) { in bfin_sir_resume()
489 self->speed = self->newspeed; in bfin_sir_resume()
490 self->newspeed = 0; in bfin_sir_resume()
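
Suspend and resume bracket the port cleanly: suspend flushes the deferred-transmit work before shutting the port down, and only if the device is open (lines 465-467); resume commits any speed change still pending when the system went down (lines 488-490). A sketch of the resume half; the restart calls are assumptions, and the driver may reprogram a fixed 9600 baud rather than self->speed.

        self = netdev_priv(dev);                        /* line 485 */
        port = self->sir_port;                          /* line 486 */
        if (self->open) {
                if (self->newspeed) {
                        /* Commit the rate negotiated before suspend. */
                        self->speed = self->newspeed;
                        self->newspeed = 0;
                }
                bfin_sir_startup(port, dev);            /* assumed restart */
                bfin_sir_set_speed(port, self->speed);  /* assumed; may reset to 9600 */
                bfin_sir_enable_rx(port);               /* assumed */
                netif_device_attach(dev);               /* assumed */
        }
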
506 struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work); in bfin_sir_send_work() local
507 struct net_device *dev = self->sir_port->dev; in bfin_sir_send_work()
508 struct bfin_sir_port *port = self->sir_port; in bfin_sir_send_work()
513 turnaround_delay(dev->last_rx, self->mtt); in bfin_sir_send_work()
539 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_hard_xmit() local
544 self->mtt = irda_get_mtt(skb); in bfin_sir_hard_xmit()
546 if (speed != self->speed && speed != -1) in bfin_sir_hard_xmit()
547 self->newspeed = speed; in bfin_sir_hard_xmit()
549 self->tx_buff.data = self->tx_buff.head; in bfin_sir_hard_xmit()
551 self->tx_buff.len = 0; in bfin_sir_hard_xmit()
553 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize); in bfin_sir_hard_xmit()
555 schedule_work(&self->work); in bfin_sir_hard_xmit()
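
Transmission is two-stage: bfin_sir_hard_xmit() wraps the skb into self->tx_buff with async_wrap_skb() (line 553), records the media turnaround time from the skb (line 544) and any new speed request (lines 546-547), then defers the actual send to self->work (line 555). The worker, bfin_sir_send_work (lines 506-513), honours the minimum turnaround delay relative to dev->last_rx before driving the hardware. A hedged sketch of the xmit half; the queue stop and skb free are assumptions.

        static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);
                int speed = irda_get_next_speed(skb);

                netif_stop_queue(dev);                  /* assumed */

                self->mtt = irda_get_mtt(skb);          /* line 544 */

                if (speed != self->speed && speed != -1)
                        self->newspeed = speed;         /* applied after this frame */

                self->tx_buff.data = self->tx_buff.head;
                if (skb->len == 0)
                        self->tx_buff.len = 0;
                else
                        self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                                           self->tx_buff.truesize);

                schedule_work(&self->work);             /* line 555: defer to worker */
                dev_kfree_skb(skb);                     /* assumed */
                return 0;
        }
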
564 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_ioctl() local
565 struct bfin_sir_port *port = self->sir_port; in bfin_sir_ioctl()
571 if (self->open) { in bfin_sir_ioctl()
603 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_stats() local
605 return &self->stats; in bfin_sir_stats()
610 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_open() local
611 struct bfin_sir_port *port = self->sir_port; in bfin_sir_open()
614 self->newspeed = 0; in bfin_sir_open()
615 self->speed = 9600; in bfin_sir_open()
617 spin_lock_init(&self->lock); in bfin_sir_open()
625 self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME); in bfin_sir_open()
626 if (!self->irlap) { in bfin_sir_open()
631 INIT_WORK(&self->work, bfin_sir_send_work); in bfin_sir_open()
636 self->open = 1; in bfin_sir_open()
644 self->open = 0; in bfin_sir_open()
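
bfin_sir_open() (lines 610-644) shows the bring-up order: default to 9600 baud, initialize the lock, open an IrLAP instance against the device's QoS, register the deferred-send worker, then mark the device open; on IrLAP failure the flag is cleared on the error path (line 644). A sketch with the hardware start/stop calls and the errno hedged as assumptions.

        static int bfin_sir_open(struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);
                struct bfin_sir_port *port = self->sir_port;
                int err;

                self->newspeed = 0;
                self->speed = 9600;     /* IrDA discovery always starts here */

                spin_lock_init(&self->lock);

                err = bfin_sir_startup(port, dev);      /* assumed */
                if (err)
                        return err;

                self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
                if (!self->irlap) {
                        self->open = 0;                 /* line 644 */
                        bfin_sir_shutdown(port, dev);   /* assumed */
                        return -ENOMEM;                 /* assumed errno */
                }

                INIT_WORK(&self->work, bfin_sir_send_work);
                self->open = 1;
                netif_start_queue(dev);                 /* assumed */
                return 0;
        }
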
652 struct bfin_sir_self *self = netdev_priv(dev); in bfin_sir_stop() local
654 flush_work(&self->work); in bfin_sir_stop()
655 bfin_sir_shutdown(self->sir_port, dev); in bfin_sir_stop()
657 if (self->rxskb) { in bfin_sir_stop()
658 dev_kfree_skb(self->rxskb); in bfin_sir_stop()
659 self->rxskb = NULL; in bfin_sir_stop()
663 if (self->irlap) { in bfin_sir_stop()
664 irlap_close(self->irlap); in bfin_sir_stop()
665 self->irlap = NULL; in bfin_sir_stop()
669 self->open = 0; in bfin_sir_stop()
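
Teardown (lines 652-669) mirrors open in reverse: flush the pending worker before touching the hardware, drop any receive skb still held in self->rxskb, close the IrLAP instance, and only then clear self->open. NULLing each pointer after freeing keeps a repeated stop harmless. A condensed sketch; the queue stop is an assumption.

        static int bfin_sir_stop(struct net_device *dev)
        {
                struct bfin_sir_self *self = netdev_priv(dev);

                flush_work(&self->work);        /* no worker mid-teardown */
                bfin_sir_shutdown(self->sir_port, dev);

                if (self->rxskb) {
                        dev_kfree_skb(self->rxskb);
                        self->rxskb = NULL;
                }
                if (self->irlap) {
                        irlap_close(self->irlap);
                        self->irlap = NULL;
                }

                netif_stop_queue(dev);          /* assumed */
                self->open = 0;
                return 0;
        }
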
697 struct bfin_sir_self *self; in bfin_sir_probe() local
719 dev = alloc_irdadev(sizeof(*self)); in bfin_sir_probe()
723 self = netdev_priv(dev); in bfin_sir_probe()
724 self->dev = &pdev->dev; in bfin_sir_probe()
725 self->sir_port = sir_port; in bfin_sir_probe()
728 err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU); in bfin_sir_probe()
731 err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME); in bfin_sir_probe()
738 irda_init_max_qos_capabilies(&self->qos); in bfin_sir_probe()
757 self->qos.baud_rate.bits &= baudrate_mask; in bfin_sir_probe()
759 self->qos.min_turn_time.bits = 1; /* 10 ms or more */ in bfin_sir_probe()
761 irda_qos_bits_to_value(&self->qos); in bfin_sir_probe()
766 kfree(self->tx_buff.head); in bfin_sir_probe()
768 kfree(self->rx_buff.head); in bfin_sir_probe()
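
Probe (lines 697-768) is where self comes to exist: alloc_irdadev(sizeof(*self)) reserves the private area inside the net_device, the RX and TX iobufs are sized for IrDA's maximum SKB MTU and maximum SIR frame respectively, and the QoS capabilities are initialized, masked down to the baud rates the hardware supports, and folded into values before registration; the error path unwinds the iobufs (lines 766-768). A sketch of the QoS portion; the registration call is an assumption.

        irda_init_max_qos_capabilies(&self->qos);       /* line 738 */

        /* Advertise only rates this UART can actually clock. */
        self->qos.baud_rate.bits &= baudrate_mask;      /* line 757 */
        self->qos.min_turn_time.bits = 1;               /* 10 ms or more */
        irda_qos_bits_to_value(&self->qos);             /* line 761 */

        err = register_netdev(dev);                     /* assumed */
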
785 struct bfin_sir_self *self; in bfin_sir_remove() local
791 self = netdev_priv(dev); in bfin_sir_remove()
793 kfree(self->tx_buff.head); in bfin_sir_remove()
794 kfree(self->rx_buff.head); in bfin_sir_remove()
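
Remove (lines 785-794) frees the two iobuf backing stores that probe allocated. A hedged sketch of the full unwind; the unregister and free calls are assumptions.

        self = netdev_priv(dev);                /* line 791 */
        unregister_netdev(dev);                 /* assumed */
        kfree(self->tx_buff.head);              /* line 793 */
        kfree(self->rx_buff.head);              /* line 794 */
        free_netdev(dev);                       /* assumed: also releases *self */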