Lines matching refs:sl (identifier search over the slcan line-discipline driver, drivers/net/can/slcan.c)

142 static void slc_bump(struct slcan *sl)  in slc_bump()  argument
148 char *cmd = sl->rbuff; in slc_bump()
158 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; in slc_bump()
159 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; in slc_bump()
169 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; in slc_bump()
170 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; in slc_bump()
178 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid)) in slc_bump()
210 skb->dev = sl->dev; in slc_bump()
216 can_skb_prv(skb)->ifindex = sl->dev->ifindex; in slc_bump()
222 sl->dev->stats.rx_packets++; in slc_bump()
223 sl->dev->stats.rx_bytes += cf.can_dlc; in slc_bump()
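The slc_bump() lines above pull a received SLCAN frame apart in place: the DLC character is read from just behind the CAN-ID field, the ID string is NUL-terminated and parsed as hex with kstrtou32(), and the finished frame is charged to the netdevice rx statistics. As a rough user-space illustration of the same ASCII layout for a standard frame ('t', three hex ID digits, one DLC digit, two hex characters per data byte) — decode_sff() is purely an illustrative name, not the kernel helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Decode a standard-frame SLCAN string such as "t1232AA55" into its parts.
 * Illustrative user-space sketch, not the kernel's slc_bump(). */
static int decode_sff(const char *buf, uint32_t *id, int *dlc, uint8_t *data)
{
	char idstr[4] = {0};

	if (buf[0] != 't' || strlen(buf) < 5)
		return -1;
	memcpy(idstr, buf + 1, 3);              /* 3 hex digits of CAN ID */
	*id = (uint32_t)strtoul(idstr, NULL, 16);
	*dlc = buf[4] - '0';                    /* single DLC digit */
	if (*dlc < 0 || *dlc > 8 || strlen(buf) < 5 + 2 * (size_t)*dlc)
		return -1;
	for (int i = 0; i < *dlc; i++) {        /* 2 hex chars per byte */
		char byte[3] = { buf[5 + 2 * i], buf[6 + 2 * i], 0 };
		data[i] = (uint8_t)strtoul(byte, NULL, 16);
	}
	return 0;
}

int main(void)
{
	uint8_t data[8];
	uint32_t id;
	int dlc;

	if (decode_sff("t1232AA55", &id, &dlc, data) == 0) {
		printf("id=0x%03X dlc=%d data=", (unsigned)id, dlc);
		for (int i = 0; i < dlc; i++)
			printf("%02X", data[i]);
		printf("\n");
	}
	return 0;
}

Extended ('T') and RTR ('r'/'R') frames follow the same pattern with an eight-digit ID and, for RTR, no data bytes.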
228 static void slcan_unesc(struct slcan *sl, unsigned char s) in slcan_unesc() argument
231 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && in slcan_unesc()
232 (sl->rcount > 4)) { in slcan_unesc()
233 slc_bump(sl); in slcan_unesc()
235 sl->rcount = 0; in slcan_unesc()
237 if (!test_bit(SLF_ERROR, &sl->flags)) { in slcan_unesc()
238 if (sl->rcount < SLC_MTU) { in slcan_unesc()
239 sl->rbuff[sl->rcount++] = s; in slcan_unesc()
242 sl->dev->stats.rx_over_errors++; in slcan_unesc()
243 set_bit(SLF_ERROR, &sl->flags); in slcan_unesc()
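slcan_unesc() is the byte-wise accumulator feeding slc_bump(): incoming tty bytes are appended to rbuff until the frame terminator arrives, short or errored frames are dropped, and an overrun sets SLF_ERROR and bumps rx_over_errors. A minimal sketch of that accumulate-until-terminator pattern, assuming '\r' as the terminator and using illustrative names only:

#include <stdio.h>
#include <string.h>

#define BUF_MAX 32	/* stand-in for SLC_MTU */

struct rx_state {
	char buf[BUF_MAX];
	int  count;	/* stand-in for sl->rcount */
	int  error;	/* stand-in for the SLF_ERROR bit */
};

/* Feed one received byte; on the terminator deliver the accumulated frame. */
static void rx_byte(struct rx_state *st, char c,
		    void (*deliver)(const char *, int))
{
	if (c == '\r') {
		if (!st->error && st->count > 4)	/* same minimum-length check */
			deliver(st->buf, st->count);
		st->count = 0;
		st->error = 0;
		return;
	}
	if (!st->error) {
		if (st->count < BUF_MAX)
			st->buf[st->count++] = c;
		else
			st->error = 1;		/* overrun: drop rest of frame */
	}
}

static void print_frame(const char *f, int len)
{
	printf("frame: %.*s\n", len, f);
}

int main(void)
{
	struct rx_state st = {0};
	const char *input = "t1232AA55\r";

	for (size_t i = 0; i < strlen(input); i++)
		rx_byte(&st, input[i], print_frame);
	return 0;
}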
254 static void slc_encaps(struct slcan *sl, struct can_frame *cf) in slc_encaps() argument
261 pos = sl->xbuff; in slc_encaps()
305 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); in slc_encaps()
306 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); in slc_encaps()
307 sl->xleft = (pos - sl->xbuff) - actual; in slc_encaps()
308 sl->xhead = sl->xbuff + actual; in slc_encaps()
309 sl->dev->stats.tx_bytes += cf->can_dlc; in slc_encaps()
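slc_encaps() goes the other way: it renders a can_frame into ASCII in xbuff, hands as much as possible to the tty with ops->write(), and parks the unwritten remainder in xleft/xhead for the write-wakeup path. The encoding half for a standard frame, as a hedged stand-alone sketch (encode_sff() is an illustrative name):

#include <stdio.h>
#include <stdint.h>

/* Encode a standard CAN frame as an SLCAN string, e.g. id 0x123 with the
 * two bytes AA 55 becomes "t1232AA55\r".  Returns the character count. */
static int encode_sff(char *out, uint32_t id, int dlc, const uint8_t *data)
{
	int n = sprintf(out, "t%03X%d", (unsigned)(id & 0x7FF), dlc);

	for (int i = 0; i < dlc; i++)
		n += sprintf(out + n, "%02X", data[i]);
	out[n++] = '\r';	/* frame terminator */
	out[n] = '\0';
	return n;
}

int main(void)
{
	uint8_t data[2] = { 0xAA, 0x55 };
	char buf[32];
	int len = encode_sff(buf, 0x123, 2, data);

	printf("%d chars: %s\n", len, buf);	/* trailing \r is invisible */
	return 0;
}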
315 struct slcan *sl = container_of(work, struct slcan, tx_work); in slcan_transmit() local
318 spin_lock_bh(&sl->lock); in slcan_transmit()
320 if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) { in slcan_transmit()
321 spin_unlock_bh(&sl->lock); in slcan_transmit()
325 if (sl->xleft <= 0) { in slcan_transmit()
328 sl->dev->stats.tx_packets++; in slcan_transmit()
329 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); in slcan_transmit()
330 spin_unlock_bh(&sl->lock); in slcan_transmit()
331 netif_wake_queue(sl->dev); in slcan_transmit()
335 actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); in slcan_transmit()
336 sl->xleft -= actual; in slcan_transmit()
337 sl->xhead += actual; in slcan_transmit()
338 spin_unlock_bh(&sl->lock); in slcan_transmit()
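slcan_transmit(), scheduled from slcan_write_wakeup(), finishes what slc_encaps() started: once xleft reaches zero it clears TTY_DO_WRITE_WAKEUP, counts the packet and wakes the netdev queue; otherwise it writes the remaining xleft bytes from xhead and adjusts both. The same short-write bookkeeping in ordinary user-space C (a sketch only; the driver defers the remainder to the write wakeup instead of looping, since it cannot wait for the tty there):

#include <unistd.h>
#include <string.h>

/* Write a buffer while tolerating short writes: keep a "head" pointer and
 * a "left" counter, analogous to sl->xhead / sl->xleft. */
static int write_all(int fd, const char *buf, size_t len)
{
	const char *head = buf;
	size_t left = len;

	while (left > 0) {
		ssize_t n = write(fd, head, left);

		if (n < 0)
			return -1;	/* real code would handle EINTR/EAGAIN */
		head += n;		/* advance past what the device took */
		left -= n;		/* and remember how much is pending  */
	}
	return 0;
}

int main(void)
{
	const char *frame = "t1232AA55\r";

	return write_all(STDOUT_FILENO, frame, strlen(frame));
}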
347 struct slcan *sl = tty->disc_data; in slcan_write_wakeup() local
349 schedule_work(&sl->tx_work); in slcan_write_wakeup()
355 struct slcan *sl = netdev_priv(dev); in slc_xmit() local
360 spin_lock(&sl->lock); in slc_xmit()
362 spin_unlock(&sl->lock); in slc_xmit()
366 if (sl->tty == NULL) { in slc_xmit()
367 spin_unlock(&sl->lock); in slc_xmit()
371 netif_stop_queue(sl->dev); in slc_xmit()
372 slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */ in slc_xmit()
373 spin_unlock(&sl->lock); in slc_xmit()
388 struct slcan *sl = netdev_priv(dev); in slc_close() local
390 spin_lock_bh(&sl->lock); in slc_close()
391 if (sl->tty) { in slc_close()
393 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); in slc_close()
396 sl->rcount = 0; in slc_close()
397 sl->xleft = 0; in slc_close()
398 spin_unlock_bh(&sl->lock); in slc_close()
406 struct slcan *sl = netdev_priv(dev); in slc_open() local
408 if (sl->tty == NULL) in slc_open()
411 sl->flags &= (1 << SLF_INUSE); in slc_open()
469 struct slcan *sl = (struct slcan *) tty->disc_data; in slcan_receive_buf() local
471 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) in slcan_receive_buf()
477 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) in slcan_receive_buf()
478 sl->dev->stats.rx_errors++; in slcan_receive_buf()
482 slcan_unesc(sl, *cp++); in slcan_receive_buf()
495 struct slcan *sl; in slc_sync() local
502 sl = netdev_priv(dev); in slc_sync()
503 if (sl->tty) in slc_sync()
516 struct slcan *sl; in slc_alloc() local
530 dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); in slc_alloc()
535 sl = netdev_priv(dev); in slc_alloc()
538 sl->magic = SLCAN_MAGIC; in slc_alloc()
539 sl->dev = dev; in slc_alloc()
540 spin_lock_init(&sl->lock); in slc_alloc()
541 INIT_WORK(&sl->tx_work, slcan_transmit); in slc_alloc()
544 return sl; in slc_alloc()
559 struct slcan *sl; in slcan_open() local
577 sl = tty->disc_data; in slcan_open()
581 if (sl && sl->magic == SLCAN_MAGIC) in slcan_open()
586 sl = slc_alloc(tty_devnum(tty)); in slcan_open()
587 if (sl == NULL) in slcan_open()
590 sl->tty = tty; in slcan_open()
591 tty->disc_data = sl; in slcan_open()
593 if (!test_bit(SLF_INUSE, &sl->flags)) { in slcan_open()
595 sl->rcount = 0; in slcan_open()
596 sl->xleft = 0; in slcan_open()
598 set_bit(SLF_INUSE, &sl->flags); in slcan_open()
600 err = register_netdevice(sl->dev); in slcan_open()
613 sl->tty = NULL; in slcan_open()
615 clear_bit(SLF_INUSE, &sl->flags); in slcan_open()
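slcan_open() is the line-discipline open callback: it allocates (or reuses) the slcan device for this tty, wires up tty->disc_data and sl->tty, and registers the netdevice. From user space this path is normally triggered by switching a serial port to the slcan line discipline, e.g. with ldattach or slcand. A bare-bones sketch using TIOCSETD; the device path is an assumption, and the UART speed and adapter setup commands that real tools send first are omitted:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef N_SLCAN
#define N_SLCAN 17	/* slcan line discipline number from <linux/tty.h> */
#endif

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/ttyUSB0";
	int ldisc = N_SLCAN;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Switching the ldisc invokes slcan_open() and creates slcanN. */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		return 1;
	}
	pause();	/* keep the tty open; closing it runs slcan_close() */
	return 0;
}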
634 struct slcan *sl = (struct slcan *) tty->disc_data; in slcan_close() local
637 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) in slcan_close()
640 spin_lock_bh(&sl->lock); in slcan_close()
642 sl->tty = NULL; in slcan_close()
643 spin_unlock_bh(&sl->lock); in slcan_close()
645 flush_work(&sl->tx_work); in slcan_close()
648 unregister_netdev(sl->dev); in slcan_close()
662 struct slcan *sl = (struct slcan *) tty->disc_data; in slcan_ioctl() local
666 if (!sl || sl->magic != SLCAN_MAGIC) in slcan_ioctl()
671 tmp = strlen(sl->dev->name) + 1; in slcan_ioctl()
672 if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) in slcan_ioctl()
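slcan_ioctl() answers SIOCGIFNAME on the tty file descriptor by copying sl->dev->name back to user space, which is how an attaching daemon learns which slcanN interface it just created. A hedged user-space counterpart (get_ifname() is an illustrative helper; the descriptor must already carry the slcan line discipline, e.g. after the TIOCSETD step above):

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#ifndef SIOCGIFNAME
#include <linux/sockios.h>
#endif

/* Ask the slcan line discipline which network interface it created. */
static int get_ifname(int fd, char *name)
{
	return ioctl(fd, SIOCGIFNAME, name);
}

int main(void)
{
	char name[IFNAMSIZ];

	/* Assumes stdin is the tty that was attached to the slcan ldisc. */
	if (get_ifname(STDIN_FILENO, name) == 0)
		printf("created interface: %s\n", name);
	else
		perror("SIOCGIFNAME");
	return 0;
}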
723 struct slcan *sl; in slcan_exit() local
741 sl = netdev_priv(dev); in slcan_exit()
742 spin_lock_bh(&sl->lock); in slcan_exit()
743 if (sl->tty) { in slcan_exit()
745 tty_hangup(sl->tty); in slcan_exit()
747 spin_unlock_bh(&sl->lock); in slcan_exit()
760 sl = netdev_priv(dev); in slcan_exit()
761 if (sl->tty) { in slcan_exit()