Lines matching refs:sdev

Each entry below is a source line that references sdev in the usbip stub TX path (the functions are those of stub_tx.c): the leading number is the line number within that file, the trailing "in <function>()" annotation names the enclosing function, and "argument"/"local" marks whether sdev is a function parameter or a local variable there.

38 void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,  in stub_enqueue_ret_unlink()  argument
45 usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC); in stub_enqueue_ret_unlink()
52 list_add_tail(&unlink->list, &sdev->unlink_tx); in stub_enqueue_ret_unlink()
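
The three fragments above cover stub_enqueue_ret_unlink(): allocate a stub_unlink record, raise a malloc-error event if the allocation fails, and queue the record on sdev->unlink_tx for the TX thread. A minimal reconstruction follows; the seqnum/status members mirror the function's parameters, the caller is assumed to already hold sdev->priv_lock (see the locked region in stub_complete(), lines 99 and 106), and the includes are the ones such a file would need. Note that the listed line 45 uses VDEV_EVENT_ERROR_MALLOC even though the other error paths in this listing use SDEV_* events.

#include <linux/kthread.h>
#include <linux/socket.h>

#include "usbip_common.h"	/* usbip_event_add(), struct usbip_header, event flags */
#include "stub.h"		/* struct stub_device, struct stub_priv, struct stub_unlink */

/* Queue a RET_UNLINK reply for @seqnum; assumed to run with sdev->priv_lock held. */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	/* Atomic allocation: this can be reached from URB completion context. */
	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	/* The TX thread drains sdev->unlink_tx in stub_send_ret_unlink(). */
	list_add_tail(&unlink->list, &sdev->unlink_tx);
}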
67 struct stub_device *sdev = priv->sdev; in stub_complete() local
99 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_complete()
101 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); in stub_complete()
104 list_move_tail(&priv->list, &sdev->priv_tx); in stub_complete()
106 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_complete()
109 wake_up(&sdev->tx_waitq); in stub_complete()
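
stub_complete() is the URB completion callback: it recovers sdev from the per-URB stub_priv (line 67), and under priv_lock either queues a RET_UNLINK result or moves the priv entry onto priv_tx for a RET_SUBMIT reply, then wakes the TX thread (line 109). A condensed sketch, under assumptions: the status logging at the top of the real handler is omitted, and the priv->unlinking flag and stub_free_priv_and_urb() helper are stand-ins for how the two branches are distinguished and cleaned up, since neither appears in the listing.

/* URB completion callback: hand the finished request over to the TX thread. */
void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	unsigned long flags;

	/* (The real handler first logs/normalizes urb->status here.) */

	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (priv->unlinking) {
		/* The URB was unlinked: answer with RET_UNLINK instead. */
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
		stub_free_priv_and_urb(priv);
	} else {
		/* Normal completion: queue for a RET_SUBMIT reply. */
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* Wake stub_tx_loop(), which sleeps on tx_waitq. */
	wake_up(&sdev->tx_waitq);
}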
137 static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev) in dequeue_from_priv_tx() argument
142 spin_lock_irqsave(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
144 list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) { in dequeue_from_priv_tx()
145 list_move_tail(&priv->list, &sdev->priv_free); in dequeue_from_priv_tx()
146 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
150 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
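
dequeue_from_priv_tx() pops one completed request off priv_tx while simultaneously parking it on priv_free, so the entry always stays on some list and can be reclaimed later even if sending fails; all list movement happens under priv_lock. The listed lines give nearly the whole helper, so the sketch below is close to complete:

/* Pop one entry from priv_tx; park it on priv_free so it can be reclaimed later. */
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		/* Take the first entry, move it to the free list, and return it. */
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);
	return NULL;
}

The unlink variant, dequeue_from_unlink_tx() (lines 288-301 further down), applies the same move-to-free-list pattern to unlink_tx and unlink_free, so it is not sketched separately.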
155 static int stub_send_ret_submit(struct stub_device *sdev) in stub_send_ret_submit() argument
165 while ((priv = dequeue_from_priv_tx(sdev)) != NULL) { in stub_send_ret_submit()
185 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); in stub_send_ret_submit()
232 dev_err(&sdev->interface->dev, in stub_send_ret_submit()
237 usbip_event_add(&sdev->ud, in stub_send_ret_submit()
249 usbip_event_add(&sdev->ud, in stub_send_ret_submit()
261 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, in stub_send_ret_submit()
264 dev_err(&sdev->interface->dev, in stub_send_ret_submit()
269 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); in stub_send_ret_submit()
279 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_send_ret_submit()
280 list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { in stub_send_ret_submit()
283 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_send_ret_submit()
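
stub_send_ret_submit() is the TX thread's main loop for completed URBs: it repeatedly dequeues a priv, serializes a RET_SUBMIT header plus any returned data into a kvec array, and pushes it out over the connection's TCP socket (line 261), escalating to the event handler on allocation or send failures (lines 185, 237, 249, 269). Once priv_tx is drained, everything parked on priv_free is released under priv_lock (lines 279-283). The sketch below compresses the real function heavily: setup_ret_submit_pdu(), usbip_header_correct_endian() and stub_free_priv_and_urb() are the driver's own helpers, and the isochronous handling (to which the length-mismatch dev_err at line 232 belongs) is omitted.

static int stub_send_ret_submit(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;
	struct msghdr msg;
	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		struct urb *urb = priv->urb;
		struct usbip_header pdu_header;
		struct kvec iov[2];
		int iovnum = 0;
		size_t txsize = 0;
		int ret;

		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(iov, 0, sizeof(iov));

		/* 1. RET_SUBMIT header, converted to network byte order. */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_header_correct_endian(&pdu_header, 1);
		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len  = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

		/* 2. Transfer buffer for IN transfers that returned data. */
		if (usb_pipein(urb->pipe) && urb->actual_length > 0) {
			iov[iovnum].iov_base = urb->transfer_buffer;
			iov[iovnum].iov_len  = urb->actual_length;
			iovnum++;
			txsize += urb->actual_length;
		}

		/* (Isochronous URBs additionally append their descriptor array.) */

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->interface->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		total_size += txsize;
	}

	/* Release everything parked on priv_free by dequeue_from_priv_tx(). */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list)
		stub_free_priv_and_urb(priv);
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}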
288 static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev) in dequeue_from_unlink_tx() argument
293 spin_lock_irqsave(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
295 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) { in dequeue_from_unlink_tx()
296 list_move_tail(&unlink->list, &sdev->unlink_free); in dequeue_from_unlink_tx()
297 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
301 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
306 static int stub_send_ret_unlink(struct stub_device *sdev) in stub_send_ret_unlink() argument
317 while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) { in stub_send_ret_unlink()
336 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, in stub_send_ret_unlink()
339 dev_err(&sdev->interface->dev, in stub_send_ret_unlink()
342 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); in stub_send_ret_unlink()
350 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_send_ret_unlink()
352 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) { in stub_send_ret_unlink()
357 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_send_ret_unlink()
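
stub_send_ret_unlink() does the same for queued unlink results: each stub_unlink record becomes a single RET_UNLINK header with no data payload, sent with one kvec (line 336), and the unlink_free list is emptied afterwards under priv_lock (lines 350-357). A condensed sketch, where setup_ret_unlink_pdu() is assumed to be the driver helper that fills the header from unlink->seqnum and unlink->status:

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;
	struct msghdr msg;
	struct kvec iov[1];
	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		struct usbip_header pdu_header;
		size_t txsize = 0;
		int ret;

		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(iov, 0, sizeof(iov));

		/* A RET_UNLINK reply is just the header, no payload. */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);
		iov[0].iov_base = &pdu_header;
		iov[0].iov_len  = sizeof(pdu_header);
		txsize += sizeof(pdu_header);

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->interface->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		total_size += txsize;
	}

	/* Free the records parked on unlink_free by dequeue_from_unlink_tx(). */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}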
365 struct stub_device *sdev = container_of(ud, struct stub_device, ud); in stub_tx_loop() local
385 if (stub_send_ret_submit(sdev) < 0) in stub_tx_loop()
388 if (stub_send_ret_unlink(sdev) < 0) in stub_tx_loop()
391 wait_event_interruptible(sdev->tx_waitq, in stub_tx_loop()
392 (!list_empty(&sdev->priv_tx) || in stub_tx_loop()
393 !list_empty(&sdev->unlink_tx) || in stub_tx_loop()
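
stub_tx_loop() ties the path together: the TX thread derives sdev from its usbip_device (line 365), flushes priv_tx via stub_send_ret_submit() before unlink_tx via stub_send_ret_unlink() (lines 385 and 388), and then sleeps on tx_waitq until stub_complete() queues more work or the thread is asked to stop (lines 391-393). Sketched below as a kthread function taking the usbip_device as its argument; the exact prototype and the event check at the top of the loop are assumptions about code not shown in the listing.

/* TX thread: flush completed submits, then unlink results, then sleep. */
int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/* RET_SUBMIT replies go out before RET_UNLINK replies. */
		if (stub_send_ret_submit(sdev) < 0)
			break;

		if (stub_send_ret_unlink(sdev) < 0)
			break;

		/* Sleep until stub_complete() queues new work or we must stop. */
		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}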