Lines matching refs:vs (cross-reference listing: the lines that use the identifier vs). Each entry gives the source line number, the line's text, and the enclosing function; "argument" marks lines where vs is a function parameter and "local" marks lines where it is declared as a local variable.
240 static void vhost_scsi_init_inflight(struct vhost_scsi *vs, in vhost_scsi_init_inflight() argument
248 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
253 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
255 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
258 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
259 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
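The inflight_idx lines above toggle between two per-queue slots with idx ^ 1: new requests are charged to the fresh slot while a flush waits for the old one to drain. A minimal userspace sketch of that double-buffer pattern (names are invented and a bare counter stands in for the kernel's reference counting):

    #include <stdio.h>

    struct inflight_slot { int nr_requests; };      /* one generation of requests */

    struct vq_state {
        struct inflight_slot inflights[2];          /* two generations            */
        int inflight_idx;                           /* slot new requests charge   */
    };

    /* Flip to a fresh slot and hand back the old one so a flush can wait
     * for its count to reach zero. */
    static struct inflight_slot *begin_new_generation(struct vq_state *vq)
    {
        struct inflight_slot *old = &vq->inflights[vq->inflight_idx];

        vq->inflight_idx ^= 1;                      /* the idx ^ 1 trick above    */
        vq->inflights[vq->inflight_idx].nr_requests = 0;
        return old;
    }

    int main(void)
    {
        struct vq_state vq = { .inflights = { { 3 }, { 0 } }, .inflight_idx = 0 };
        struct inflight_slot *old = begin_new_generation(&vq);

        printf("draining slot holds %d requests; new slot is %d\n",
               old->nr_requests, vq.inflight_idx);
        return 0;
    }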
534 struct vhost_scsi *vs = cmd->tvc_vhost; in vhost_scsi_complete_cmd() local
536 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); in vhost_scsi_complete_cmd()
538 vhost_work_queue(&vs->dev, &vs->vs_completion_work); in vhost_scsi_complete_cmd()
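vhost_scsi_complete_cmd() above pushes the finished command onto a lock-free list with llist_add() and queues deferred work with vhost_work_queue(); the completion context never takes a lock. A C11-atomics sketch of the push side, illustrating the idea rather than the kernel's llist implementation:

    #include <stdatomic.h>
    #include <stddef.h>

    struct lnode { struct lnode *next; };             /* intrusive list link  */
    struct lhead { _Atomic(struct lnode *) first; };  /* lock-free list head  */

    /* Push one node onto the front; safe against concurrent pushers,
     * in the spirit of llist_add(). */
    static void lpush(struct lnode *n, struct lhead *h)
    {
        struct lnode *old = atomic_load(&h->first);

        do {
            n->next = old;                            /* link to current head */
        } while (!atomic_compare_exchange_weak(&h->first, &old, n));
    }

The queued work item is what later drains the list in the worker thread; the matching consumer side is sketched after the vhost_scsi_evt_work() entries below.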
567 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_free_evt() argument
569 vs->vs_events_nr--; in vhost_scsi_free_evt()
574 vhost_scsi_allocate_evt(struct vhost_scsi *vs, in vhost_scsi_allocate_evt() argument
577 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
580 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { in vhost_scsi_allocate_evt()
581 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
588 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
594 vs->vs_events_nr++; in vhost_scsi_allocate_evt()
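The event allocator above caps how many events may be queued and latches vs_events_missed when the cap is exceeded or the allocation fails, so the guest can later be told that events were lost. A userspace sketch of that shape (the cap value and all names are placeholders):

    #include <stdlib.h>
    #include <stdbool.h>

    #define MAX_EVENTS 128                  /* stands in for VHOST_SCSI_MAX_EVENT */

    struct evt { int type; };
    struct evt_state { int events_nr; bool events_missed; };

    static struct evt *allocate_evt(struct evt_state *s, int type)
    {
        struct evt *e;

        if (s->events_nr > MAX_EVENTS) {    /* too many queued already        */
            s->events_missed = true;
            return NULL;
        }
        e = malloc(sizeof(*e));
        if (!e) {                           /* allocation failure also counts */
            s->events_missed = true;
            return NULL;
        }
        e->type = type;
        s->events_nr++;
        return e;
    }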
614 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_do_evt_work() argument
616 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
623 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
628 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
633 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
637 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
639 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
646 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
650 if (vs->vs_events_missed) { in vhost_scsi_do_evt_work()
652 vs->vs_events_missed = false; in vhost_scsi_do_evt_work()
658 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
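The disable/enable notify calls above are the usual virtqueue race check: guest kicks are suppressed while the host looks for a buffer; if the ring seems empty, notifications are re-enabled, and a true return from vhost_enable_notify() means a buffer arrived in the meantime, so the lookup is retried. A compile-only sketch of that control flow with invented helpers:

    #include <stdbool.h>

    #define NO_DESC (-1)

    struct vq;                                 /* opaque for this sketch           */
    void disable_notify(struct vq *vq);        /* hypothetical stand-ins for the   */
    bool reenable_and_recheck(struct vq *vq);  /* vhost_{disable,enable}_notify()  */
    int  get_desc(struct vq *vq);              /* returns NO_DESC when ring empty  */
    void fill_and_signal(struct vq *vq, int head);

    /* Returns false when no buffer was available and the event is "missed". */
    static bool deliver_event(struct vq *vq)
    {
        int head;

    again:
        disable_notify(vq);                    /* stop guest kicks while we look   */
        head = get_desc(vq);
        if (head == NO_DESC) {
            if (reenable_and_recheck(vq))      /* a buffer raced in: look again    */
                goto again;
            return false;                      /* truly empty: caller marks missed */
        }
        fill_and_signal(vq, head);             /* copy the event, signal the guest */
        return true;
    }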
665 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_evt_work() local
667 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
672 llnode = llist_del_all(&vs->vs_event_list); in vhost_scsi_evt_work()
676 vhost_scsi_do_evt_work(vs, evt); in vhost_scsi_evt_work()
677 vhost_scsi_free_evt(vs, evt); in vhost_scsi_evt_work()
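vhost_scsi_evt_work() detaches the whole pending-event list in one operation (llist_del_all) and then walks it without any locking, delivering and freeing each event. The matching consumer side of the push sketch shown earlier, again in plain C11 atomics:

    #include <stdatomic.h>
    #include <stddef.h>

    struct lnode { struct lnode *next; };
    struct lhead { _Atomic(struct lnode *) first; };

    /* Detach the entire chain at once, llist_del_all() style; the caller
     * then owns every node on it. */
    static struct lnode *ltake_all(struct lhead *h)
    {
        return atomic_exchange(&h->first, NULL);
    }

    static void drain(struct lhead *h)
    {
        struct lnode *n = ltake_all(h);

        while (n) {
            struct lnode *next = n->next;   /* read before the node is freed */
            /* deliver the containing event here, then free it */
            n = next;
        }
    }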
689 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_complete_cmd_work() local
700 llnode = llist_del_all(&vs->vs_completion_list); in vhost_scsi_complete_cmd_work()
726 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
737 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
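In the completion work above, vq = q - vs->vqs recovers a virtqueue's index from a pointer into the vqs[] array, so the right queue can be signalled without storing an index in every command. The pointer arithmetic in isolation:

    #include <stddef.h>
    #include <stdio.h>

    struct vq { int id; };

    int main(void)
    {
        struct vq vqs[4];
        struct vq *q = &vqs[2];
        ptrdiff_t idx = q - vqs;            /* element index, here 2 */

        printf("index = %td\n", idx);
        return 0;
    }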
986 vhost_scsi_send_bad_target(struct vhost_scsi *vs, in vhost_scsi_send_bad_target() argument
999 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
1005 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
1032 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
1045 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_handle_vq()
1046 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
1096 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1102 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1109 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1155 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1163 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1201 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1210 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1213 cmd->tvc_vhost = vs; in vhost_scsi_handle_vq()
1230 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
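Every error path in vhost_scsi_handle_vq() above ends in vhost_scsi_send_bad_target(): once a descriptor has been popped from the ring it is always completed, with a bad-target response when the request cannot be parsed or routed, so the guest is never left waiting on it. A compile-only sketch of that rule (all names invented):

    #include <stdbool.h>

    struct ring;                                         /* opaque for this sketch */
    struct request { int target; };

    bool parse_request(struct ring *r, int head, struct request *req);
    bool target_exists(const struct request *req);
    void complete_bad_target(struct ring *r, int head);  /* immediate error reply  */
    void submit(struct ring *r, int head, struct request *req);

    /* A descriptor taken from the ring is always completed, either by
     * submitting the command or by an immediate error response. */
    static void handle_one(struct ring *r, int head)
    {
        struct request req;

        if (!parse_request(r, head, &req) || !target_exists(&req)) {
            complete_bad_target(r, head);                /* guest sees the error   */
            return;
        }
        submit(r, head, &req);                           /* completed later        */
    }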
1259 vhost_scsi_send_evt(struct vhost_scsi *vs, in vhost_scsi_send_evt() argument
1267 evt = vhost_scsi_allocate_evt(vs, event, reason); in vhost_scsi_send_evt()
1284 llist_add(&evt->list, &vs->vs_event_list); in vhost_scsi_send_evt()
1285 vhost_work_queue(&vs->dev, &vs->vs_event_work); in vhost_scsi_send_evt()
1292 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick() local
1298 if (vs->vs_events_missed) in vhost_scsi_evt_handle_kick()
1299 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); in vhost_scsi_evt_handle_kick()
1308 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick() local
1310 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
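Both kick handlers above recover the vhost_scsi instance from the embedded vhost_dev with container_of(). The same offsetof arithmetic in plain C, with made-up structure names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dev  { int id; };
    struct scsi { int nr_vqs; struct dev dev; };   /* dev embedded, not pointed to */

    int main(void)
    {
        struct scsi s = { .nr_vqs = 3, .dev = { .id = 7 } };
        struct dev *d = &s.dev;
        struct scsi *back = container_of(d, struct scsi, dev);

        printf("nr_vqs = %d\n", back->nr_vqs);     /* prints 3 */
        return 0;
    }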
1313 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) in vhost_scsi_flush_vq() argument
1315 vhost_poll_flush(&vs->vqs[index].vq.poll); in vhost_scsi_flush_vq()
1319 static void vhost_scsi_flush(struct vhost_scsi *vs) in vhost_scsi_flush() argument
1325 vhost_scsi_init_inflight(vs, old_inflight); in vhost_scsi_flush()
1337 vhost_scsi_flush_vq(vs, i); in vhost_scsi_flush()
1338 vhost_work_flush(&vs->dev, &vs->vs_completion_work); in vhost_scsi_flush()
1339 vhost_work_flush(&vs->dev, &vs->vs_event_work); in vhost_scsi_flush()
1354 vhost_scsi_set_endpoint(struct vhost_scsi *vs, in vhost_scsi_set_endpoint() argument
1366 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1369 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_set_endpoint()
1371 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1383 if (vs->vs_tpg) in vhost_scsi_set_endpoint()
1384 memcpy(vs_tpg, vs->vs_tpg, len); in vhost_scsi_set_endpoint()
1399 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { in vhost_scsi_set_endpoint()
1420 tpg->vhost_scsi = vs; in vhost_scsi_set_endpoint()
1429 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, in vhost_scsi_set_endpoint()
1430 sizeof(vs->vs_vhost_wwpn)); in vhost_scsi_set_endpoint()
1432 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1447 vhost_scsi_flush(vs); in vhost_scsi_set_endpoint()
1448 kfree(vs->vs_tpg); in vhost_scsi_set_endpoint()
1449 vs->vs_tpg = vs_tpg; in vhost_scsi_set_endpoint()
1452 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
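vhost_scsi_set_endpoint() above builds a new vs_tpg target array from a copy of the current one, wires in the new target, flushes outstanding work, and only then publishes the new array and frees the old one. A userspace sketch of that copy-then-publish shape (array size and names are placeholders):

    #include <stdlib.h>
    #include <string.h>

    #define MAX_TARGETS 256                 /* stands in for VHOST_SCSI_MAX_TARGET */

    struct endpoint { void **targets; };    /* currently published target array    */

    static int add_target(struct endpoint *ep, unsigned int id, void *tpg)
    {
        void **next;

        if (id >= MAX_TARGETS)
            return -1;
        next = calloc(MAX_TARGETS, sizeof(*next));
        if (!next)
            return -1;
        if (ep->targets)                    /* start from the current set          */
            memcpy(next, ep->targets, MAX_TARGETS * sizeof(*next));
        next[id] = tpg;                     /* wire in the new target              */
        /* flush outstanding requests here before switching over */
        free(ep->targets);
        ep->targets = next;                 /* publish the new array               */
        return 0;
    }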
1458 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, in vhost_scsi_clear_endpoint() argument
1470 mutex_lock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1472 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_clear_endpoint()
1473 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1479 if (!vs->vs_tpg) { in vhost_scsi_clear_endpoint()
1486 tpg = vs->vs_tpg[target]; in vhost_scsi_clear_endpoint()
1507 vs->vs_tpg[target] = NULL; in vhost_scsi_clear_endpoint()
1519 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1529 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1530 kfree(vs->vs_tpg); in vhost_scsi_clear_endpoint()
1531 vs->vs_tpg = NULL; in vhost_scsi_clear_endpoint()
1532 WARN_ON(vs->vs_events_nr); in vhost_scsi_clear_endpoint()
1533 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1540 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1545 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) in vhost_scsi_set_features() argument
1553 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_features()
1555 !vhost_log_access_ok(&vs->dev)) { in vhost_scsi_set_features()
1556 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1561 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1566 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
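The feature path above rejects masks it cannot honour (including the logging feature when the log region is not accessible) and then records the negotiated mask in every virtqueue, taking each queue's own mutex so its handler always sees a consistent value. A pthread sketch of the per-queue update (the offered mask is a placeholder):

    #include <stdint.h>
    #include <pthread.h>

    #define OFFERED_FEATURES 0x7ULL         /* placeholder feature mask */

    struct queue { pthread_mutex_t lock; uint64_t acked_features; };

    static int set_features(struct queue *qs, int nr, uint64_t features)
    {
        int i;

        if (features & ~OFFERED_FEATURES)   /* a bit we never offered   */
            return -1;
        for (i = 0; i < nr; i++) {
            pthread_mutex_lock(&qs[i].lock);
            qs[i].acked_features = features;    /* per-queue copy       */
            pthread_mutex_unlock(&qs[i].lock);
        }
        return 0;
    }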
1572 struct vhost_scsi *vs; in vhost_scsi_open() local
1576 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); in vhost_scsi_open()
1577 if (!vs) { in vhost_scsi_open()
1578 vs = vzalloc(sizeof(*vs)); in vhost_scsi_open()
1579 if (!vs) in vhost_scsi_open()
1587 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); in vhost_scsi_open()
1588 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); in vhost_scsi_open()
1590 vs->vs_events_nr = 0; in vhost_scsi_open()
1591 vs->vs_events_missed = false; in vhost_scsi_open()
1593 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1594 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1595 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1596 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1598 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1599 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1601 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); in vhost_scsi_open()
1603 vhost_scsi_init_inflight(vs, NULL); in vhost_scsi_open()
1605 f->private_data = vs; in vhost_scsi_open()
1609 kvfree(vs); in vhost_scsi_open()
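vhost_scsi_open() above uses the common large-allocation idiom: try a physically contiguous kzalloc() quietly (__GFP_NOWARN), fall back to vzalloc() when memory is too fragmented, and free with kvfree(), which handles either kind. Kernel-style sketch of just that idiom (the function name is invented and the fragment is not a standalone program):

    static struct vhost_scsi *vhost_scsi_alloc(void)
    {
        struct vhost_scsi *vs;

        /* quiet first attempt at physically contiguous memory */
        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vs)
            vs = vzalloc(sizeof(*vs));      /* virtually contiguous fallback */
        return vs;                          /* caller frees with kvfree()    */
    }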
1616 struct vhost_scsi *vs = f->private_data; in vhost_scsi_release() local
1619 mutex_lock(&vs->dev.mutex); in vhost_scsi_release()
1620 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); in vhost_scsi_release()
1621 mutex_unlock(&vs->dev.mutex); in vhost_scsi_release()
1622 vhost_scsi_clear_endpoint(vs, &t); in vhost_scsi_release()
1623 vhost_dev_stop(&vs->dev); in vhost_scsi_release()
1624 vhost_dev_cleanup(&vs->dev, false); in vhost_scsi_release()
1626 vhost_scsi_flush(vs); in vhost_scsi_release()
1627 kfree(vs->dev.vqs); in vhost_scsi_release()
1628 kvfree(vs); in vhost_scsi_release()
1637 struct vhost_scsi *vs = f->private_data; in vhost_scsi_ioctl() local
1645 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl()
1654 return vhost_scsi_set_endpoint(vs, &backend); in vhost_scsi_ioctl()
1661 return vhost_scsi_clear_endpoint(vs, &backend); in vhost_scsi_ioctl()
1670 vs->vs_events_missed = events_missed; in vhost_scsi_ioctl()
1675 events_missed = vs->vs_events_missed; in vhost_scsi_ioctl()
1688 return vhost_scsi_set_features(vs, features); in vhost_scsi_ioctl()
1690 mutex_lock(&vs->dev.mutex); in vhost_scsi_ioctl()
1691 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1694 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1695 mutex_unlock(&vs->dev.mutex); in vhost_scsi_ioctl()
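The ioctl handler above serves its device-specific commands directly and hands anything else to the generic vhost device handler, falling back to the vring handler when the device handler does not recognise the command (the -ENOIOCTLCMD check in the source). A compile-only sketch of that dispatch shape with invented names:

    struct dev;                                   /* opaque for this sketch        */

    long set_endpoint_ioctl(struct dev *d, void *argp);
    long generic_dev_ioctl(struct dev *d, unsigned int cmd, void *argp);
    long generic_vring_ioctl(struct dev *d, unsigned int cmd, void *argp);
    void dev_lock(struct dev *d);
    void dev_unlock(struct dev *d);

    enum { CMD_SET_ENDPOINT = 1, ERR_NOT_MINE = -2 };

    static long scsi_ioctl(struct dev *d, unsigned int cmd, void *argp)
    {
        long r;

        switch (cmd) {
        case CMD_SET_ENDPOINT:                    /* device-specific command       */
            return set_endpoint_ioctl(d, argp);
        default:
            dev_lock(d);
            r = generic_dev_ioctl(d, cmd, argp);  /* common vhost commands         */
            if (r == ERR_NOT_MINE)                /* like the -ENOIOCTLCMD check   */
                r = generic_vring_ioctl(d, cmd, argp);
            dev_unlock(d);
            return r;
        }
    }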
1756 struct vhost_scsi *vs = tpg->vhost_scsi; in vhost_scsi_do_plug() local
1760 if (!vs) in vhost_scsi_do_plug()
1763 mutex_lock(&vs->dev.mutex); in vhost_scsi_do_plug()
1770 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
1773 vhost_scsi_send_evt(vs, tpg, lun, in vhost_scsi_do_plug()
1776 mutex_unlock(&vs->dev.mutex); in vhost_scsi_do_plug()