Lines matching references to vs (struct vhost_scsi *) in the vhost-scsi driver, drivers/vhost/scsi.c. Each entry gives the source line number, the matching code, the enclosing function, and whether vs is an argument or a local variable there.

227 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,  in vhost_scsi_init_inflight()  argument
235 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
240 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
242 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
245 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
246 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
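
The init_inflight fragments above (lines 227-246) implement double-buffered inflight accounting: each virtqueue carries two inflight counters and an inflight_idx that is flipped with idx ^ 1, so new requests charge the fresh slot while a flush can wait on the old one. A minimal sketch of how the listed lines fit together; the kref_init()/init_completion() calls on the new slot are assumptions drawn from the surrounding driver, not from this listing:

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                     struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* remember the currently active counter for the caller */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* flip to the other slot; new requests will use it */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];

                kref_init(&new_inflight->kref);           /* assumed init */
                init_completion(&new_inflight->comp);     /* assumed init */

                mutex_unlock(&vq->mutex);
        }
}
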
375 struct vhost_scsi *vs = cmd->tvc_vhost; in vhost_scsi_complete_cmd() local
377 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); in vhost_scsi_complete_cmd()
379 vhost_work_queue(&vs->dev, &vs->vs_completion_work); in vhost_scsi_complete_cmd()
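
Lines 375-379 show the completion fast path: the finished command is pushed onto the lock-free vs_completion_list and the vs_completion_work item is queued on the vhost worker, which later writes the response into the virtqueue. A sketch of that path as it appears from these fragments (signature and field names are taken from the listing):

static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        /* lock-free add: safe from the target completion context */
        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        /* defer the virtqueue update to the vhost worker thread */
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
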
408 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_free_evt() argument
410 vs->vs_events_nr--; in vhost_scsi_free_evt()
415 vhost_scsi_allocate_evt(struct vhost_scsi *vs, in vhost_scsi_allocate_evt() argument
418 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
421 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { in vhost_scsi_allocate_evt()
422 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
429 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
435 vs->vs_events_nr++; in vhost_scsi_allocate_evt()
455 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_do_evt_work() argument
457 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
464 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
469 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
474 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
478 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
480 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
487 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
491 if (vs->vs_events_missed) { in vhost_scsi_do_evt_work()
493 vs->vs_events_missed = false; in vhost_scsi_do_evt_work()
499 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
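
The do_evt_work fragments (lines 455-499) show the event-queue handshake: disable guest notifications, try to pull a descriptor, re-enable and retry if the ring was empty, and set vs_events_missed whenever an event cannot be delivered so the next event carries VIRTIO_SCSI_T_EVENTS_MISSED. A condensed sketch of that control flow, reconstructed around the listed lines; the descriptor-size check and the copy into the guest buffer are elided:

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        unsigned out, in;
        int head;

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                 &out, &in, NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                /* ring empty: re-arm notification and retry once */
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                /* tell the guest it lost events since the last delivery */
                evt->event.event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        /* ... copy evt->event into the guest buffer at vq->iov[out] ... */
        vhost_add_used_and_signal(&vs->dev, vq, head, 0);
}
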
506 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_evt_work() local
508 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
513 llnode = llist_del_all(&vs->vs_event_list); in vhost_scsi_evt_work()
517 vhost_scsi_do_evt_work(vs, evt); in vhost_scsi_evt_work()
518 vhost_scsi_free_evt(vs, evt); in vhost_scsi_evt_work()
530 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_complete_cmd_work() local
541 llnode = llist_del_all(&vs->vs_completion_list); in vhost_scsi_complete_cmd_work()
567 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
578 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
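
On the worker side (lines 530-578), the completion work detaches the whole list with llist_del_all(), walks it, records which virtqueue each command belongs to (vq = q - vs->vqs), and finally signals only the queues that actually completed something. A condensed sketch of that loop; the response copy-out and command release are elided, and the bitmap bookkeeping is an assumption based on the surrounding driver:

static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                             vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct vhost_scsi_virtqueue *q;
        struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        int vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);   /* grab the whole list */
        llist_for_each_entry(cmd, llnode, tvc_completion_list) {
                /* ... copy virtio_scsi_cmd_resp back to the guest ... */
                q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                vq = q - vs->vqs;                          /* index into vs->vqs[] */
                __set_bit(vq, signal);
                /* ... vhost_add_used(), release the command ... */
        }

        /* signal every virtqueue that had at least one completion */
        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
               < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
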
828 vhost_scsi_send_bad_target(struct vhost_scsi *vs, in vhost_scsi_send_bad_target() argument
841 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
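
vhost_scsi_send_bad_target() (lines 828-841) is the shared error path used throughout handle_vq below: it fills a virtio_scsi_cmd_resp with VIRTIO_SCSI_S_BAD_TARGET, copies it into the guest-supplied buffer and completes the descriptor. A sketch consistent with the two listed lines; the user-copy details are an assumption:

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
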
847 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
874 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
887 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_handle_vq()
888 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
938 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
944 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
951 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
997 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1005 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1043 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1052 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1055 cmd->tvc_vhost = vs; in vhost_scsi_handle_vq()
1072 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1101 vhost_scsi_send_evt(struct vhost_scsi *vs, in vhost_scsi_send_evt() argument
1109 evt = vhost_scsi_allocate_evt(vs, event, reason); in vhost_scsi_send_evt()
1126 llist_add(&evt->list, &vs->vs_event_list); in vhost_scsi_send_evt()
1127 vhost_work_queue(&vs->dev, &vs->vs_event_work); in vhost_scsi_send_evt()
1134 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick() local
1140 if (vs->vs_events_missed) in vhost_scsi_evt_handle_kick()
1141 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); in vhost_scsi_evt_handle_kick()
1150 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick() local
1152 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
1155 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) in vhost_scsi_flush_vq() argument
1157 vhost_poll_flush(&vs->vqs[index].vq.poll); in vhost_scsi_flush_vq()
1161 static void vhost_scsi_flush(struct vhost_scsi *vs) in vhost_scsi_flush() argument
1167 vhost_scsi_init_inflight(vs, old_inflight); in vhost_scsi_flush()
1179 vhost_scsi_flush_vq(vs, i); in vhost_scsi_flush()
1180 vhost_work_flush(&vs->dev, &vs->vs_completion_work); in vhost_scsi_flush()
1181 vhost_work_flush(&vs->dev, &vs->vs_event_work); in vhost_scsi_flush()
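
vhost_scsi_flush() (lines 1161-1181) combines the inflight swap with the vhost flush primitives: it installs fresh inflight counters, flushes every virtqueue's poll plus the completion and event work items, and then waits for the old counters to drain. A sketch of that ordering; the kref_put()/wait_for_completion() steps and the vhost_scsi_done_inflight release callback are assumptions reconstructed around the init_inflight fragments above:

static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* install new inflight counters and remember the old ones */
        vhost_scsi_init_inflight(vs, old_inflight);

        /* drop the initial reference so the old counters can reach zero */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

        /* flush both the vhost poll and the deferred work items */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* wait for every request issued before the flush to finish */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}
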
1196 vhost_scsi_set_endpoint(struct vhost_scsi *vs, in vhost_scsi_set_endpoint() argument
1208 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1211 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_set_endpoint()
1213 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1225 if (vs->vs_tpg) in vhost_scsi_set_endpoint()
1226 memcpy(vs_tpg, vs->vs_tpg, len); in vhost_scsi_set_endpoint()
1241 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { in vhost_scsi_set_endpoint()
1262 tpg->vhost_scsi = vs; in vhost_scsi_set_endpoint()
1271 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, in vhost_scsi_set_endpoint()
1272 sizeof(vs->vs_vhost_wwpn)); in vhost_scsi_set_endpoint()
1274 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1289 vhost_scsi_flush(vs); in vhost_scsi_set_endpoint()
1290 kfree(vs->vs_tpg); in vhost_scsi_set_endpoint()
1291 vs->vs_tpg = vs_tpg; in vhost_scsi_set_endpoint()
1294 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
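
set_endpoint (lines 1196-1294) follows a copy-and-swap pattern for the target table: under vs->dev.mutex it builds a new vs_tpg array seeded from the old one, wires the matched tpgs and virtqueues, then flushes outstanding requests before freeing the old array and publishing the new pointer. A compressed sketch of that sequence; the vq access checks, error handling and per-vq private_data setup are elided, and the array length calculation is an assumption:

static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct vhost_scsi_tpg **vs_tpg;
        size_t len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;  /* assumed */
        int ret = 0;

        mutex_lock(&vs->dev.mutex);

        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);   /* start from the old table */

        /* ... match tpgs against t->vhost_wwpn, fill vs_tpg[], set
         *     tpg->vhost_scsi = vs and each vq's private data ... */

        memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, sizeof(vs->vs_vhost_wwpn));

        /* make sure no request still sees the old table, then swap */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        return ret;
}
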
1300 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, in vhost_scsi_clear_endpoint() argument
1312 mutex_lock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1314 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_clear_endpoint()
1315 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1321 if (!vs->vs_tpg) { in vhost_scsi_clear_endpoint()
1328 tpg = vs->vs_tpg[target]; in vhost_scsi_clear_endpoint()
1349 vs->vs_tpg[target] = NULL; in vhost_scsi_clear_endpoint()
1361 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1371 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1372 kfree(vs->vs_tpg); in vhost_scsi_clear_endpoint()
1373 vs->vs_tpg = NULL; in vhost_scsi_clear_endpoint()
1374 WARN_ON(vs->vs_events_nr); in vhost_scsi_clear_endpoint()
1375 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1382 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1387 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) in vhost_scsi_set_features() argument
1395 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_features()
1397 !vhost_log_access_ok(&vs->dev)) { in vhost_scsi_set_features()
1398 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1403 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1408 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1414 struct vhost_scsi *vs; in vhost_scsi_open() local
1418 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); in vhost_scsi_open()
1419 if (!vs) { in vhost_scsi_open()
1420 vs = vzalloc(sizeof(*vs)); in vhost_scsi_open()
1421 if (!vs) in vhost_scsi_open()
1429 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); in vhost_scsi_open()
1430 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); in vhost_scsi_open()
1432 vs->vs_events_nr = 0; in vhost_scsi_open()
1433 vs->vs_events_missed = false; in vhost_scsi_open()
1435 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1436 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1437 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1438 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1440 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1441 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1443 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); in vhost_scsi_open()
1445 vhost_scsi_init_inflight(vs, NULL); in vhost_scsi_open()
1447 f->private_data = vs; in vhost_scsi_open()
1451 kvfree(vs); in vhost_scsi_open()
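
vhost_scsi_open() (lines 1414-1451) shows the allocation fallback and virtqueue wiring: the device is first allocated with kzalloc() (no warning on failure) and falls back to vzalloc() for the large struct, the control and event queues get dedicated kick handlers, every I/O queue gets vhost_scsi_handle_kick, and the inflight counters are initialized once before the fd is set up. A sketch of the setup path; the vqs array allocation and the error label names are assumptions:

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
        struct vhost_scsi *vs;
        struct vhost_virtqueue **vqs;
        int i;

        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vs) {
                vs = vzalloc(sizeof(*vs));   /* fall back for the big struct */
                if (!vs)
                        goto err_vs;
        }

        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);  /* assumed */
        if (!vqs)
                goto err_vqs;

        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

        vs->vs_events_nr = 0;
        vs->vs_events_missed = false;

        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

        vhost_scsi_init_inflight(vs, NULL);

        f->private_data = vs;
        return 0;

err_vqs:
        kvfree(vs);
err_vs:
        return -ENOMEM;
}
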
1458 struct vhost_scsi *vs = f->private_data; in vhost_scsi_release() local
1461 mutex_lock(&vs->dev.mutex); in vhost_scsi_release()
1462 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); in vhost_scsi_release()
1463 mutex_unlock(&vs->dev.mutex); in vhost_scsi_release()
1464 vhost_scsi_clear_endpoint(vs, &t); in vhost_scsi_release()
1465 vhost_dev_stop(&vs->dev); in vhost_scsi_release()
1466 vhost_dev_cleanup(&vs->dev, false); in vhost_scsi_release()
1468 vhost_scsi_flush(vs); in vhost_scsi_release()
1469 kfree(vs->dev.vqs); in vhost_scsi_release()
1470 kvfree(vs); in vhost_scsi_release()
1479 struct vhost_scsi *vs = f->private_data; in vhost_scsi_ioctl() local
1487 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl()
1496 return vhost_scsi_set_endpoint(vs, &backend); in vhost_scsi_ioctl()
1503 return vhost_scsi_clear_endpoint(vs, &backend); in vhost_scsi_ioctl()
1512 vs->vs_events_missed = events_missed; in vhost_scsi_ioctl()
1517 events_missed = vs->vs_events_missed; in vhost_scsi_ioctl()
1530 return vhost_scsi_set_features(vs, features); in vhost_scsi_ioctl()
1532 mutex_lock(&vs->dev.mutex); in vhost_scsi_ioctl()
1533 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1536 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1537 mutex_unlock(&vs->dev.mutex); in vhost_scsi_ioctl()
1598 struct vhost_scsi *vs = tpg->vhost_scsi; in vhost_scsi_do_plug() local
1602 if (!vs) in vhost_scsi_do_plug()
1605 mutex_lock(&vs->dev.mutex); in vhost_scsi_do_plug()
1612 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
1615 vhost_scsi_send_evt(vs, tpg, lun, in vhost_scsi_do_plug()
1618 mutex_unlock(&vs->dev.mutex); in vhost_scsi_do_plug()
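
Finally, the hotplug path (lines 1598-1618) shows how a LUN plug or unplug becomes a guest event: if the tpg has an associated vhost_scsi instance and the guest negotiated hotplug support on the event queue, vhost_scsi_send_evt() (lines 1101-1127) allocates an event, pushes it onto vs_event_list and kicks the event work. A condensed sketch; the feature check and reason selection are simplified here:

static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun, bool plug)
{
        struct vhost_scsi *vs = tpg->vhost_scsi;
        struct vhost_virtqueue *vq;
        u32 reason;

        /* nothing to do until an endpoint has been set */
        if (!vs)
                return;

        mutex_lock(&vs->dev.mutex);

        reason = plug ? VIRTIO_SCSI_EVT_RESET_RESCAN
                      : VIRTIO_SCSI_EVT_RESET_REMOVED;

        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        /* only send the event if the guest negotiated hotplug support */
        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
                vhost_scsi_send_evt(vs, tpg, lun,
                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);

        mutex_unlock(&vs->dev.mutex);
}
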