Lines matching refs: kvm
160 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
211 static void kvm_s390_sync_dirty_log(struct kvm *kvm, in kvm_s390_sync_dirty_log() argument
216 struct gmap *gmap = kvm->arch.gmap; in kvm_s390_sync_dirty_log()
225 mark_page_dirty(kvm, cur_gfn); in kvm_s390_sync_dirty_log()
234 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
242 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
248 memslot = id_to_memslot(kvm->memslots, log->slot); in kvm_vm_ioctl_get_dirty_log()
253 kvm_s390_sync_dirty_log(kvm, memslot); in kvm_vm_ioctl_get_dirty_log()
254 r = kvm_get_dirty_log(kvm, log, &is_dirty); in kvm_vm_ioctl_get_dirty_log()
265 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
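The two helpers above are where the generic KVM_GET_DIRTY_LOG VM ioctl ends up on s390: kvm_s390_sync_dirty_log() walks the gmap and marks touched guest pages dirty, then kvm_get_dirty_log() copies the per-slot bitmap out under kvm->slots_lock. A minimal caller-side sketch, not part of the listing; vm_fd is assumed to come from KVM_CREATE_VM and slot 0 to have been registered with KVM_MEM_LOG_DIRTY_PAGES:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Fetch and clear the dirty bitmap for memslot 0 (one bit per 4K guest page). */
    static int fetch_dirty_log(int vm_fd, size_t mem_size, unsigned long **bitmap_out)
    {
        size_t pages = (mem_size + 4095) / 4096;
        size_t bytes = ((pages + 63) / 64) * 8;   /* rounded up to 64-bit words */
        unsigned long *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log;

        if (!bitmap)
            return -1;
        memset(&log, 0, sizeof(log));
        log.slot = 0;
        log.dirty_bitmap = bitmap;
        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
            free(bitmap);
            return -1;
        }
        *bitmap_out = bitmap;
        return 0;
    }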
269 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
278 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
282 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
286 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
287 if (atomic_read(&kvm->online_vcpus)) { in kvm_vm_ioctl_enable_cap()
290 set_kvm_facility(kvm->arch.model.fac->mask, 129); in kvm_vm_ioctl_enable_cap()
291 set_kvm_facility(kvm->arch.model.fac->list, 129); in kvm_vm_ioctl_enable_cap()
295 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
298 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
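kvm_vm_ioctl_enable_cap() is reached through KVM_ENABLE_CAP on the VM file descriptor and switches on the VM-wide features visible above: the in-kernel irqchip (use_irqchip), user-handled SIGP (user_sigp), facility 129 (the vector facility, only while no vCPU exists yet), and user-handled STSI (user_stsi). A hedged sketch of the userspace side; the capability constant in the comment is only an example:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Enable a VM-wide capability; on s390 this reaches kvm_vm_ioctl_enable_cap(). */
    static int enable_vm_cap(int vm_fd, unsigned int cap)
    {
        struct kvm_enable_cap ec;

        memset(&ec, 0, sizeof(ec));
        ec.cap = cap;                /* e.g. KVM_CAP_S390_USER_SIGP */
        return ioctl(vm_fd, KVM_ENABLE_CAP, &ec);
    }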
308 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
315 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
325 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
332 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
333 if (atomic_read(&kvm->online_vcpus) == 0) { in kvm_s390_set_mem_control()
334 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
337 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
340 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
341 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
342 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
343 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
344 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
350 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
356 if (new_limit > kvm->arch.gmap->asce_end) in kvm_s390_set_mem_control()
360 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
361 if (atomic_read(&kvm->online_vcpus) == 0) { in kvm_s390_set_mem_control()
368 gmap_free(kvm->arch.gmap); in kvm_s390_set_mem_control()
369 new->private = kvm; in kvm_s390_set_mem_control()
370 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
374 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
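kvm_s390_set_mem_control() backs the KVM_S390_VM_MEM_CTRL group of KVM_SET_DEVICE_ATTR on the VM fd: enabling CMMA (only while online_vcpus is still 0), resetting CMMA state via s390_reset_cmma(), and lowering the guest address-space limit by replacing the gmap. A sketch of the enable-CMMA call, assuming the KVM_S390_VM_MEM_* constants from the s390 uapi headers are visible through <linux/kvm.h> on an s390 build:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Enable collaborative memory management (CMMA) before any vCPU is created. */
    static int enable_cmma(int vm_fd)
    {
        struct kvm_device_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_S390_VM_MEM_CTRL;
        attr.attr  = KVM_S390_VM_MEM_ENABLE_CMMA;
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }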
386 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
391 if (!test_kvm_facility(kvm, 76)) in kvm_s390_vm_set_crypto()
394 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
398 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
399 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
400 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
404 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
405 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
406 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
409 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
410 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
411 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
414 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
415 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
416 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
419 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
423 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
427 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
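kvm_s390_vm_set_crypto() (the KVM_S390_VM_CRYPTO group) toggles AES/DEA key wrapping: enabling a mode regenerates the wrapping-key mask in the CRYCB, disabling it clears the mask, and the kvm_for_each_vcpu() loop at the end pushes the new setting to every vCPU; everything is guarded by facility 76. A sketch of enabling AES key wrapping, with the KVM_S390_VM_CRYPTO_* attribute name assumed from the s390 uapi headers:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Turn on AES key wrapping; the kernel regenerates the AES wrapping-key mask. */
    static int enable_aes_key_wrap(int vm_fd)
    {
        struct kvm_device_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_S390_VM_CRYPTO;
        attr.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;  /* attribute name assumed */
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }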
431 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
445 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
459 mutex_lock(&kvm->lock); in kvm_s390_set_tod_low()
460 kvm->arch.epoch = gtod - host_tod; in kvm_s390_set_tod_low()
461 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { in kvm_s390_set_tod_low()
462 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_low()
465 mutex_unlock(&kvm->lock); in kvm_s390_set_tod_low()
469 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
478 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
481 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
490 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
501 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
510 gtod = host_tod + kvm->arch.epoch; in kvm_s390_get_tod_low()
517 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
526 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
529 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
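The TOD setters and getters above maintain a single per-VM offset: kvm_s390_set_tod_low() stores gtod - host_tod in kvm->arch.epoch and copies it into every vCPU's SIE block, and kvm_s390_get_tod_low() returns host_tod + epoch. Userspace drives this through the KVM_S390_VM_TOD attribute group; a sketch, with guest_tod as a caller-supplied value and the constants assumed from the s390 uapi headers:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Set the low 64 bits of the guest TOD clock; the kernel keeps guest_tod - host_tod
     * as kvm->arch.epoch and propagates it to each vCPU's SIE block. */
    static int set_guest_tod_low(int vm_fd, uint64_t guest_tod)
    {
        struct kvm_device_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_S390_VM_TOD;
        attr.attr  = KVM_S390_VM_TOD_LOW;
        attr.addr  = (uint64_t)(uintptr_t)&guest_tod;
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }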
538 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
543 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
544 if (atomic_read(&kvm->online_vcpus)) { in kvm_s390_set_processor()
555 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, in kvm_s390_set_processor()
557 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
558 memcpy(kvm->arch.model.fac->list, proc->fac_list, in kvm_s390_set_processor()
564 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
568 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
574 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
580 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
590 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); in kvm_s390_get_processor()
591 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
592 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); in kvm_s390_get_processor()
600 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
612 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, in kvm_s390_get_machine()
623 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
629 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
632 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
638 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
644 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
647 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
650 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
653 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
663 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
669 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
672 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
675 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
685 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
745 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
771 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
794 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
825 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
850 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
862 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
870 r = kvm_vm_ioctl_enable_cap(kvm, &cap); in kvm_arch_vm_ioctl()
877 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
880 kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
889 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
896 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
903 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
913 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
923 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
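kvm_arch_vm_ioctl() is the s390 dispatcher for the VM-level ioctls referenced above (interrupt injection, capability enabling, irq routing, device attributes, storage keys). For the storage-key pair, userspace passes a struct kvm_s390_skeys naming a guest-frame range and a buffer holding one key byte per 4K page; a sketch of the read side, with vm_fd and keys as assumed caller-provided arguments:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Read 'count' storage keys starting at guest frame 'start_gfn' into 'keys'. */
    static int get_storage_keys(int vm_fd, uint64_t start_gfn, uint64_t count, uint8_t *keys)
    {
        struct kvm_s390_skeys args;

        memset(&args, 0, sizeof(args));
        args.start_gfn = start_gfn;
        args.count = count;                          /* one key byte per 4K page */
        args.skeydata_addr = (uint64_t)(uintptr_t)keys;
        return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
    }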
972 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
974 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
977 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
979 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
988 static int kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
990 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
993 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), in kvm_s390_crypto_init()
995 if (!kvm->arch.crypto.crycb) in kvm_s390_crypto_init()
998 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
1001 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
1002 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
1003 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
1004 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
1005 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
1006 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
1011 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
1034 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); in kvm_arch_init_vm()
1035 if (!kvm->arch.sca) in kvm_arch_init_vm()
1041 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
1046 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); in kvm_arch_init_vm()
1047 if (!kvm->arch.dbf) in kvm_arch_init_vm()
1057 kvm->arch.model.fac = in kvm_arch_init_vm()
1059 if (!kvm->arch.model.fac) in kvm_arch_init_vm()
1063 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, in kvm_arch_init_vm()
1067 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; in kvm_arch_init_vm()
1069 kvm->arch.model.fac->mask[i] = 0UL; in kvm_arch_init_vm()
1073 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, in kvm_arch_init_vm()
1076 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); in kvm_arch_init_vm()
1077 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; in kvm_arch_init_vm()
1079 if (kvm_s390_crypto_init(kvm) < 0) in kvm_arch_init_vm()
1082 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
1084 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
1085 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
1086 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
1088 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
1089 VM_EVENT(kvm, 3, "%s", "vm created"); in kvm_arch_init_vm()
1092 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
1094 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); in kvm_arch_init_vm()
1095 if (!kvm->arch.gmap) in kvm_arch_init_vm()
1097 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
1098 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
1101 kvm->arch.css_support = 0; in kvm_arch_init_vm()
1102 kvm->arch.use_irqchip = 0; in kvm_arch_init_vm()
1103 kvm->arch.epoch = 0; in kvm_arch_init_vm()
1105 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
1109 kfree(kvm->arch.crypto.crycb); in kvm_arch_init_vm()
1110 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_init_vm()
1111 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
1112 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_init_vm()
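kvm_arch_init_vm() is what KVM_CREATE_VM lands in: it allocates the SCA page, registers the s390 debug feature, builds the facility mask and list from the host's STFLE data, initializes the crypto control block, and, for non-ucontrol guests, allocates a gmap limited to (1UL << 44) - 1. The userspace trigger is the ordinary VM-creation sequence; a minimal sketch:

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Create a VM; KVM_CREATE_VM is what ends up in kvm_arch_init_vm() on s390. */
    static int create_vm(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm_fd;

        if (kvm_fd < 0)
            return -1;
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);   /* type 0: regular (non-ucontrol) VM */
        return vm_fd;
    }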
1122 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_destroy()
1124 (unsigned long *) &vcpu->kvm->arch.sca->mcn); in kvm_arch_vcpu_destroy()
1125 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == in kvm_arch_vcpu_destroy()
1127 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; in kvm_arch_vcpu_destroy()
1131 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1134 if (kvm_s390_cmma_enabled(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1142 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
1147 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
1150 mutex_lock(&kvm->lock); in kvm_free_vcpus()
1151 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
1152 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
1154 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
1155 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
1158 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
1160 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
1161 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_destroy_vm()
1162 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_destroy_vm()
1163 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
1164 kfree(kvm->arch.crypto.crycb); in kvm_arch_destroy_vm()
1165 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
1166 gmap_free(kvm->arch.gmap); in kvm_arch_destroy_vm()
1167 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
1168 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
1177 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
1192 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_init()
1195 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_init()
1204 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_load()
1209 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_load()
1225 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_put()
1234 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_put()
1259 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_s390_vcpu_initial_reset()
1266 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1267 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
1268 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1269 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_postcreate()
1270 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
1275 if (!test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
1280 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
1282 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
1285 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
1307 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
1325 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) in kvm_arch_vcpu_setup()
1334 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_setup()
1340 if (kvm_s390_cmma_enabled(vcpu->kvm)) { in kvm_arch_vcpu_setup()
1353 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, in kvm_arch_vcpu_create() argument
1378 if (!kvm_is_ucontrol(kvm)) { in kvm_arch_vcpu_create()
1379 if (!kvm->arch.sca) { in kvm_arch_vcpu_create()
1383 if (!kvm->arch.sca->cpu[id].sda) in kvm_arch_vcpu_create()
1384 kvm->arch.sca->cpu[id].sda = in kvm_arch_vcpu_create()
1387 (__u32)(((__u64)kvm->arch.sca) >> 32); in kvm_arch_vcpu_create()
1388 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; in kvm_arch_vcpu_create()
1389 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); in kvm_arch_vcpu_create()
1393 vcpu->arch.local_int.float_int = &kvm->arch.float_int; in kvm_arch_vcpu_create()
1397 rc = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
1400 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, in kvm_arch_vcpu_create()
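kvm_arch_vcpu_create() wires the new vCPU into the SCA (sda pointer, mcn bit) for non-ucontrol guests and points its local interrupt state at the VM's floating-interrupt structure. It is reached from KVM_CREATE_VCPU; a sketch of the userspace side, which also maps the shared kvm_run area sized by KVM_GET_VCPU_MMAP_SIZE (kvm_fd is the /dev/kvm descriptor, vm_fd the VM descriptor):

    #include <stddef.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    /* Create vCPU 'id' and mmap its shared kvm_run area. */
    static struct kvm_run *create_vcpu(int kvm_fd, int vm_fd, int id, int *vcpu_fd_out)
    {
        int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);
        long run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run;

        if (vcpu_fd < 0 || run_size <= 0)
            return NULL;
        run = mmap(NULL, run_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
        if (run == MAP_FAILED)
            return NULL;
        *vcpu_fd_out = vcpu_fd;
        return run;
    }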
1449 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
1452 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
1691 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
1710 bool kvm_s390_cmma_enabled(struct kvm *kvm) in kvm_s390_cmma_enabled() argument
1717 if (!kvm->arch.use_cmma) in kvm_s390_cmma_enabled()
1806 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
1859 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
1887 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
1948 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
1972 if (kvm_is_ucontrol(vcpu->kvm)) in vcpu_post_run()
1990 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
1997 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2008 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2013 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2074 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
2199 if (!test_kvm_facility(vcpu->kvm, 129)) in kvm_s390_vcpu_store_adtl_status()
2219 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
2224 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
2245 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2246 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
2249 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
2262 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
2271 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2285 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2286 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
2295 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
2297 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
2309 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2323 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
2324 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
2325 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
2357 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_mem_op()
2386 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_mem_op()
2426 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
2428 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
2463 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2480 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2554 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
2563 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_arch_create_memslot() argument
2570 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
2589 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
2607 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()