Lines matching refs:kvm
152 struct kvm *kvm; in kvm_clock_sync() local
157 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
158 kvm->arch.epoch -= *delta; in kvm_clock_sync()
159 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
215 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
266 static void kvm_s390_sync_dirty_log(struct kvm *kvm, in kvm_s390_sync_dirty_log() argument
271 struct gmap *gmap = kvm->arch.gmap; in kvm_s390_sync_dirty_log()
280 mark_page_dirty(kvm, cur_gfn); in kvm_s390_sync_dirty_log()
289 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
298 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
304 slots = kvm_memslots(kvm); in kvm_vm_ioctl_get_dirty_log()
310 kvm_s390_sync_dirty_log(kvm, memslot); in kvm_vm_ioctl_get_dirty_log()
311 r = kvm_get_dirty_log(kvm, log, &is_dirty); in kvm_vm_ioctl_get_dirty_log()
322 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
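The kvm_vm_ioctl_get_dirty_log() references above back the generic KVM_GET_DIRTY_LOG VM ioctl. A minimal userspace sketch of fetching one memslot's dirty bitmap follows; the slot number and the 32-byte bitmap (256 guest pages) are assumptions, and the real bitmap must be sized to the memslot's page count.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch the dirty-page bitmap for one memory slot of an existing VM fd.
     * Slot 0 and a 256-page slot (32 bitmap bytes) are assumptions. */
    static int get_dirty_bitmap(int vm_fd)
    {
            unsigned char bitmap[32];       /* one bit per 4 KiB guest page */
            struct kvm_dirty_log log;

            memset(&log, 0, sizeof(log));
            log.slot = 0;                   /* memslot to query (assumed) */
            log.dirty_bitmap = bitmap;

            /* serviced by kvm_vm_ioctl_get_dirty_log() on s390 */
            return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
    }
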
326 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
335 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
336 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
340 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
341 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
345 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
346 if (atomic_read(&kvm->online_vcpus)) { in kvm_vm_ioctl_enable_cap()
349 set_kvm_facility(kvm->arch.model.fac->mask, 129); in kvm_vm_ioctl_enable_cap()
350 set_kvm_facility(kvm->arch.model.fac->list, 129); in kvm_vm_ioctl_enable_cap()
354 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
355 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
359 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
360 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
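kvm_vm_ioctl_enable_cap() handles the VM-level KVM_ENABLE_CAP ioctl for the capabilities listed above (irqchip, user SIGP, vector registers, user STSI). A minimal sketch of how userspace would turn on the in-kernel irqchip; error handling is reduced to the return value.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Enable the in-kernel s390 irqchip on an existing VM fd;
     * reaches kvm_vm_ioctl_enable_cap() with KVM_CAP_S390_IRQCHIP. */
    static int enable_irqchip(int vm_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_S390_IRQCHIP;

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }
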
370 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
377 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
378 kvm->arch.gmap->asce_end); in kvm_s390_get_mem_control()
379 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
389 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
401 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
402 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
403 if (atomic_read(&kvm->online_vcpus) == 0) { in kvm_s390_set_mem_control()
404 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
407 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
411 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
414 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
415 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
416 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
417 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
418 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
419 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
425 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
431 if (new_limit > kvm->arch.gmap->asce_end) in kvm_s390_set_mem_control()
435 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
436 if (atomic_read(&kvm->online_vcpus) == 0) { in kvm_s390_set_mem_control()
443 gmap_free(kvm->arch.gmap); in kvm_s390_set_mem_control()
444 new->private = kvm; in kvm_s390_set_mem_control()
445 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
449 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
450 VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit); in kvm_s390_set_mem_control()
462 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
467 if (!test_kvm_facility(kvm, 76)) in kvm_s390_vm_set_crypto()
470 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
474 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
475 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
476 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
477 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
481 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
482 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
483 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
484 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
487 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
488 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
489 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
490 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
493 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
494 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
495 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
496 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
499 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
503 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
507 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
511 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
521 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
526 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
533 kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_set_tod_low()
534 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); in kvm_s390_set_tod_low()
538 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
547 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
550 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
559 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
566 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
571 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
575 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
578 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
583 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
592 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
595 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
604 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
609 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
610 if (atomic_read(&kvm->online_vcpus)) { in kvm_s390_set_processor()
621 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, in kvm_s390_set_processor()
623 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
624 memcpy(kvm->arch.model.fac->list, proc->fac_list, in kvm_s390_set_processor()
630 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
634 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
640 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
646 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
656 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); in kvm_s390_get_processor()
657 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
658 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); in kvm_s390_get_processor()
666 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
678 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, in kvm_s390_get_machine()
689 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
695 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
698 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
704 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
710 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
713 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
716 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
719 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
729 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
735 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
738 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
741 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
751 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
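kvm_s390_vm_set_attr(), kvm_s390_vm_get_attr() and kvm_s390_vm_has_attr() sit behind the VM-level KVM_SET/GET/HAS_DEVICE_ATTR ioctls and dispatch to the memory-control, TOD, CPU-model and crypto handlers listed above. A sketch of setting the guest TOD base from userspace; the attribute group and attr constants are taken to be the ones from the s390 uapi kvm header, and the TOD value is a placeholder.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Set the low (base) part of the guest TOD clock; ends up in
     * kvm_s390_set_tod_low() -> kvm_s390_set_tod_clock(). */
    static int set_guest_tod(int vm_fd, __u64 tod)
    {
            struct kvm_device_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.group = KVM_S390_VM_TOD;
            attr.attr  = KVM_S390_VM_TOD_LOW;
            attr.addr  = (__u64)(unsigned long)&tod;

            return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
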
811 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
837 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
860 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
893 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
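kvm_s390_get_skeys() and kvm_s390_set_skeys() implement the KVM_S390_GET_SKEYS/KVM_S390_SET_SKEYS VM ioctls used for storage-key migration. A sketch of reading the keys for a small guest frame range; the field names follow the uapi struct kvm_s390_skeys and the range itself is an arbitrary example.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read the storage keys of the first 256 guest frames into keys[];
     * serviced by kvm_s390_get_skeys() via kvm_arch_vm_ioctl(). */
    static int read_skeys(int vm_fd, unsigned char *keys /* >= 256 bytes */)
    {
            struct kvm_s390_skeys args;

            memset(&args, 0, sizeof(args));
            args.start_gfn = 0;             /* first guest frame (example) */
            args.count = 256;               /* number of keys to fetch */
            args.skeydata_addr = (__u64)(unsigned long)keys;

            return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
    }
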
918 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
930 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
938 r = kvm_vm_ioctl_enable_cap(kvm, &cap); in kvm_arch_vm_ioctl()
945 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
948 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
956 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
963 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
970 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
980 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
990 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
1039 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
1041 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
1044 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
1046 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
1055 static int kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
1057 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
1060 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), in kvm_s390_crypto_init()
1062 if (!kvm->arch.crypto.crycb) in kvm_s390_crypto_init()
1065 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
1068 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
1069 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
1070 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
1071 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
1072 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
1073 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
1078 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
1101 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); in kvm_arch_init_vm()
1102 if (!kvm->arch.sca) in kvm_arch_init_vm()
1108 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
1113 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
1114 if (!kvm->arch.dbf) in kvm_arch_init_vm()
1124 kvm->arch.model.fac = in kvm_arch_init_vm()
1126 if (!kvm->arch.model.fac) in kvm_arch_init_vm()
1130 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, in kvm_arch_init_vm()
1134 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; in kvm_arch_init_vm()
1136 kvm->arch.model.fac->mask[i] = 0UL; in kvm_arch_init_vm()
1140 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, in kvm_arch_init_vm()
1143 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); in kvm_arch_init_vm()
1144 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
1146 if (kvm_s390_crypto_init(kvm) < 0) in kvm_arch_init_vm()
1149 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
1151 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
1152 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
1153 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
1155 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
1156 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
1159 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
1161 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); in kvm_arch_init_vm()
1162 if (!kvm->arch.gmap) in kvm_arch_init_vm()
1164 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
1165 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
1168 kvm->arch.css_support = 0; in kvm_arch_init_vm()
1169 kvm->arch.use_irqchip = 0; in kvm_arch_init_vm()
1170 kvm->arch.epoch = 0; in kvm_arch_init_vm()
1172 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
1173 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
1177 kfree(kvm->arch.crypto.crycb); in kvm_arch_init_vm()
1178 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_init_vm()
1179 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
1180 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_init_vm()
1191 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_destroy()
1193 (unsigned long *) &vcpu->kvm->arch.sca->mcn); in kvm_arch_vcpu_destroy()
1194 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == in kvm_arch_vcpu_destroy()
1196 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; in kvm_arch_vcpu_destroy()
1200 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1203 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
1211 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
1216 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
1219 mutex_lock(&kvm->lock); in kvm_free_vcpus()
1220 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
1221 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
1223 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
1224 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
1227 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
1229 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
1230 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_destroy_vm()
1231 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_destroy_vm()
1232 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
1233 kfree(kvm->arch.crypto.crycb); in kvm_arch_destroy_vm()
1234 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
1235 gmap_free(kvm->arch.gmap); in kvm_arch_destroy_vm()
1236 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
1237 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
1238 KVM_EVENT(3, "vm 0x%p destroyed", kvm); in kvm_arch_destroy_vm()
1247 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
1262 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_init()
1265 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_init()
1329 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_s390_vcpu_initial_reset()
1336 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1338 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
1340 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1341 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_postcreate()
1342 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
1347 if (!test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
1352 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
1354 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
1357 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
1379 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
1394 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_arch_vcpu_setup()
1396 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_arch_vcpu_setup()
1402 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) in kvm_arch_vcpu_setup()
1411 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_setup()
1417 if (vcpu->kvm->arch.use_cmma) { in kvm_arch_vcpu_setup()
1430 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, in kvm_arch_vcpu_create() argument
1454 if (!kvm_is_ucontrol(kvm)) { in kvm_arch_vcpu_create()
1455 if (!kvm->arch.sca) { in kvm_arch_vcpu_create()
1459 if (!kvm->arch.sca->cpu[id].sda) in kvm_arch_vcpu_create()
1460 kvm->arch.sca->cpu[id].sda = in kvm_arch_vcpu_create()
1463 (__u32)(((__u64)kvm->arch.sca) >> 32); in kvm_arch_vcpu_create()
1464 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; in kvm_arch_vcpu_create()
1465 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); in kvm_arch_vcpu_create()
1469 vcpu->arch.local_int.float_int = &kvm->arch.float_int; in kvm_arch_vcpu_create()
1473 rc = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
1476 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, in kvm_arch_vcpu_create()
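kvm_arch_init_vm() and kvm_arch_vcpu_create() above are the architecture hooks behind the generic KVM_CREATE_VM and KVM_CREATE_VCPU ioctls. A minimal sketch of the userspace sequence that triggers them; error paths are collapsed and fds are not cleaned up.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Open /dev/kvm, create a VM (kvm_arch_init_vm) and one vCPU
     * (kvm_arch_vcpu_create / kvm_arch_vcpu_postcreate). */
    static int create_vm_and_vcpu(int *vm_fd, int *vcpu_fd)
    {
            int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

            if (kvm_fd < 0)
                    return -1;

            *vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);       /* type 0 */
            if (*vm_fd < 0)
                    return -1;

            *vcpu_fd = ioctl(*vm_fd, KVM_CREATE_VCPU, 0);   /* vcpu id 0 */
            return *vcpu_fd < 0 ? -1 : 0;
    }
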
1537 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
1540 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
1786 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
1862 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) in kvm_s390_set_tod_clock() argument
1867 mutex_lock(&kvm->lock); in kvm_s390_set_tod_clock()
1869 kvm->arch.epoch = tod - get_tod_clock(); in kvm_s390_set_tod_clock()
1870 kvm_s390_vcpu_block_all(kvm); in kvm_s390_set_tod_clock()
1871 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_s390_set_tod_clock()
1872 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
1873 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_set_tod_clock()
1875 mutex_unlock(&kvm->lock); in kvm_s390_set_tod_clock()
1907 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
1960 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
1988 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
2049 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
2073 if (kvm_is_ucontrol(vcpu->kvm)) in vcpu_post_run()
2091 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2098 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2111 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2116 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2177 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
2311 if (!test_kvm_facility(vcpu->kvm, 129)) in kvm_s390_vcpu_store_adtl_status()
2333 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
2338 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
2358 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2359 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
2362 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
2375 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
2384 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2398 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2399 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
2408 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
2410 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
2422 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2436 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
2437 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
2438 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
2439 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
2471 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_mem_op()
2500 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_mem_op()
2540 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
2542 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
2577 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2594 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2668 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
2677 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_arch_create_memslot() argument
2684 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
2703 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
2722 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
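
kvm_arch_prepare_memory_region() and kvm_arch_commit_memory_region() run when userspace registers guest memory with KVM_SET_USER_MEMORY_REGION; on s390 the commit step maps the new slot into the gmap via gmap_map_segment(). A minimal sketch of registering 1 MiB of anonymous memory as guest RAM; the slot number, addresses and size are arbitrary examples.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Back guest physical 0x0..0x100000 with freshly mmap()ed host memory;
     * triggers kvm_arch_prepare/commit_memory_region() -> gmap_map_segment(). */
    static int add_guest_ram(int vm_fd)
    {
            size_t size = 1UL << 20;        /* 1 MiB (example) */
            void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            struct kvm_userspace_memory_region region;

            if (mem == MAP_FAILED)
                    return -1;

            memset(&region, 0, sizeof(region));
            region.slot = 0;                /* memslot id (example) */
            region.guest_phys_addr = 0;
            region.memory_size = size;
            region.userspace_addr = (__u64)(unsigned long)mem;

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }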