Lines Matching refs:vcpu
384 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
388 struct kvm_vcpu *vcpu; in kvm_s390_vm_set_crypto() local
423 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
424 kvm_s390_vcpu_crypto_setup(vcpu); in kvm_s390_vm_set_crypto()
425 exit_sie(vcpu); in kvm_s390_vm_set_crypto()
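Note: the loop above (423-425) is driven from userspace through the VM crypto attribute interface; flipping a key-wrapping attribute reruns kvm_s390_vcpu_crypto_setup() on every vCPU and kicks it out of SIE. A minimal sketch, assuming vm_fd is an open KVM VM file descriptor:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Enable AES key wrapping for the whole VM; the kernel then refreshes
 * ECB3_AES in each vCPU's SIE block, as in the loop listed above. */
static int enable_aes_key_wrapping(int vm_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_CRYPTO;
	attr.attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}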
1116 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1118 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); in kvm_arch_vcpu_destroy()
1119 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
1120 kvm_s390_clear_local_irqs(vcpu); in kvm_arch_vcpu_destroy()
1121 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_destroy()
1122 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_destroy()
1123 clear_bit(63 - vcpu->vcpu_id, in kvm_arch_vcpu_destroy()
1124 (unsigned long *) &vcpu->kvm->arch.sca->mcn); in kvm_arch_vcpu_destroy()
1125 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == in kvm_arch_vcpu_destroy()
1126 (__u64) vcpu->arch.sie_block) in kvm_arch_vcpu_destroy()
1127 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; in kvm_arch_vcpu_destroy()
1131 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1132 gmap_free(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
1134 if (kvm_s390_cmma_enabled(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1135 kvm_s390_vcpu_unsetup_cmma(vcpu); in kvm_arch_vcpu_destroy()
1136 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
1138 kvm_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
1139 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_destroy()
1145 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
1147 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
1148 kvm_arch_vcpu_destroy(vcpu); in kvm_free_vcpus()
1172 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) in __kvm_ucontrol_vcpu_init() argument
1174 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
1175 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
1177 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
1182 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
1184 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_init()
1185 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_init()
1186 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_init()
1192 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_init()
1193 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_init()
1195 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_init()
1196 return __kvm_ucontrol_vcpu_init(vcpu); in kvm_arch_vcpu_init()
1201 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
1203 save_fp_ctl(&vcpu->arch.host_fpregs.fpc); in kvm_arch_vcpu_load()
1204 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_load()
1205 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); in kvm_arch_vcpu_load()
1207 save_fp_regs(vcpu->arch.host_fpregs.fprs); in kvm_arch_vcpu_load()
1208 save_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_load()
1209 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_load()
1210 restore_fp_ctl(&vcpu->run->s.regs.fpc); in kvm_arch_vcpu_load()
1211 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); in kvm_arch_vcpu_load()
1213 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); in kvm_arch_vcpu_load()
1214 restore_fp_regs(vcpu->arch.guest_fpregs.fprs); in kvm_arch_vcpu_load()
1216 restore_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_load()
1217 gmap_enable(vcpu->arch.gmap); in kvm_arch_vcpu_load()
1218 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_load()
1221 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
1223 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_put()
1224 gmap_disable(vcpu->arch.gmap); in kvm_arch_vcpu_put()
1225 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_put()
1226 save_fp_ctl(&vcpu->run->s.regs.fpc); in kvm_arch_vcpu_put()
1227 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); in kvm_arch_vcpu_put()
1229 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); in kvm_arch_vcpu_put()
1230 save_fp_regs(vcpu->arch.guest_fpregs.fprs); in kvm_arch_vcpu_put()
1232 save_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_put()
1233 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); in kvm_arch_vcpu_put()
1234 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_put()
1235 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); in kvm_arch_vcpu_put()
1237 restore_fp_regs(vcpu->arch.host_fpregs.fprs); in kvm_arch_vcpu_put()
1238 restore_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_put()
1241 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_initial_reset() argument
1244 vcpu->arch.sie_block->gpsw.mask = 0UL; in kvm_s390_vcpu_initial_reset()
1245 vcpu->arch.sie_block->gpsw.addr = 0UL; in kvm_s390_vcpu_initial_reset()
1246 kvm_s390_set_prefix(vcpu, 0); in kvm_s390_vcpu_initial_reset()
1247 vcpu->arch.sie_block->cputm = 0UL; in kvm_s390_vcpu_initial_reset()
1248 vcpu->arch.sie_block->ckc = 0UL; in kvm_s390_vcpu_initial_reset()
1249 vcpu->arch.sie_block->todpr = 0; in kvm_s390_vcpu_initial_reset()
1250 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); in kvm_s390_vcpu_initial_reset()
1251 vcpu->arch.sie_block->gcr[0] = 0xE0UL; in kvm_s390_vcpu_initial_reset()
1252 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; in kvm_s390_vcpu_initial_reset()
1253 vcpu->arch.guest_fpregs.fpc = 0; in kvm_s390_vcpu_initial_reset()
1254 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); in kvm_s390_vcpu_initial_reset()
1255 vcpu->arch.sie_block->gbea = 1; in kvm_s390_vcpu_initial_reset()
1256 vcpu->arch.sie_block->pp = 0; in kvm_s390_vcpu_initial_reset()
1257 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_s390_vcpu_initial_reset()
1258 kvm_clear_async_pf_completion_queue(vcpu); in kvm_s390_vcpu_initial_reset()
1259 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_s390_vcpu_initial_reset()
1260 kvm_s390_vcpu_stop(vcpu); in kvm_s390_vcpu_initial_reset()
1261 kvm_s390_clear_local_irqs(vcpu); in kvm_s390_vcpu_initial_reset()
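The reset above backs the KVM_S390_INITIAL_RESET vCPU ioctl. A minimal userspace sketch, assuming vcpu_fd is an open vCPU file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Request the initial CPU reset listed above; the ioctl takes no
 * argument and lands in kvm_s390_vcpu_initial_reset(). */
static int initial_cpu_reset(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
}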
1264 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1266 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1267 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
1268 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1269 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_postcreate()
1270 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
1273 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_crypto_setup() argument
1275 if (!test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
1278 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
1280 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
1281 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
1282 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
1283 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
1285 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
1288 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_unsetup_cmma() argument
1290 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
1291 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
1294 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_setup_cmma() argument
1296 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
1297 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
1300 vcpu->arch.sie_block->ecb2 |= 0x80; in kvm_s390_vcpu_setup_cmma()
1301 vcpu->arch.sie_block->ecb2 &= ~0x08; in kvm_s390_vcpu_setup_cmma()
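kvm_s390_vcpu_setup_cmma() only runs when CMMA was enabled for the VM, which userspace does via a memory-control attribute (typically before creating vCPUs). A sketch, assuming vm_fd is the VM fd:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Enable the collaborative memory management assist; vCPU setup then
 * allocates the CBRL origin page seen at line 1296. */
static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_MEM_CTRL;
	attr.attr = KVM_S390_VM_MEM_ENABLE_CMMA;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}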
1305 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_setup_model() argument
1307 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
1309 vcpu->arch.cpu_id = model->cpu_id; in kvm_s390_vcpu_setup_model()
1310 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
1311 vcpu->arch.sie_block->fac = (int) (long) model->fac->list; in kvm_s390_vcpu_setup_model()
1314 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
1318 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_arch_vcpu_setup()
1322 kvm_s390_vcpu_setup_model(vcpu); in kvm_arch_vcpu_setup()
1324 vcpu->arch.sie_block->ecb = 6; in kvm_arch_vcpu_setup()
1325 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) in kvm_arch_vcpu_setup()
1326 vcpu->arch.sie_block->ecb |= 0x10; in kvm_arch_vcpu_setup()
1328 vcpu->arch.sie_block->ecb2 = 8; in kvm_arch_vcpu_setup()
1329 vcpu->arch.sie_block->eca = 0xC1002000U; in kvm_arch_vcpu_setup()
1331 vcpu->arch.sie_block->eca |= 1; in kvm_arch_vcpu_setup()
1333 vcpu->arch.sie_block->eca |= 0x10000000U; in kvm_arch_vcpu_setup()
1334 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_setup()
1335 vcpu->arch.sie_block->eca |= 0x00020000; in kvm_arch_vcpu_setup()
1336 vcpu->arch.sie_block->ecd |= 0x20000000; in kvm_arch_vcpu_setup()
1338 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_arch_vcpu_setup()
1340 if (kvm_s390_cmma_enabled(vcpu->kvm)) { in kvm_arch_vcpu_setup()
1341 rc = kvm_s390_vcpu_setup_cmma(vcpu); in kvm_arch_vcpu_setup()
1345 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_arch_vcpu_setup()
1346 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_arch_vcpu_setup()
1348 kvm_s390_vcpu_crypto_setup(vcpu); in kvm_arch_vcpu_setup()
1356 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
1365 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create()
1366 if (!vcpu) in kvm_arch_vcpu_create()
1373 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
1374 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
1375 vcpu->arch.host_vregs = &sie_page->vregs; in kvm_arch_vcpu_create()
1377 vcpu->arch.sie_block->icpua = id; in kvm_arch_vcpu_create()
1385 (__u64) vcpu->arch.sie_block; in kvm_arch_vcpu_create()
1386 vcpu->arch.sie_block->scaoh = in kvm_arch_vcpu_create()
1388 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; in kvm_arch_vcpu_create()
1392 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
1393 vcpu->arch.local_int.float_int = &kvm->arch.float_int; in kvm_arch_vcpu_create()
1394 vcpu->arch.local_int.wq = &vcpu->wq; in kvm_arch_vcpu_create()
1395 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; in kvm_arch_vcpu_create()
1397 rc = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
1400 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, in kvm_arch_vcpu_create()
1401 vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1402 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1404 return vcpu; in kvm_arch_vcpu_create()
1406 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
1408 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_create()
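kvm_arch_vcpu_create() is reached through the generic KVM_CREATE_VCPU ioctl; the returned fd is then mmap'ed to obtain the shared kvm_run area used throughout this file. A sketch, assuming kvm_fd is the /dev/kvm fd and vm_fd the VM fd:

#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Create vCPU 'id' and map its kvm_run structure. */
static int create_and_map_vcpu(int kvm_fd, int vm_fd, unsigned int id,
			       struct kvm_run **run)
{
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);
	long size;

	if (vcpu_fd < 0)
		return -1;
	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	*run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, 0);
	return *run == MAP_FAILED ? -1 : vcpu_fd;
}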
1413 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
1415 return kvm_s390_vcpu_has_irq(vcpu, 0); in kvm_arch_vcpu_runnable()
1418 void s390_vcpu_block(struct kvm_vcpu *vcpu) in s390_vcpu_block() argument
1420 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in s390_vcpu_block()
1423 void s390_vcpu_unblock(struct kvm_vcpu *vcpu) in s390_vcpu_unblock() argument
1425 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in s390_vcpu_unblock()
1432 void exit_sie(struct kvm_vcpu *vcpu) in exit_sie() argument
1434 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); in exit_sie()
1435 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
1440 void exit_sie_sync(struct kvm_vcpu *vcpu) in exit_sie_sync() argument
1442 s390_vcpu_block(vcpu); in exit_sie_sync()
1443 exit_sie(vcpu); in exit_sie_sync()
1450 struct kvm_vcpu *vcpu; in kvm_gmap_notifier() local
1452 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
1454 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { in kvm_gmap_notifier()
1455 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); in kvm_gmap_notifier()
1456 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); in kvm_gmap_notifier()
1457 exit_sie_sync(vcpu); in kvm_gmap_notifier()
1462 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
1469 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_one_reg() argument
1476 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
1480 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
1484 r = put_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_get_one_reg()
1488 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
1492 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
1496 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
1500 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
1504 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
1508 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
1518 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_one_reg() argument
1525 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
1529 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
1533 r = get_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_set_one_reg()
1537 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
1541 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
1543 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
1544 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_ioctl_set_one_reg()
1547 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
1551 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
1555 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
1559 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
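Each put_user/get_user pair above backs one KVM_GET_ONE_REG/KVM_SET_ONE_REG register id. A sketch writing the CPU timer (which lands in vcpu->arch.sie_block->cputm at line 1533), assuming vcpu_fd is the vCPU fd:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int set_cpu_timer(int vcpu_fd, uint64_t value)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)&value,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}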
1569 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_initial_reset() argument
1571 kvm_s390_vcpu_initial_reset(vcpu); in kvm_arch_vcpu_ioctl_initial_reset()
1575 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
1577 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
1581 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
1583 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
1587 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
1590 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
1591 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
1592 restore_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_ioctl_set_sregs()
1596 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
1599 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
1600 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
1604 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1608 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
1609 vcpu->arch.guest_fpregs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
1610 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); in kvm_arch_vcpu_ioctl_set_fpu()
1611 restore_fp_regs(vcpu->arch.guest_fpregs.fprs); in kvm_arch_vcpu_ioctl_set_fpu()
1615 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1617 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
1618 fpu->fpc = vcpu->arch.guest_fpregs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
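The guest FP state above is exposed via KVM_GET_FPU/KVM_SET_FPU; on s390, struct kvm_fpu carries the FPC plus the 16 FPRs. A sketch:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_guest_fpu(int vcpu_fd, const uint64_t fprs[16], uint32_t fpc)
{
	struct kvm_fpu fpu;

	memset(&fpu, 0, sizeof(fpu));
	memcpy(fpu.fprs, fprs, sizeof(fpu.fprs));
	fpu.fpc = fpc;
	return ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
}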
1622 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) in kvm_arch_vcpu_ioctl_set_initial_psw() argument
1626 if (!is_vcpu_stopped(vcpu)) in kvm_arch_vcpu_ioctl_set_initial_psw()
1629 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
1630 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
1635 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
1645 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
1650 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1651 kvm_s390_clear_bp_data(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
1657 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
1659 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1662 rc = kvm_s390_import_bp_data(vcpu, dbg); in kvm_arch_vcpu_ioctl_set_guest_debug()
1664 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1665 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1669 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1670 kvm_s390_clear_bp_data(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
1671 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
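kvm_arch_vcpu_ioctl_set_guest_debug() is fed by KVM_SET_GUEST_DEBUG; enabling it sets CPUSTAT_P (PER) as at line 1659. A sketch that turns on single-stepping:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}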
1677 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
1681 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : in kvm_arch_vcpu_ioctl_get_mpstate()
1685 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
1691 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
1695 kvm_s390_vcpu_stop(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
1698 kvm_s390_vcpu_start(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
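Note that the first KVM_SET_MP_STATE flips the VM into user-controlled CPU state (line 1691), after which the kernel no longer auto-starts vCPUs on KVM_RUN. A sketch stopping a vCPU:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}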
1722 static bool ibs_enabled(struct kvm_vcpu *vcpu) in ibs_enabled() argument
1724 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; in ibs_enabled()
1727 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) in kvm_s390_handle_requests() argument
1730 s390_vcpu_unblock(vcpu); in kvm_s390_handle_requests()
1738 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { in kvm_s390_handle_requests()
1740 rc = gmap_ipte_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
1741 kvm_s390_get_prefix(vcpu), in kvm_s390_handle_requests()
1748 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { in kvm_s390_handle_requests()
1749 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
1753 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { in kvm_s390_handle_requests()
1754 if (!ibs_enabled(vcpu)) { in kvm_s390_handle_requests()
1755 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
1757 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1762 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { in kvm_s390_handle_requests()
1763 if (ibs_enabled(vcpu)) { in kvm_s390_handle_requests()
1764 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
1766 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1772 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvm_s390_handle_requests()
1787 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) in kvm_arch_fault_in_page() argument
1789 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
1793 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, in __kvm_inject_pfault_token() argument
1802 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); in __kvm_inject_pfault_token()
1806 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
1810 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
1813 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1814 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1817 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
1820 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
1821 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
1824 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, in kvm_arch_async_page_ready() argument
1830 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_inject_async_page_present() argument
1839 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) in kvm_arch_setup_async_pf() argument
1845 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
1847 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
1848 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
1850 if (psw_extint_disabled(vcpu)) in kvm_arch_setup_async_pf()
1852 if (kvm_s390_vcpu_has_irq(vcpu, 0)) in kvm_arch_setup_async_pf()
1854 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) in kvm_arch_setup_async_pf()
1856 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
1859 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
1861 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
1864 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
1868 static int vcpu_pre_run(struct kvm_vcpu *vcpu) in vcpu_pre_run() argument
1877 kvm_check_async_pf_completion(vcpu); in vcpu_pre_run()
1879 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); in vcpu_pre_run()
1887 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
1888 rc = kvm_s390_deliver_pending_interrupts(vcpu); in vcpu_pre_run()
1893 rc = kvm_s390_handle_requests(vcpu); in vcpu_pre_run()
1897 if (guestdbg_enabled(vcpu)) { in vcpu_pre_run()
1898 kvm_s390_backup_guest_per_regs(vcpu); in vcpu_pre_run()
1899 kvm_s390_patch_guest_per_regs(vcpu); in vcpu_pre_run()
1902 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
1903 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
1904 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); in vcpu_pre_run()
1905 trace_kvm_s390_sie_enter(vcpu, cpuflags); in vcpu_pre_run()
1910 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) in vcpu_post_run_fault_in_sie() argument
1912 psw_t *psw = &vcpu->arch.sie_block->gpsw; in vcpu_post_run_fault_in_sie()
1916 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); in vcpu_post_run_fault_in_sie()
1917 trace_kvm_s390_sie_fault(vcpu); in vcpu_post_run_fault_in_sie()
1927 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); in vcpu_post_run_fault_in_sie()
1929 return kvm_s390_inject_prog_cond(vcpu, rc); in vcpu_post_run_fault_in_sie()
1932 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in vcpu_post_run_fault_in_sie()
1935 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) in vcpu_post_run() argument
1939 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", in vcpu_post_run()
1940 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
1941 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
1943 if (guestdbg_enabled(vcpu)) in vcpu_post_run()
1944 kvm_s390_restore_guest_per_regs(vcpu); in vcpu_post_run()
1948 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
1949 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
1950 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
1952 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
1956 trace_kvm_s390_major_guest_pfault(vcpu); in vcpu_post_run()
1958 if (kvm_arch_setup_async_pf(vcpu)) { in vcpu_post_run()
1962 rc = kvm_arch_fault_in_page(vcpu, gpa, 1); in vcpu_post_run()
1967 rc = vcpu_post_run_fault_in_sie(vcpu); in vcpu_post_run()
1969 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); in vcpu_post_run()
1972 if (kvm_is_ucontrol(vcpu->kvm)) in vcpu_post_run()
1974 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; in vcpu_post_run()
1976 rc = kvm_handle_sie_intercept(vcpu); in vcpu_post_run()
1982 static int __vcpu_run(struct kvm_vcpu *vcpu) in __vcpu_run() argument
1990 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
1993 rc = vcpu_pre_run(vcpu); in __vcpu_run()
1997 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2005 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
2006 vcpu->run->s.regs.gprs); in __vcpu_run()
2008 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2010 rc = vcpu_post_run(vcpu, exit_reason); in __vcpu_run()
2011 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); in __vcpu_run()
2013 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2017 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in sync_regs() argument
2019 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs()
2020 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs()
2022 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
2024 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
2026 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in sync_regs()
2029 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; in sync_regs()
2030 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
2031 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs()
2032 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs()
2033 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs()
2036 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs()
2037 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs()
2038 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs()
2039 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs()
2040 kvm_clear_async_pf_completion_queue(vcpu); in sync_regs()
2045 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in store_regs() argument
2047 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
2048 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
2049 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
2050 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
2051 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; in store_regs()
2052 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
2053 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs()
2054 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs()
2055 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs()
2056 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
2057 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
2058 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
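sync_regs()/store_regs() shuttle this state through the mmap'ed kvm_run area, so userspace can update, for example, the prefix without a separate ioctl by flagging it in kvm_dirty_regs. A sketch:

#include <linux/kvm.h>
#include <stdint.h>

/* Stage a new prefix for the next KVM_RUN; sync_regs() picks it up
 * when the KVM_SYNC_PREFIX bit is set in kvm_dirty_regs. */
static void stage_prefix(struct kvm_run *run, uint32_t prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
}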
2061 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in kvm_arch_vcpu_ioctl_run() argument
2066 if (guestdbg_exit_pending(vcpu)) { in kvm_arch_vcpu_ioctl_run()
2067 kvm_s390_prepare_debug_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
2071 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
2072 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
2074 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
2075 kvm_s390_vcpu_start(vcpu); in kvm_arch_vcpu_ioctl_run()
2076 } else if (is_vcpu_stopped(vcpu)) { in kvm_arch_vcpu_ioctl_run()
2078 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
2082 sync_regs(vcpu, kvm_run); in kvm_arch_vcpu_ioctl_run()
2085 rc = __vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
2092 if (guestdbg_exit_pending(vcpu) && !rc) { in kvm_arch_vcpu_ioctl_run()
2093 kvm_s390_prepare_debug_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
2100 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in kvm_arch_vcpu_ioctl_run()
2101 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in kvm_arch_vcpu_ioctl_run()
2102 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in kvm_arch_vcpu_ioctl_run()
2112 store_regs(vcpu, kvm_run); in kvm_arch_vcpu_ioctl_run()
2114 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
2117 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
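The userspace half of this loop is a plain KVM_RUN that dispatches on exit_reason; the s390_sieic fields filled at lines 2100-2102 surface the SIE intercept. A minimal sketch, assuming run was mmap'ed from vcpu_fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;
	switch (run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		/* icptcode/ipa/ipb describe the intercepted operation */
		return run->s390_sieic.icptcode;
	default:
		return 0;
	}
}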
2127 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) in kvm_s390_store_status_unloaded() argument
2135 if (write_guest_abs(vcpu, 163, &archmode, 1)) in kvm_s390_store_status_unloaded()
2139 if (write_guest_real(vcpu, 163, &archmode, 1)) in kvm_s390_store_status_unloaded()
2141 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); in kvm_s390_store_status_unloaded()
2143 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), in kvm_s390_store_status_unloaded()
2144 vcpu->arch.guest_fpregs.fprs, 128); in kvm_s390_store_status_unloaded()
2145 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), in kvm_s390_store_status_unloaded()
2146 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
2147 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), in kvm_s390_store_status_unloaded()
2148 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
2149 px = kvm_s390_get_prefix(vcpu); in kvm_s390_store_status_unloaded()
2150 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), in kvm_s390_store_status_unloaded()
2152 rc |= write_guest_abs(vcpu, in kvm_s390_store_status_unloaded()
2154 &vcpu->arch.guest_fpregs.fpc, 4); in kvm_s390_store_status_unloaded()
2155 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), in kvm_s390_store_status_unloaded()
2156 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
2157 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), in kvm_s390_store_status_unloaded()
2158 &vcpu->arch.sie_block->cputm, 8); in kvm_s390_store_status_unloaded()
2159 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
2160 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), in kvm_s390_store_status_unloaded()
2162 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), in kvm_s390_store_status_unloaded()
2163 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
2164 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), in kvm_s390_store_status_unloaded()
2165 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
2169 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) in kvm_s390_vcpu_store_status() argument
2176 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); in kvm_s390_vcpu_store_status()
2177 save_fp_regs(vcpu->arch.guest_fpregs.fprs); in kvm_s390_vcpu_store_status()
2178 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
2180 return kvm_s390_store_status_unloaded(vcpu, addr); in kvm_s390_vcpu_store_status()
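kvm_s390_vcpu_store_status() serves the KVM_S390_STORE_STATUS vCPU ioctl, whose unsigned long argument is the absolute store address; two magic values select special behavior. They live in the kernel's asm/kvm_host.h rather than the uapi, so the sketch below mirrors them (an assumption worth double-checking against your headers):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Mirrored from the kernel's asm/kvm_host.h (not part of the uapi). */
#define KVM_S390_STORE_STATUS_NOADDR	(-1ul)
#define KVM_S390_STORE_STATUS_PREFIXED	(-2ul)

/* Store status at the prefix-defined save area, as a guest would see
 * after SIGP store status. */
static int store_status_prefixed(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}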
2186 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, in kvm_s390_store_adtl_status_unloaded() argument
2193 return write_guest_abs(vcpu, gpa & ~0x3ff, in kvm_s390_store_adtl_status_unloaded()
2194 (void *)&vcpu->run->s.regs.vrs, 512); in kvm_s390_store_adtl_status_unloaded()
2197 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) in kvm_s390_vcpu_store_adtl_status() argument
2199 if (!test_kvm_facility(vcpu->kvm, 129)) in kvm_s390_vcpu_store_adtl_status()
2207 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); in kvm_s390_vcpu_store_adtl_status()
2209 return kvm_s390_store_adtl_status_unloaded(vcpu, addr); in kvm_s390_vcpu_store_adtl_status()
2212 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) in __disable_ibs_on_vcpu() argument
2214 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); in __disable_ibs_on_vcpu()
2215 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); in __disable_ibs_on_vcpu()
2216 exit_sie_sync(vcpu); in __disable_ibs_on_vcpu()
2222 struct kvm_vcpu *vcpu; in __disable_ibs_on_all_vcpus() local
2224 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
2225 __disable_ibs_on_vcpu(vcpu); in __disable_ibs_on_all_vcpus()
2229 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) in __enable_ibs_on_vcpu() argument
2231 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); in __enable_ibs_on_vcpu()
2232 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); in __enable_ibs_on_vcpu()
2233 exit_sie_sync(vcpu); in __enable_ibs_on_vcpu()
2236 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_start() argument
2240 if (!is_vcpu_stopped(vcpu)) in kvm_s390_vcpu_start()
2243 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
2245 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2246 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
2249 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
2255 __enable_ibs_on_vcpu(vcpu); in kvm_s390_vcpu_start()
2262 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
2265 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_start()
2270 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_vcpu_start()
2271 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2275 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_stop() argument
2280 if (is_vcpu_stopped(vcpu)) in kvm_s390_vcpu_stop()
2283 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
2285 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2286 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
2289 kvm_s390_clear_stop_irq(vcpu); in kvm_s390_vcpu_stop()
2291 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_stop()
2292 __disable_ibs_on_vcpu(vcpu); in kvm_s390_vcpu_stop()
2295 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
2297 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
2309 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2313 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
2323 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
2324 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
2325 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
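The capability toggled above is enabled per-VM by issuing KVM_ENABLE_CAP on any vCPU fd. A sketch:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_css_support(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}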
2336 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, in kvm_s390_guest_mem_op() argument
2357 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_mem_op()
2362 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); in kvm_s390_guest_mem_op()
2365 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
2373 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); in kvm_s390_guest_mem_op()
2380 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
2386 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_mem_op()
2389 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
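kvm_s390_guest_mem_op() implements the KVM_S390_MEM_OP vCPU ioctl; on an access exception the pending program interrupt is injected at line 2389. A sketch reading guest memory through the logical-address path:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf,
			      uint32_t len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size = len;
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf = (uint64_t)(uintptr_t)buf;
	op.ar = 0;	/* access register 0 */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}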
2398 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2410 r = kvm_s390_inject_vcpu(vcpu, &s390irq); in kvm_arch_vcpu_ioctl()
2422 r = kvm_s390_inject_vcpu(vcpu, &s390irq); in kvm_arch_vcpu_ioctl()
2426 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
2427 r = kvm_s390_vcpu_store_status(vcpu, arg); in kvm_arch_vcpu_ioctl()
2428 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
2436 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); in kvm_arch_vcpu_ioctl()
2440 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); in kvm_arch_vcpu_ioctl()
2449 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2451 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2463 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2468 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
2480 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2485 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
2491 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
2500 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2507 r = kvm_s390_guest_mem_op(vcpu, &mem_op); in kvm_arch_vcpu_ioctl()
2524 r = kvm_s390_set_irq_state(vcpu, in kvm_arch_vcpu_ioctl()
2539 r = kvm_s390_get_irq_state(vcpu, in kvm_arch_vcpu_ioctl()
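The KVM_S390_UCAS_MAP/UNMAP branches above are only valid for ucontrol VMs (created with type KVM_VM_S390_UCONTROL); they manage the per-vCPU gmap set up in __kvm_ucontrol_vcpu_init(). A sketch:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int ucas_map(int vcpu_fd, uint64_t user_addr, uint64_t vcpu_addr,
		    uint64_t length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = user_addr,
		.vcpu_addr = vcpu_addr,
		.length = length,
	};

	return ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
}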
2550 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2554 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
2555 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()