Lines Matching refs:vcpu

153 struct kvm_vcpu *vcpu; in kvm_clock_sync() local
159 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
160 vcpu->arch.sie_block->epoch -= *delta; in kvm_clock_sync()
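
Lines 153-160 are fragments of the TOD-clock sync notifier; the elided lines presumably walk the global VM list and shift each VM's epoch by the same delta. A plausible reconstruction (the vm_list walk and the kvm->arch.epoch adjustment are assumptions based on the surrounding fragments):

static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	/* Assumption: every VM on the global vm_list is adjusted, so
	 * guest TOD values stay stable across a host clock change. */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
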
460 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
464 struct kvm_vcpu *vcpu; in kvm_s390_vm_set_crypto() local
503 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
504 kvm_s390_vcpu_crypto_setup(vcpu); in kvm_s390_vm_set_crypto()
505 exit_sie(vcpu); in kvm_s390_vm_set_crypto()
1185 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1187 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); in kvm_arch_vcpu_destroy()
1188 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
1189 kvm_s390_clear_local_irqs(vcpu); in kvm_arch_vcpu_destroy()
1190 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_destroy()
1191 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_destroy()
1192 clear_bit(63 - vcpu->vcpu_id, in kvm_arch_vcpu_destroy()
1193 (unsigned long *) &vcpu->kvm->arch.sca->mcn); in kvm_arch_vcpu_destroy()
1194 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == in kvm_arch_vcpu_destroy()
1195 (__u64) vcpu->arch.sie_block) in kvm_arch_vcpu_destroy()
1196 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; in kvm_arch_vcpu_destroy()
1200 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
1201 gmap_free(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
1203 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
1204 kvm_s390_vcpu_unsetup_cmma(vcpu); in kvm_arch_vcpu_destroy()
1205 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
1207 kvm_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
1208 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_destroy()
1214 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
1216 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
1217 kvm_arch_vcpu_destroy(vcpu); in kvm_free_vcpus()
1242 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) in __kvm_ucontrol_vcpu_init() argument
1244 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
1245 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
1247 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
1252 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
1254 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_init()
1255 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_init()
1256 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_init()
1262 if (test_kvm_facility(vcpu->kvm, 129)) in kvm_arch_vcpu_init()
1263 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_init()
1265 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_init()
1266 return __kvm_ucontrol_vcpu_init(vcpu); in kvm_arch_vcpu_init()
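
The gap at lines 1257-1261 presumably ORs the remaining KVM_SYNC_* flags into kvm_valid_regs. A minimal sketch of the whole function, with the elided flag set marked as an assumption:

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* Assumption: the elided lines add the other register-sync flags */
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	/* Facility 129 is the vector facility; only then sync VRS */
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
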
1271 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
1275 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in kvm_arch_vcpu_load()
1276 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in kvm_arch_vcpu_load()
1281 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in kvm_arch_vcpu_load()
1282 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_load()
1287 save_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_load()
1288 restore_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_load()
1289 gmap_enable(vcpu->arch.gmap); in kvm_arch_vcpu_load()
1290 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_load()
1293 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
1295 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_put()
1296 gmap_disable(vcpu->arch.gmap); in kvm_arch_vcpu_put()
1300 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_arch_vcpu_put()
1303 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in kvm_arch_vcpu_put()
1304 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in kvm_arch_vcpu_put()
1306 save_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_put()
1307 restore_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_put()
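
Taken together, lines 1271-1307 implement the usual save/swap/restore pattern for FPU and access registers around guest execution. A sketch of the two entry points; the initial FPU flush (presumably save_fpu_regs()) sits in the elided lines and is an assumption here:

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Assumption: elided line flushes live FPU state first */
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	/* Point the lazy-FPU machinery at the guest register image */
	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	/* Mirror image of _load: capture guest FPC, restore host FPU */
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
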
1310 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_initial_reset() argument
1313 vcpu->arch.sie_block->gpsw.mask = 0UL; in kvm_s390_vcpu_initial_reset()
1314 vcpu->arch.sie_block->gpsw.addr = 0UL; in kvm_s390_vcpu_initial_reset()
1315 kvm_s390_set_prefix(vcpu, 0); in kvm_s390_vcpu_initial_reset()
1316 vcpu->arch.sie_block->cputm = 0UL; in kvm_s390_vcpu_initial_reset()
1317 vcpu->arch.sie_block->ckc = 0UL; in kvm_s390_vcpu_initial_reset()
1318 vcpu->arch.sie_block->todpr = 0; in kvm_s390_vcpu_initial_reset()
1319 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); in kvm_s390_vcpu_initial_reset()
1320 vcpu->arch.sie_block->gcr[0] = 0xE0UL; in kvm_s390_vcpu_initial_reset()
1321 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; in kvm_s390_vcpu_initial_reset()
1325 vcpu->arch.sie_block->gbea = 1; in kvm_s390_vcpu_initial_reset()
1326 vcpu->arch.sie_block->pp = 0; in kvm_s390_vcpu_initial_reset()
1327 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_s390_vcpu_initial_reset()
1328 kvm_clear_async_pf_completion_queue(vcpu); in kvm_s390_vcpu_initial_reset()
1329 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_s390_vcpu_initial_reset()
1330 kvm_s390_vcpu_stop(vcpu); in kvm_s390_vcpu_initial_reset()
1331 kvm_s390_clear_local_irqs(vcpu); in kvm_s390_vcpu_initial_reset()
1334 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1336 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1338 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
1340 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
1341 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_postcreate()
1342 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
1345 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_crypto_setup() argument
1347 if (!test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
1350 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
1352 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
1353 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
1354 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
1355 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
1357 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
1360 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_unsetup_cmma() argument
1362 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
1363 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
1366 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_setup_cmma() argument
1368 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
1369 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
1372 vcpu->arch.sie_block->ecb2 |= 0x80; in kvm_s390_vcpu_setup_cmma()
1373 vcpu->arch.sie_block->ecb2 &= ~0x08; in kvm_s390_vcpu_setup_cmma()
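
The ecb2 constants at lines 1372-1373 are bare magic in this listing; 0x80 is the CMMA-interpretation bit (later named ECB2_CMMA) and 0x08 is PFMF interpretation (ECB2_PFMFI), which is switched off here. A commented sketch, with the elided error path filled in as an assumption:

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	/* CBRLO: origin of the collaborative-memory callback list */
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;	/* assumption: elided error return */

	vcpu->arch.sie_block->ecb2 |= 0x80;	/* enable CMMA interpretation */
	vcpu->arch.sie_block->ecb2 &= ~0x08;	/* no PFMF interpretation */
	return 0;
}
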
1377 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_setup_model() argument
1379 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
1381 vcpu->arch.cpu_id = model->cpu_id; in kvm_s390_vcpu_setup_model()
1382 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
1383 vcpu->arch.sie_block->fac = (int) (long) model->fac->list; in kvm_s390_vcpu_setup_model()
1386 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
1390 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_arch_vcpu_setup()
1394 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_arch_vcpu_setup()
1395 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_setup()
1396 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_arch_vcpu_setup()
1397 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_setup()
1399 kvm_s390_vcpu_setup_model(vcpu); in kvm_arch_vcpu_setup()
1401 vcpu->arch.sie_block->ecb = 6; in kvm_arch_vcpu_setup()
1402 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) in kvm_arch_vcpu_setup()
1403 vcpu->arch.sie_block->ecb |= 0x10; in kvm_arch_vcpu_setup()
1405 vcpu->arch.sie_block->ecb2 = 8; in kvm_arch_vcpu_setup()
1406 vcpu->arch.sie_block->eca = 0xC1002000U; in kvm_arch_vcpu_setup()
1408 vcpu->arch.sie_block->eca |= 1; in kvm_arch_vcpu_setup()
1410 vcpu->arch.sie_block->eca |= 0x10000000U; in kvm_arch_vcpu_setup()
1411 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_setup()
1412 vcpu->arch.sie_block->eca |= 0x00020000; in kvm_arch_vcpu_setup()
1413 vcpu->arch.sie_block->ecd |= 0x20000000; in kvm_arch_vcpu_setup()
1415 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_arch_vcpu_setup()
1417 if (vcpu->kvm->arch.use_cmma) { in kvm_arch_vcpu_setup()
1418 rc = kvm_s390_vcpu_setup_cmma(vcpu); in kvm_arch_vcpu_setup()
1422 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_arch_vcpu_setup()
1423 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_arch_vcpu_setup()
1425 kvm_s390_vcpu_crypto_setup(vcpu); in kvm_arch_vcpu_setup()
1433 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
1442 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create()
1443 if (!vcpu) in kvm_arch_vcpu_create()
1450 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
1451 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
1453 vcpu->arch.sie_block->icpua = id; in kvm_arch_vcpu_create()
1461 (__u64) vcpu->arch.sie_block; in kvm_arch_vcpu_create()
1462 vcpu->arch.sie_block->scaoh = in kvm_arch_vcpu_create()
1464 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; in kvm_arch_vcpu_create()
1468 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
1469 vcpu->arch.local_int.float_int = &kvm->arch.float_int; in kvm_arch_vcpu_create()
1470 vcpu->arch.local_int.wq = &vcpu->wq; in kvm_arch_vcpu_create()
1471 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; in kvm_arch_vcpu_create()
1473 rc = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
1476 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, in kvm_arch_vcpu_create()
1477 vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1478 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1480 return vcpu; in kvm_arch_vcpu_create()
1482 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
1484 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_create()
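
Lines 1442-1484 show only fragments of vcpu allocation and SCA wiring. A condensed sketch of the plausible flow; the sie_page allocation and the set_bit on sca->mcn are reconstructed assumptions (they mirror the clear_bit seen in kvm_arch_vcpu_destroy above):

vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu)
	goto out;

/* Assumption: the SIE control block lives on its own zeroed page,
 * together with the interception TDB pointed to by itdba. */
sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
if (!sie_page)
	goto out_free_cpu;
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
vcpu->arch.sie_block->icpua = id;

if (!kvm_is_ucontrol(kvm)) {
	/* Publish this vcpu in the System Control Area: SDA entry,
	 * plus the SCA origin split into high and low halves. */
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh =
		(__u32)(((__u64) kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64) kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
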
1489 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
1491 return kvm_s390_vcpu_has_irq(vcpu, 0); in kvm_arch_vcpu_runnable()
1494 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_block() argument
1496 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
1497 exit_sie(vcpu); in kvm_s390_vcpu_block()
1500 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_unblock() argument
1502 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
1505 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_request() argument
1507 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
1508 exit_sie(vcpu); in kvm_s390_vcpu_request()
1511 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_request_handled() argument
1513 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
1520 void exit_sie(struct kvm_vcpu *vcpu) in exit_sie() argument
1522 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); in exit_sie()
1523 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
1528 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu) in kvm_s390_sync_request() argument
1530 kvm_make_request(req, vcpu); in kvm_s390_sync_request()
1531 kvm_s390_vcpu_request(vcpu); in kvm_s390_sync_request()
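
Lines 1494-1531 together form the cross-CPU kick protocol: a PROG_* bit set in prog20 keeps the vcpu from re-entering SIE, and exit_sie() forces it out and spins until prog0c shows it has actually left. A sketch; the cpu_relax() in the elided wait-loop body is an assumption:

void exit_sie(struct kvm_vcpu *vcpu)
{
	/* Request a stop intercept, then wait until the vcpu has
	 * really left interpretive execution. */
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();	/* assumption: elided loop body */
}

/* Kick a vcpu out of SIE so a request is processed synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);	/* sets PROG_REQUEST + exit_sie() */
}
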
1538 struct kvm_vcpu *vcpu; in kvm_gmap_notifier() local
1540 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
1542 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { in kvm_gmap_notifier()
1543 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); in kvm_gmap_notifier()
1544 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu); in kvm_gmap_notifier()
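
Line 1542 masks the notified address with ~0x1000UL before comparing: the s390 prefix area spans two 4K pages, so a fault on either page must trigger a prefix reload. A sketch of the notifier, assuming the standard kvm_for_each_vcpu shape seen elsewhere in this listing:

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* Match against both pages of the 8K prefix area */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
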
1549 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
1556 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_one_reg() argument
1563 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
1567 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
1571 r = put_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_get_one_reg()
1575 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
1579 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
1583 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
1587 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
1591 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
1595 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
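
Lines 1556-1595 are the case bodies of a switch over reg->id; each arm copies one field out with put_user. A sketch of the pattern for the first two arms; the KVM_REG_S390_* case labels are assumptions inferred from the fields being copied:

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	/* ... CPU timer, clock comparator, pfault token/compare/select,
	 *     PP and GBEA follow the same pattern; the _set_ variant
	 *     below mirrors it with get_user() ... */
	default:
		break;
	}
	return r;
}
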
1605 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_one_reg() argument
1612 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
1616 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
1620 r = get_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_set_one_reg()
1624 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
1628 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
1630 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
1631 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_ioctl_set_one_reg()
1634 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
1638 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
1642 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
1646 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
1656 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_initial_reset() argument
1658 kvm_s390_vcpu_initial_reset(vcpu); in kvm_arch_vcpu_ioctl_initial_reset()
1662 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
1664 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
1668 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
1670 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
1674 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
1677 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
1678 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
1679 restore_access_regs(vcpu->run->s.regs.acrs); in kvm_arch_vcpu_ioctl_set_sregs()
1683 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
1686 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
1687 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
1691 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1705 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1717 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) in kvm_arch_vcpu_ioctl_set_initial_psw() argument
1721 if (!is_vcpu_stopped(vcpu)) in kvm_arch_vcpu_ioctl_set_initial_psw()
1724 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
1725 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
1730 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
1740 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
1745 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1746 kvm_s390_clear_bp_data(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
1752 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
1754 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1757 rc = kvm_s390_import_bp_data(vcpu, dbg); in kvm_arch_vcpu_ioctl_set_guest_debug()
1759 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1760 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1764 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1765 kvm_s390_clear_bp_data(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
1766 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1772 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
1776 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : in kvm_arch_vcpu_ioctl_get_mpstate()
1780 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
1786 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
1790 kvm_s390_vcpu_stop(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
1793 kvm_s390_vcpu_start(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
1805 static bool ibs_enabled(struct kvm_vcpu *vcpu) in ibs_enabled() argument
1807 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; in ibs_enabled()
1810 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) in kvm_s390_handle_requests() argument
1813 kvm_s390_vcpu_request_handled(vcpu); in kvm_s390_handle_requests()
1814 if (!vcpu->requests) in kvm_s390_handle_requests()
1823 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { in kvm_s390_handle_requests()
1825 rc = gmap_ipte_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
1826 kvm_s390_get_prefix(vcpu), in kvm_s390_handle_requests()
1833 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { in kvm_s390_handle_requests()
1834 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
1838 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { in kvm_s390_handle_requests()
1839 if (!ibs_enabled(vcpu)) { in kvm_s390_handle_requests()
1840 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
1842 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1847 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { in kvm_s390_handle_requests()
1848 if (ibs_enabled(vcpu)) { in kvm_s390_handle_requests()
1849 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
1851 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1857 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvm_s390_handle_requests()
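
Lines 1810-1857 test one KVM_REQ_* bit at a time with kvm_check_request(), which tests and clears atomically, retrying from the top after each handled request. A condensed sketch of the flow; error handling is trimmed and the gmap_ipte_notify() length argument is an assumption (two pages, matching the prefix-area size):

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);	/* clears PROG_REQUEST */
	if (!vcpu->requests)
		return 0;

	/* Re-arm the ipte notifier on the prefix pages, then retry in
	 * case another request raced in meanwhile. */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc = gmap_ipte_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2);	/* assumption */
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;	/* invalidate */
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}
	/* KVM_REQ_DISABLE_IBS is the mirror image with atomic_andnot() */

	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
	return 0;
}
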
1864 struct kvm_vcpu *vcpu; in kvm_s390_set_tod_clock() local
1871 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_s390_set_tod_clock()
1872 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
1888 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) in kvm_arch_fault_in_page() argument
1890 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
1894 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, in __kvm_inject_pfault_token() argument
1903 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); in __kvm_inject_pfault_token()
1907 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
1911 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
1914 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1915 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1918 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
1921 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
1922 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
1925 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, in kvm_arch_async_page_ready() argument
1931 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_inject_async_page_present() argument
1940 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) in kvm_arch_setup_async_pf() argument
1946 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
1948 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
1949 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
1951 if (psw_extint_disabled(vcpu)) in kvm_arch_setup_async_pf()
1953 if (kvm_s390_vcpu_has_irq(vcpu, 0)) in kvm_arch_setup_async_pf()
1955 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) in kvm_arch_setup_async_pf()
1957 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
1960 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
1962 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
1965 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
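
Lines 1940-1965 are a chain of early-outs deciding whether a guest fault may be handled asynchronously: a pfault token must be configured, the PSW must match the guest's pfault select/compare masks, external interrupts must be open, no interrupt may be pending, and the relevant CR0 subclass bit (0x200) must be set. A sketch with the elided declarations and hva check filled in as assumptions:

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;			/* assumption: elided declarations */
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	/* assumption: an elided line rejects an invalid hva here */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token,
			    &arch.pfault_token, 8))
		return 0;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}
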
1969 static int vcpu_pre_run(struct kvm_vcpu *vcpu) in vcpu_pre_run() argument
1978 kvm_check_async_pf_completion(vcpu); in vcpu_pre_run()
1980 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); in vcpu_pre_run()
1988 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
1989 rc = kvm_s390_deliver_pending_interrupts(vcpu); in vcpu_pre_run()
1994 rc = kvm_s390_handle_requests(vcpu); in vcpu_pre_run()
1998 if (guestdbg_enabled(vcpu)) { in vcpu_pre_run()
1999 kvm_s390_backup_guest_per_regs(vcpu); in vcpu_pre_run()
2000 kvm_s390_patch_guest_per_regs(vcpu); in vcpu_pre_run()
2003 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
2004 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
2005 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); in vcpu_pre_run()
2006 trace_kvm_s390_sie_enter(vcpu, cpuflags); in vcpu_pre_run()
2011 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) in vcpu_post_run_fault_in_sie() argument
2013 psw_t *psw = &vcpu->arch.sie_block->gpsw; in vcpu_post_run_fault_in_sie()
2017 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); in vcpu_post_run_fault_in_sie()
2018 trace_kvm_s390_sie_fault(vcpu); in vcpu_post_run_fault_in_sie()
2028 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); in vcpu_post_run_fault_in_sie()
2030 return kvm_s390_inject_prog_cond(vcpu, rc); in vcpu_post_run_fault_in_sie()
2033 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in vcpu_post_run_fault_in_sie()
2036 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) in vcpu_post_run() argument
2040 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", in vcpu_post_run()
2041 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
2042 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
2044 if (guestdbg_enabled(vcpu)) in vcpu_post_run()
2045 kvm_s390_restore_guest_per_regs(vcpu); in vcpu_post_run()
2049 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
2050 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
2051 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
2053 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
2057 trace_kvm_s390_major_guest_pfault(vcpu); in vcpu_post_run()
2059 if (kvm_arch_setup_async_pf(vcpu)) { in vcpu_post_run()
2063 rc = kvm_arch_fault_in_page(vcpu, gpa, 1); in vcpu_post_run()
2068 rc = vcpu_post_run_fault_in_sie(vcpu); in vcpu_post_run()
2070 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); in vcpu_post_run()
2073 if (kvm_is_ucontrol(vcpu->kvm)) in vcpu_post_run()
2075 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; in vcpu_post_run()
2077 rc = kvm_handle_sie_intercept(vcpu); in vcpu_post_run()
2083 static int __vcpu_run(struct kvm_vcpu *vcpu) in __vcpu_run() argument
2091 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2094 rc = vcpu_pre_run(vcpu); in __vcpu_run()
2098 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
2106 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
2107 vcpu->run->s.regs.gprs); in __vcpu_run()
2111 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
2113 rc = vcpu_post_run(vcpu, exit_reason); in __vcpu_run()
2114 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); in __vcpu_run()
2116 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
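
Lines 2083-2116 form the core run loop. kvm->srcu is held whenever the vcpu is not in SIE, and dropped around sie64a() so memslot updates cannot be blocked by a running guest; the elided lines presumably bracket the SIE entry with guest_enter/guest_exit context tracking. A condensed sketch:

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/* Hold kvm->srcu while NOT in SIE, so memslots stay stable
	 * whenever we touch guest memory. */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/* Assumption: elided lines do guest_enter/guest_exit
		 * context tracking with IRQs disabled around SIE. */
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) &&
		 !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
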
2120 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in sync_regs() argument
2122 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs()
2123 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs()
2125 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
2127 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
2129 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in sync_regs()
2132 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; in sync_regs()
2133 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
2134 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs()
2135 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs()
2136 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs()
2139 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs()
2140 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs()
2141 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs()
2142 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs()
2143 kvm_clear_async_pf_completion_queue(vcpu); in sync_regs()
2148 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in store_regs() argument
2150 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
2151 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
2152 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
2153 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
2154 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; in store_regs()
2155 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
2156 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs()
2157 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs()
2158 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs()
2159 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
2160 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
2161 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
2164 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in kvm_arch_vcpu_ioctl_run() argument
2169 if (guestdbg_exit_pending(vcpu)) { in kvm_arch_vcpu_ioctl_run()
2170 kvm_s390_prepare_debug_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
2174 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
2175 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
2177 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
2178 kvm_s390_vcpu_start(vcpu); in kvm_arch_vcpu_ioctl_run()
2179 } else if (is_vcpu_stopped(vcpu)) { in kvm_arch_vcpu_ioctl_run()
2181 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
2185 sync_regs(vcpu, kvm_run); in kvm_arch_vcpu_ioctl_run()
2188 rc = __vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
2195 if (guestdbg_exit_pending(vcpu) && !rc) { in kvm_arch_vcpu_ioctl_run()
2196 kvm_s390_prepare_debug_exit(vcpu); in kvm_arch_vcpu_ioctl_run()
2203 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in kvm_arch_vcpu_ioctl_run()
2204 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in kvm_arch_vcpu_ioctl_run()
2205 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in kvm_arch_vcpu_ioctl_run()
2215 store_regs(vcpu, kvm_run); in kvm_arch_vcpu_ioctl_run()
2217 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
2220 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
2230 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) in kvm_s390_store_status_unloaded() argument
2238 px = kvm_s390_get_prefix(vcpu); in kvm_s390_store_status_unloaded()
2240 if (write_guest_abs(vcpu, 163, &archmode, 1)) in kvm_s390_store_status_unloaded()
2244 if (write_guest_real(vcpu, 163, &archmode, 1)) in kvm_s390_store_status_unloaded()
2252 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
2253 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, in kvm_s390_store_status_unloaded()
2256 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, in kvm_s390_store_status_unloaded()
2257 vcpu->run->s.regs.vrs, 128); in kvm_s390_store_status_unloaded()
2259 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, in kvm_s390_store_status_unloaded()
2260 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
2261 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, in kvm_s390_store_status_unloaded()
2262 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
2263 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, in kvm_s390_store_status_unloaded()
2265 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, in kvm_s390_store_status_unloaded()
2266 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
2267 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, in kvm_s390_store_status_unloaded()
2268 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
2269 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, in kvm_s390_store_status_unloaded()
2270 &vcpu->arch.sie_block->cputm, 8); in kvm_s390_store_status_unloaded()
2271 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
2272 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA, in kvm_s390_store_status_unloaded()
2274 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA, in kvm_s390_store_status_unloaded()
2275 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
2276 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA, in kvm_s390_store_status_unloaded()
2277 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
2281 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) in kvm_s390_vcpu_store_status() argument
2289 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
2290 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
2292 return kvm_s390_store_status_unloaded(vcpu, addr); in kvm_s390_vcpu_store_status()
2298 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, in kvm_s390_store_adtl_status_unloaded() argument
2305 return write_guest_abs(vcpu, gpa & ~0x3ff, in kvm_s390_store_adtl_status_unloaded()
2306 (void *)&vcpu->run->s.regs.vrs, 512); in kvm_s390_store_adtl_status_unloaded()
2309 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) in kvm_s390_vcpu_store_adtl_status() argument
2311 if (!test_kvm_facility(vcpu->kvm, 129)) in kvm_s390_vcpu_store_adtl_status()
2324 return kvm_s390_store_adtl_status_unloaded(vcpu, addr); in kvm_s390_vcpu_store_adtl_status()
2327 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) in __disable_ibs_on_vcpu() argument
2329 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); in __disable_ibs_on_vcpu()
2330 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu); in __disable_ibs_on_vcpu()
2336 struct kvm_vcpu *vcpu; in __disable_ibs_on_all_vcpus() local
2338 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
2339 __disable_ibs_on_vcpu(vcpu); in __disable_ibs_on_all_vcpus()
2343 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) in __enable_ibs_on_vcpu() argument
2345 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); in __enable_ibs_on_vcpu()
2346 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu); in __enable_ibs_on_vcpu()
2349 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_start() argument
2353 if (!is_vcpu_stopped(vcpu)) in kvm_s390_vcpu_start()
2356 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
2358 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2359 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
2362 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
2368 __enable_ibs_on_vcpu(vcpu); in kvm_s390_vcpu_start()
2375 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
2378 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_start()
2383 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_vcpu_start()
2384 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
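
Lines 2349-2384 implement the start side of the IBS (interlock-and-broadcast-suppression) optimization: IBS may stay enabled only while at most one vcpu runs, so starting the first vcpu enables IBS on it, and starting a second disables IBS everywhere. A condensed sketch; the started_vcpus counting is reconstructed as an assumption around the fragments shown:

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one vcpu may enter or leave the STOPPED state at a time */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++)
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;	/* assumption: elided count */

	if (started_vcpus == 0) {
		/* We are the only running vcpu: IBS may be enabled */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/* A second vcpu is starting: drop IBS everywhere */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/* Another vcpu may have used IBS while we were stopped */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}
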
2388 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) in kvm_s390_vcpu_stop() argument
2393 if (is_vcpu_stopped(vcpu)) in kvm_s390_vcpu_stop()
2396 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
2398 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2399 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
2402 kvm_s390_clear_stop_irq(vcpu); in kvm_s390_vcpu_stop()
2404 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_stop()
2405 __disable_ibs_on_vcpu(vcpu); in kvm_s390_vcpu_stop()
2408 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
2410 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
2422 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2426 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
2436 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
2437 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
2438 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
2439 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
2450 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, in kvm_s390_guest_mem_op() argument
2471 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_mem_op()
2476 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); in kvm_s390_guest_mem_op()
2479 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
2487 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); in kvm_s390_guest_mem_op()
2494 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
2500 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_mem_op()
2503 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
2512 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2524 r = kvm_s390_inject_vcpu(vcpu, &s390irq); in kvm_arch_vcpu_ioctl()
2536 r = kvm_s390_inject_vcpu(vcpu, &s390irq); in kvm_arch_vcpu_ioctl()
2540 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
2541 r = kvm_s390_vcpu_store_status(vcpu, arg); in kvm_arch_vcpu_ioctl()
2542 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
2550 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); in kvm_arch_vcpu_ioctl()
2554 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); in kvm_arch_vcpu_ioctl()
2563 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2565 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2577 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2582 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
2594 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
2599 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
2605 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
2614 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2621 r = kvm_s390_guest_mem_op(vcpu, &mem_op); in kvm_arch_vcpu_ioctl()
2638 r = kvm_s390_set_irq_state(vcpu, in kvm_arch_vcpu_ioctl()
2653 r = kvm_s390_get_irq_state(vcpu, in kvm_arch_vcpu_ioctl()
2664 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2668 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
2669 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()