Lines matching refs:arch (arch/s390/kvm/kvm-s390.c)
158 kvm->arch.epoch -= *delta; in kvm_clock_sync()
160 vcpu->arch.sie_block->epoch -= *delta; in kvm_clock_sync()
271 struct gmap *gmap = kvm->arch.gmap; in kvm_s390_sync_dirty_log()
336 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
341 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
349 set_kvm_facility(kvm->arch.model.fac->mask, 129); in kvm_vm_ioctl_enable_cap()
350 set_kvm_facility(kvm->arch.model.fac->list, 129); in kvm_vm_ioctl_enable_cap()
360 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
378 kvm->arch.gmap->asce_end); in kvm_s390_get_mem_control()
379 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
404 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
411 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
417 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
431 if (new_limit > kvm->arch.gmap->asce_end) in kvm_s390_set_mem_control()
443 gmap_free(kvm->arch.gmap); in kvm_s390_set_mem_control()
445 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
474 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
475 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
476 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
481 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
482 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
483 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
487 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
488 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
489 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
493 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
494 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
495 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
621 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, in kvm_s390_set_processor()
623 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
624 memcpy(kvm->arch.model.fac->list, proc->fac_list, in kvm_s390_set_processor()
656 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); in kvm_s390_get_processor()
657 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
658 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); in kvm_s390_get_processor()
678 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, in kvm_s390_get_machine()
945 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
1041 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
1044 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
1046 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
1060 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), in kvm_s390_crypto_init()
1062 if (!kvm->arch.crypto.crycb) in kvm_s390_crypto_init()
1068 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
1069 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
1070 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
1071 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
1072 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
1073 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
1101 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); in kvm_arch_init_vm()
1102 if (!kvm->arch.sca) in kvm_arch_init_vm()
1108 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
1113 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
1114 if (!kvm->arch.dbf) in kvm_arch_init_vm()
1124 kvm->arch.model.fac = in kvm_arch_init_vm()
1126 if (!kvm->arch.model.fac) in kvm_arch_init_vm()
1130 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, in kvm_arch_init_vm()
1134 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; in kvm_arch_init_vm()
1136 kvm->arch.model.fac->mask[i] = 0UL; in kvm_arch_init_vm()
1140 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, in kvm_arch_init_vm()
1143 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); in kvm_arch_init_vm()
1144 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
1149 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
1151 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
1152 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
1153 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
1155 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
1159 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
1161 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); in kvm_arch_init_vm()
1162 if (!kvm->arch.gmap) in kvm_arch_init_vm()
1164 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
1165 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
1168 kvm->arch.css_support = 0; in kvm_arch_init_vm()
1169 kvm->arch.use_irqchip = 0; in kvm_arch_init_vm()
1170 kvm->arch.epoch = 0; in kvm_arch_init_vm()
1172 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
1177 kfree(kvm->arch.crypto.crycb); in kvm_arch_init_vm()
1178 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_init_vm()
1179 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
1180 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_init_vm()
1193 (unsigned long *) &vcpu->kvm->arch.sca->mcn); in kvm_arch_vcpu_destroy()
1194 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == in kvm_arch_vcpu_destroy()
1195 (__u64) vcpu->arch.sie_block) in kvm_arch_vcpu_destroy()
1196 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; in kvm_arch_vcpu_destroy()
1201 gmap_free(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
1203 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
1205 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
1230 free_page((unsigned long)kvm->arch.model.fac); in kvm_arch_destroy_vm()
1231 free_page((unsigned long)(kvm->arch.sca)); in kvm_arch_destroy_vm()
1232 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
1233 kfree(kvm->arch.crypto.crycb); in kvm_arch_destroy_vm()
1235 gmap_free(kvm->arch.gmap); in kvm_arch_destroy_vm()
1244 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
1245 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
1247 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
1254 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_init()
1275 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in kvm_arch_vcpu_load()
1276 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in kvm_arch_vcpu_load()
1287 save_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_load()
1289 gmap_enable(vcpu->arch.gmap); in kvm_arch_vcpu_load()
1290 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_load()
1295 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_put()
1296 gmap_disable(vcpu->arch.gmap); in kvm_arch_vcpu_put()
1303 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in kvm_arch_vcpu_put()
1304 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in kvm_arch_vcpu_put()
1307 restore_access_regs(vcpu->arch.host_acrs); in kvm_arch_vcpu_put()
1313 vcpu->arch.sie_block->gpsw.mask = 0UL; in kvm_s390_vcpu_initial_reset()
1314 vcpu->arch.sie_block->gpsw.addr = 0UL; in kvm_s390_vcpu_initial_reset()
1316 vcpu->arch.sie_block->cputm = 0UL; in kvm_s390_vcpu_initial_reset()
1317 vcpu->arch.sie_block->ckc = 0UL; in kvm_s390_vcpu_initial_reset()
1318 vcpu->arch.sie_block->todpr = 0; in kvm_s390_vcpu_initial_reset()
1319 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); in kvm_s390_vcpu_initial_reset()
1320 vcpu->arch.sie_block->gcr[0] = 0xE0UL; in kvm_s390_vcpu_initial_reset()
1321 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; in kvm_s390_vcpu_initial_reset()
1325 vcpu->arch.sie_block->gbea = 1; in kvm_s390_vcpu_initial_reset()
1326 vcpu->arch.sie_block->pp = 0; in kvm_s390_vcpu_initial_reset()
1327 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_s390_vcpu_initial_reset()
1338 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
1342 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
1350 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
1352 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
1353 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
1354 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
1355 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
1357 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
1362 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
1363 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
1368 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
1369 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
1372 vcpu->arch.sie_block->ecb2 |= 0x80; in kvm_s390_vcpu_setup_cmma()
1373 vcpu->arch.sie_block->ecb2 &= ~0x08; in kvm_s390_vcpu_setup_cmma()
1379 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
1381 vcpu->arch.cpu_id = model->cpu_id; in kvm_s390_vcpu_setup_model()
1382 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
1383 vcpu->arch.sie_block->fac = (int) (long) model->fac->list; in kvm_s390_vcpu_setup_model()
1390 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_arch_vcpu_setup()
1395 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_setup()
1397 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_setup()
1401 vcpu->arch.sie_block->ecb = 6; in kvm_arch_vcpu_setup()
1403 vcpu->arch.sie_block->ecb |= 0x10; in kvm_arch_vcpu_setup()
1405 vcpu->arch.sie_block->ecb2 = 8; in kvm_arch_vcpu_setup()
1406 vcpu->arch.sie_block->eca = 0xC1002000U; in kvm_arch_vcpu_setup()
1408 vcpu->arch.sie_block->eca |= 1; in kvm_arch_vcpu_setup()
1410 vcpu->arch.sie_block->eca |= 0x10000000U; in kvm_arch_vcpu_setup()
1412 vcpu->arch.sie_block->eca |= 0x00020000; in kvm_arch_vcpu_setup()
1413 vcpu->arch.sie_block->ecd |= 0x20000000; in kvm_arch_vcpu_setup()
1415 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_arch_vcpu_setup()
1417 if (vcpu->kvm->arch.use_cmma) { in kvm_arch_vcpu_setup()
1422 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_arch_vcpu_setup()
1423 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_arch_vcpu_setup()
1450 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
1451 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
1453 vcpu->arch.sie_block->icpua = id; in kvm_arch_vcpu_create()
1455 if (!kvm->arch.sca) { in kvm_arch_vcpu_create()
1459 if (!kvm->arch.sca->cpu[id].sda) in kvm_arch_vcpu_create()
1460 kvm->arch.sca->cpu[id].sda = in kvm_arch_vcpu_create()
1461 (__u64) vcpu->arch.sie_block; in kvm_arch_vcpu_create()
1462 vcpu->arch.sie_block->scaoh = in kvm_arch_vcpu_create()
1463 (__u32)(((__u64)kvm->arch.sca) >> 32); in kvm_arch_vcpu_create()
1464 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; in kvm_arch_vcpu_create()
1465 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); in kvm_arch_vcpu_create()
1468 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
1469 vcpu->arch.local_int.float_int = &kvm->arch.float_int; in kvm_arch_vcpu_create()
1470 vcpu->arch.local_int.wq = &vcpu->wq; in kvm_arch_vcpu_create()
1471 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; in kvm_arch_vcpu_create()
1477 vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1478 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
1482 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
1496 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
1502 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
1507 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
1513 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
1522 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); in exit_sie()
1523 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
1563 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
1567 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
1571 r = put_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_get_one_reg()
1575 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
1579 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
1583 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
1587 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
1591 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
1595 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
1612 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
1616 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
1620 r = get_user(vcpu->arch.sie_block->cputm, in kvm_arch_vcpu_ioctl_set_one_reg()
1624 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
1628 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
1630 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
1634 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
1638 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
1642 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
1646 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
1678 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
1687 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
1754 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1759 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1760 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
1766 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
1786 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
1807 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; in ibs_enabled()
1825 rc = gmap_ipte_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
1834 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
1842 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1851 &vcpu->arch.sie_block->cpuflags); in kvm_s390_handle_requests()
1869 kvm->arch.epoch = tod - get_tod_clock(); in kvm_s390_set_tod_clock()
1872 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
1890 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
1914 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1915 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
1921 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
1922 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
1943 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
1946 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
1948 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
1949 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
1955 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) in kvm_arch_setup_async_pf()
1957 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
1962 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
1965 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
1980 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); in vcpu_pre_run()
2003 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
2004 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
2013 psw_t *psw = &vcpu->arch.sie_block->gpsw; in vcpu_post_run_fault_in_sie()
2041 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
2042 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
2070 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); in vcpu_post_run()
2075 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; in vcpu_post_run()
2106 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
2122 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs()
2123 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs()
2127 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
2132 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; in sync_regs()
2133 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
2134 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs()
2135 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs()
2136 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs()
2139 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs()
2140 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs()
2141 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs()
2142 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs()
2150 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
2151 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
2153 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
2154 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; in store_regs()
2155 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
2156 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs()
2157 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs()
2158 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs()
2159 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
2160 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
2161 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
2203 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in kvm_arch_vcpu_ioctl_run()
2204 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in kvm_arch_vcpu_ioctl_run()
2205 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in kvm_arch_vcpu_ioctl_run()
2262 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
2268 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
2270 &vcpu->arch.sie_block->cputm, 8); in kvm_s390_store_status_unloaded()
2271 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
2277 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
2358 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2378 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_start()
2384 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
2398 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2404 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); in kvm_s390_vcpu_stop()
2422 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
2436 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
2437 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
2503 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
2582 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
2599 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
2605 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
2669 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
2722 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
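
All of the references above reach architecture-specific state through the arch members embedded in the generic KVM structures: VM-wide state sits in kvm->arch (epoch, gmap, sca, dbf, float_int, crypto, model, the use_*/user_* flags, start_stop_lock), while per-vCPU state sits in vcpu->arch (chiefly the sie_block hardware control block, plus gmap, local_int, the pfault_* fields and the saved host registers). The authoritative definitions are struct kvm_arch and struct kvm_vcpu_arch in arch/s390/include/asm/kvm_host.h; the fragment below is only a condensed, illustrative sketch assembled from the fields that appear in this listing, under a hypothetical name, and is not the real layout.

/*
 * Illustrative sketch only -- assembled from the VM-wide fields referenced
 * in the listing above. The real structure is struct kvm_arch in
 * arch/s390/include/asm/kvm_host.h; field order and exact types may differ.
 */
struct kvm_arch_sketch {                        /* hypothetical name */
	struct sca_block *sca;                  /* system control area, one SDA slot per vCPU */
	debug_info_t *dbf;                      /* s390 debug feature trace buffer */
	struct gmap *gmap;                      /* guest address space; NULL for ucontrol VMs */
	struct kvm_s390_float_interrupt float_int; /* floating interrupts: lock + lists */
	struct kvm_s390_crypto crypto;          /* crycb, crycbd, aes_kw, dea_kw */
	struct kvm_s390_cpu_model model;        /* fac mask/list, cpu_id, ibc */
	u64 epoch;                              /* TOD epoch, copied into every SIE block */
	int use_irqchip, use_cmma, user_sigp, user_stsi, css_support;
	spinlock_t start_stop_lock;             /* serializes vCPU start/stop */
};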