Lines Matching refs:vcpu
34 static int handle_set_clock(struct kvm_vcpu *vcpu) in handle_set_clock() argument
42 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
43 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_clock()
45 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_clock()
47 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_clock()
48 rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); in handle_set_clock()
50 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_clock()
53 kvm_s390_set_psw_cc(vcpu, 3); in handle_set_clock()
58 mutex_lock(&vcpu->kvm->lock); in handle_set_clock()
59 kvm_for_each_vcpu(i, cpup, vcpu->kvm) in handle_set_clock()
61 mutex_unlock(&vcpu->kvm->lock); in handle_set_clock()
63 kvm_s390_set_psw_cc(vcpu, 0); in handle_set_clock()
67 static int handle_set_prefix(struct kvm_vcpu *vcpu) in handle_set_prefix() argument
74 vcpu->stat.instruction_spx++; in handle_set_prefix()
76 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_prefix()
77 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_prefix()
79 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_prefix()
83 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_prefix()
86 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_set_prefix()
88 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_prefix()
97 if (kvm_is_error_gpa(vcpu->kvm, address)) in handle_set_prefix()
98 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_set_prefix()
100 kvm_s390_set_prefix(vcpu, address); in handle_set_prefix()
102 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); in handle_set_prefix()
103 trace_kvm_s390_handle_prefix(vcpu, 1, address); in handle_set_prefix()
107 static int handle_store_prefix(struct kvm_vcpu *vcpu) in handle_store_prefix() argument
114 vcpu->stat.instruction_stpx++; in handle_store_prefix()
116 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_prefix()
117 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_prefix()
119 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_prefix()
123 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_prefix()
125 address = kvm_s390_get_prefix(vcpu); in handle_store_prefix()
128 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_store_prefix()
130 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_prefix()
132 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); in handle_store_prefix()
133 trace_kvm_s390_handle_prefix(vcpu, 0, address); in handle_store_prefix()
137 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) in handle_store_cpu_address() argument
139 u16 vcpu_id = vcpu->vcpu_id; in handle_store_cpu_address()
144 vcpu->stat.instruction_stap++; in handle_store_cpu_address()
146 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_cpu_address()
147 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_cpu_address()
149 ga = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_cpu_address()
152 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_cpu_address()
154 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); in handle_store_cpu_address()
156 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_cpu_address()
158 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); in handle_store_cpu_address()
159 trace_kvm_s390_handle_stap(vcpu, ga); in handle_store_cpu_address()
163 static int __skey_check_enable(struct kvm_vcpu *vcpu) in __skey_check_enable() argument
166 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) in __skey_check_enable()
170 trace_kvm_s390_skey_related_inst(vcpu); in __skey_check_enable()
171 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); in __skey_check_enable()
176 static int handle_skey(struct kvm_vcpu *vcpu) in handle_skey() argument
178 int rc = __skey_check_enable(vcpu); in handle_skey()
182 vcpu->stat.instruction_storage_key++; in handle_skey()
184 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_skey()
185 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_skey()
187 kvm_s390_rewind_psw(vcpu, 4); in handle_skey()
188 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); in handle_skey()
192 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) in handle_ipte_interlock() argument
194 vcpu->stat.instruction_ipte_interlock++; in handle_ipte_interlock()
195 if (psw_bits(vcpu->arch.sie_block->gpsw).p) in handle_ipte_interlock()
196 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_ipte_interlock()
197 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); in handle_ipte_interlock()
198 kvm_s390_rewind_psw(vcpu, 4); in handle_ipte_interlock()
199 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); in handle_ipte_interlock()
203 static int handle_test_block(struct kvm_vcpu *vcpu) in handle_test_block() argument
208 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_test_block()
209 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_test_block()
211 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); in handle_test_block()
212 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_test_block()
213 addr = kvm_s390_logical_to_effective(vcpu, addr); in handle_test_block()
214 if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) in handle_test_block()
215 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_test_block()
216 addr = kvm_s390_real_to_abs(vcpu, addr); in handle_test_block()
218 if (kvm_is_error_gpa(vcpu->kvm, addr)) in handle_test_block()
219 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_test_block()
224 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) in handle_test_block()
226 kvm_s390_set_psw_cc(vcpu, 0); in handle_test_block()
227 vcpu->run->s.regs.gprs[0] = 0; in handle_test_block()
231 static int handle_tpi(struct kvm_vcpu *vcpu) in handle_tpi() argument
240 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_tpi()
242 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_tpi()
244 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); in handle_tpi()
246 kvm_s390_set_psw_cc(vcpu, 0); in handle_tpi()
259 rc = write_guest(vcpu, addr, ar, &tpi_data, len); in handle_tpi()
261 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_tpi()
270 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { in handle_tpi()
279 kvm_s390_set_psw_cc(vcpu, 1); in handle_tpi()
287 if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { in handle_tpi()
295 static int handle_tsch(struct kvm_vcpu *vcpu) in handle_tsch() argument
301 if (vcpu->run->s.regs.gprs[1]) in handle_tsch()
302 inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, in handle_tsch()
303 vcpu->run->s.regs.gprs[1]); in handle_tsch()
313 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; in handle_tsch()
314 vcpu->run->s390_tsch.dequeued = !!inti; in handle_tsch()
316 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; in handle_tsch()
317 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; in handle_tsch()
318 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; in handle_tsch()
319 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; in handle_tsch()
321 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; in handle_tsch()
326 static int handle_io_inst(struct kvm_vcpu *vcpu) in handle_io_inst() argument
328 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); in handle_io_inst()
330 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_io_inst()
331 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_io_inst()
333 if (vcpu->kvm->arch.css_support) { in handle_io_inst()
338 if (vcpu->arch.sie_block->ipa == 0xb236) in handle_io_inst()
339 return handle_tpi(vcpu); in handle_io_inst()
340 if (vcpu->arch.sie_block->ipa == 0xb235) in handle_io_inst()
341 return handle_tsch(vcpu); in handle_io_inst()
349 kvm_s390_set_psw_cc(vcpu, 3); in handle_io_inst()
354 static int handle_stfl(struct kvm_vcpu *vcpu) in handle_stfl() argument
359 vcpu->stat.instruction_stfl++; in handle_stfl()
361 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stfl()
362 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stfl()
368 fac = *vcpu->kvm->arch.model.fac->list >> 32; in handle_stfl()
369 rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), in handle_stfl()
373 VCPU_EVENT(vcpu, 5, "store facility list value %x", fac); in handle_stfl()
374 trace_kvm_s390_handle_stfl(vcpu, fac); in handle_stfl()
400 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) in kvm_s390_handle_lpsw() argument
402 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; in kvm_s390_handle_lpsw()
409 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lpsw()
411 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in kvm_s390_handle_lpsw()
413 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
415 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in kvm_s390_handle_lpsw()
417 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lpsw()
419 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
424 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
428 static int handle_lpswe(struct kvm_vcpu *vcpu) in handle_lpswe() argument
435 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lpswe()
436 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lpswe()
438 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_lpswe()
440 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
441 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in handle_lpswe()
443 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lpswe()
444 vcpu->arch.sie_block->gpsw = new_psw; in handle_lpswe()
445 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) in handle_lpswe()
446 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
450 static int handle_stidp(struct kvm_vcpu *vcpu) in handle_stidp() argument
452 u64 stidp_data = vcpu->arch.stidp_data; in handle_stidp()
457 vcpu->stat.instruction_stidp++; in handle_stidp()
459 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stidp()
460 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stidp()
462 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stidp()
465 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stidp()
467 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); in handle_stidp()
469 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_stidp()
471 VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); in handle_stidp()
475 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) in handle_stsi_3_2_2() argument
480 cpus = atomic_read(&vcpu->kvm->online_vcpus); in handle_stsi_3_2_2()
502 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, in insert_stsi_usr_data() argument
505 vcpu->run->exit_reason = KVM_EXIT_S390_STSI; in insert_stsi_usr_data()
506 vcpu->run->s390_stsi.addr = addr; in insert_stsi_usr_data()
507 vcpu->run->s390_stsi.ar = ar; in insert_stsi_usr_data()
508 vcpu->run->s390_stsi.fc = fc; in insert_stsi_usr_data()
509 vcpu->run->s390_stsi.sel1 = sel1; in insert_stsi_usr_data()
510 vcpu->run->s390_stsi.sel2 = sel2; in insert_stsi_usr_data()
513 static int handle_stsi(struct kvm_vcpu *vcpu) in handle_stsi() argument
515 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; in handle_stsi()
516 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; in handle_stsi()
517 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; in handle_stsi()
523 vcpu->stat.instruction_stsi++; in handle_stsi()
524 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); in handle_stsi()
526 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stsi()
527 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stsi()
530 kvm_s390_set_psw_cc(vcpu, 3); in handle_stsi()
534 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 in handle_stsi()
535 || vcpu->run->s.regs.gprs[1] & 0xffff0000) in handle_stsi()
536 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
539 vcpu->run->s.regs.gprs[0] = 3 << 28; in handle_stsi()
540 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
544 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stsi()
547 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
564 handle_stsi_3_2_2(vcpu, (void *) mem); in handle_stsi()
568 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); in handle_stsi()
570 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_stsi()
573 if (vcpu->kvm->arch.user_stsi) { in handle_stsi()
574 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); in handle_stsi()
577 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); in handle_stsi()
579 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
580 vcpu->run->s.regs.gprs[0] = 0; in handle_stsi()
583 kvm_s390_set_psw_cc(vcpu, 3); in handle_stsi()
622 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) in kvm_s390_handle_b2() argument
631 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; in kvm_s390_handle_b2()
633 return handler(vcpu); in kvm_s390_handle_b2()
638 static int handle_epsw(struct kvm_vcpu *vcpu) in handle_epsw() argument
642 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_epsw()
645 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; in handle_epsw()
646 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; in handle_epsw()
648 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; in handle_epsw()
649 vcpu->run->s.regs.gprs[reg2] |= in handle_epsw()
650 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; in handle_epsw()
665 static int handle_pfmf(struct kvm_vcpu *vcpu) in handle_pfmf() argument
670 vcpu->stat.instruction_pfmf++; in handle_pfmf()
672 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_pfmf()
675 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_pfmf()
677 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_pfmf()
678 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_pfmf()
680 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) in handle_pfmf()
681 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
684 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) in handle_pfmf()
685 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
688 if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC)) in handle_pfmf()
689 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
691 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_pfmf()
692 start = kvm_s390_logical_to_effective(vcpu, start); in handle_pfmf()
694 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { in handle_pfmf()
706 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
709 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
710 if (kvm_s390_check_low_addr_prot_real(vcpu, start)) in handle_pfmf()
711 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_pfmf()
718 if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0) in handle_pfmf()
719 abs_addr = kvm_s390_real_to_abs(vcpu, start); in handle_pfmf()
722 useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr)); in handle_pfmf()
724 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
726 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
728 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
731 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { in handle_pfmf()
732 int rc = __skey_check_enable(vcpu); in handle_pfmf()
737 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, in handle_pfmf()
738 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) in handle_pfmf()
739 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
744 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) in handle_pfmf()
745 vcpu->run->s.regs.gprs[reg2] = end; in handle_pfmf()
749 static int handle_essa(struct kvm_vcpu *vcpu) in handle_essa() argument
752 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; in handle_essa()
757 VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); in handle_essa()
758 gmap = vcpu->arch.gmap; in handle_essa()
759 vcpu->stat.instruction_essa++; in handle_essa()
760 if (!kvm_s390_cmma_enabled(vcpu->kvm)) in handle_essa()
761 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_essa()
763 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_essa()
764 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_essa()
766 if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) in handle_essa()
767 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_essa()
770 kvm_s390_rewind_psw(vcpu, 4); in handle_essa()
771 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ in handle_essa()
772 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); in handle_essa()
784 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_essa()
797 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) in kvm_s390_handle_b9() argument
802 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; in kvm_s390_handle_b9()
804 return handler(vcpu); in kvm_s390_handle_b9()
809 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_lctl() argument
811 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_lctl()
812 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_lctl()
818 vcpu->stat.instruction_lctl++; in kvm_s390_handle_lctl()
820 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_lctl()
821 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lctl()
823 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_lctl()
826 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lctl()
828 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); in kvm_s390_handle_lctl()
829 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_lctl()
832 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_lctl()
834 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lctl()
838 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; in kvm_s390_handle_lctl()
839 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; in kvm_s390_handle_lctl()
844 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_handle_lctl()
848 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_stctl() argument
850 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_stctl()
851 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_stctl()
857 vcpu->stat.instruction_stctl++; in kvm_s390_handle_stctl()
859 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_stctl()
860 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_stctl()
862 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_stctl()
865 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_stctl()
867 VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); in kvm_s390_handle_stctl()
868 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_stctl()
873 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in kvm_s390_handle_stctl()
878 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_stctl()
879 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in kvm_s390_handle_stctl()
882 static int handle_lctlg(struct kvm_vcpu *vcpu) in handle_lctlg() argument
884 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_lctlg()
885 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_lctlg()
891 vcpu->stat.instruction_lctlg++; in handle_lctlg()
893 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lctlg()
894 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lctlg()
896 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_lctlg()
899 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lctlg()
901 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); in handle_lctlg()
902 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); in handle_lctlg()
905 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_lctlg()
907 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lctlg()
911 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; in handle_lctlg()
916 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in handle_lctlg()
920 static int handle_stctg(struct kvm_vcpu *vcpu) in handle_stctg() argument
922 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_stctg()
923 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_stctg()
929 vcpu->stat.instruction_stctg++; in handle_stctg()
931 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stctg()
932 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stctg()
934 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_stctg()
937 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stctg()
939 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); in handle_stctg()
940 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); in handle_stctg()
945 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in handle_stctg()
950 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_stctg()
951 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in handle_stctg()
959 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) in kvm_s390_handle_eb() argument
963 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; in kvm_s390_handle_eb()
965 return handler(vcpu); in kvm_s390_handle_eb()
969 static int handle_tprot(struct kvm_vcpu *vcpu) in handle_tprot() argument
977 vcpu->stat.instruction_tprot++; in handle_tprot()
979 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_tprot()
980 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_tprot()
982 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); in handle_tprot()
989 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
990 ipte_lock(vcpu); in handle_tprot()
991 ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); in handle_tprot()
995 ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); in handle_tprot()
999 ret = kvm_s390_inject_program_int(vcpu, ret); in handle_tprot()
1002 kvm_s390_set_psw_cc(vcpu, 3); in handle_tprot()
1008 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); in handle_tprot()
1010 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_tprot()
1014 kvm_s390_set_psw_cc(vcpu, cc); in handle_tprot()
1018 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
1019 ipte_unlock(vcpu); in handle_tprot()
1023 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) in kvm_s390_handle_e5() argument
1026 if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) in kvm_s390_handle_e5()
1027 return handle_tprot(vcpu); in kvm_s390_handle_e5()
1031 static int handle_sckpf(struct kvm_vcpu *vcpu) in handle_sckpf() argument
1035 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_sckpf()
1036 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_sckpf()
1038 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) in handle_sckpf()
1039 return kvm_s390_inject_program_int(vcpu, in handle_sckpf()
1042 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; in handle_sckpf()
1043 vcpu->arch.sie_block->todpr = value; in handle_sckpf()
1052 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) in kvm_s390_handle_01() argument
1056 handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; in kvm_s390_handle_01()
1058 return handler(vcpu); in kvm_s390_handle_01()