Lines matching refs: vcpu — every line of the s390 KVM DIAGNOSE handler (arch/s390/kvm/diag.c) that references vcpu. The leading number is the line number in that file, the trailing "in <function>()" names the enclosing function, and "argument" marks lines where vcpu appears as a function parameter.
23 static int diag_release_pages(struct kvm_vcpu *vcpu) in diag_release_pages() argument
26 unsigned long prefix = kvm_s390_get_prefix(vcpu); in diag_release_pages()
28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; in diag_release_pages()
33 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in diag_release_pages()
35 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); in diag_release_pages()
36 vcpu->stat.diagnose_10++; in diag_release_pages()
43 gmap_discard(vcpu->arch.gmap, start, end); in diag_release_pages()
51 gmap_discard(vcpu->arch.gmap, start, prefix); in diag_release_pages()
53 gmap_discard(vcpu->arch.gmap, 0, 4096); in diag_release_pages()
55 gmap_discard(vcpu->arch.gmap, 4096, 8192); in diag_release_pages()
56 gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end); in diag_release_pages()
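DIAGNOSE 0x10 asks the host to release the guest page range held in the two registers named by the instruction's ipa nibbles (lines 28-29). The gmap_discard() calls at lines 43-56 split that range around the 8K prefix area; the sketch below is a standalone model of that split. The helper name and the branch conditions are illustrative (they do not reference vcpu and so are not part of this listing).

/* Illustrative sketch of the interval split performed by the calls above:
 * discard [start, end) but treat the 8K prefix area specially, because
 * guest real pages 0 and 1 are swapped with the two prefix pages. */
static void discard_skipping_prefix(struct gmap *gmap, unsigned long start,
				    unsigned long end, unsigned long prefix)
{
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		/* Fast path: the range does not touch the prefix pages. */
		gmap_discard(gmap, start, end);
		return;
	}
	/* Slow path: discard around the prefix pages ... */
	gmap_discard(gmap, start, prefix);
	/* ... and discard absolute pages 0 and 1, which the prefix pages
	 * actually map to (the 0/4096 and 4096/8192 calls above). */
	if (start <= prefix)
		gmap_discard(gmap, 0, PAGE_SIZE);
	if (end > prefix + PAGE_SIZE)
		gmap_discard(gmap, PAGE_SIZE, 2 * PAGE_SIZE);
	gmap_discard(gmap, prefix + 2 * PAGE_SIZE, end);
}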
61 static int __diag_page_ref_service(struct kvm_vcpu *vcpu) in __diag_page_ref_service() argument
75 u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; in __diag_page_ref_service()
76 u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); in __diag_page_ref_service()
78 if (vcpu->run->s.regs.gprs[rx] & 7) in __diag_page_ref_service()
79 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in __diag_page_ref_service()
80 rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); in __diag_page_ref_service()
82 return kvm_s390_inject_prog_cond(vcpu, rc); in __diag_page_ref_service()
84 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in __diag_page_ref_service()
88 if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { in __diag_page_ref_service()
94 vcpu->run->s.regs.gprs[ry] = 8; in __diag_page_ref_service()
100 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in __diag_page_ref_service()
102 if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr)) in __diag_page_ref_service()
103 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in __diag_page_ref_service()
105 vcpu->arch.pfault_token = parm.token_addr; in __diag_page_ref_service()
106 vcpu->arch.pfault_select = parm.select_mask; in __diag_page_ref_service()
107 vcpu->arch.pfault_compare = parm.compare_mask; in __diag_page_ref_service()
108 vcpu->run->s.regs.gprs[ry] = 0; in __diag_page_ref_service()
119 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in __diag_page_ref_service()
121 vcpu->run->s.regs.gprs[ry] = 0; in __diag_page_ref_service()
126 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in __diag_page_ref_service()
127 vcpu->run->s.regs.gprs[ry] = 4; in __diag_page_ref_service()
129 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in __diag_page_ref_service()
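DIAG 0x258 registers or cancels the pfault (page-fault handshake) token. The handler reads a parameter block from the guest address in gprs[rx], which must be 8-byte aligned (the "& 7" test at line 78), and reports the result in gprs[ry]: 0 on success (lines 108, 121), 8 if a token is already registered (line 94), 4 if a cancel finds no outstanding token (line 127). A sketch of the parameter block follows; only token_addr, select_mask and compare_mask appear in this listing, so the remaining fields and the exact ordering are assumptions.

/* Hypothetical layout of the DIAG 0x258 parameter block read at line 80. */
struct prs_parm_sketch {
	u16 code;		/* function code identifying DIAG 0x258 */
	u16 subcode;		/* TOKEN / CANCEL subfunction (names assumed) */
	u16 parm_len;
	u16 parm_version;
	u64 token_addr;		/* guest address used as the pfault token (line 105) */
	u64 select_mask;	/* copied to vcpu->arch.pfault_select (line 106) */
	u64 compare_mask;	/* copied to vcpu->arch.pfault_compare (line 107) */
};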
141 static int __diag_time_slice_end(struct kvm_vcpu *vcpu) in __diag_time_slice_end() argument
143 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); in __diag_time_slice_end()
144 vcpu->stat.diagnose_44++; in __diag_time_slice_end()
145 kvm_vcpu_on_spin(vcpu); in __diag_time_slice_end()
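DIAG 0x44 is the guest's way of voluntarily giving up the rest of its time slice; KVM maps it onto kvm_vcpu_on_spin() (line 145). On the guest side it is a single instruction; a minimal sketch:

/* Guest-side sketch: yield the current time slice to the hypervisor. */
static inline void diag44_yield(void)
{
	asm volatile("diag 0,0,0x44");
}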
149 static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) in __diag_time_slice_end_directed() argument
151 struct kvm *kvm = vcpu->kvm; in __diag_time_slice_end_directed()
156 tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in __diag_time_slice_end_directed()
157 vcpu->stat.diagnose_9c++; in __diag_time_slice_end_directed()
158 VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid); in __diag_time_slice_end_directed()
160 if (tid == vcpu->vcpu_id) in __diag_time_slice_end_directed()
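DIAG 0x9c is a directed yield: the register selected by the high nibble of the ipa carries the CPU address of the vCPU the caller is waiting on (line 156), and yielding to yourself (line 160) is a no-op. A guest-side sketch, with the helper name being illustrative:

/* Guest-side sketch: hint the hypervisor to schedule the vCPU that holds a
 * resource we are spinning on.  The target CPU address goes in the register
 * named by the first operand. */
static inline void diag9c_yield_to(unsigned short cpu_address)
{
	asm volatile("diag %0,0,0x9c" : : "d" (cpu_address));
}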
172 static int __diag_ipl_functions(struct kvm_vcpu *vcpu) in __diag_ipl_functions() argument
174 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; in __diag_ipl_functions()
175 unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; in __diag_ipl_functions()
177 VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); in __diag_ipl_functions()
180 vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; in __diag_ipl_functions()
183 vcpu->run->s390_reset_flags = 0; in __diag_ipl_functions()
189 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in __diag_ipl_functions()
190 kvm_s390_vcpu_stop(vcpu); in __diag_ipl_functions()
191 vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; in __diag_ipl_functions()
192 vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; in __diag_ipl_functions()
193 vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; in __diag_ipl_functions()
194 vcpu->run->exit_reason = KVM_EXIT_S390_RESET; in __diag_ipl_functions()
195 VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx", in __diag_ipl_functions()
196 vcpu->run->s390_reset_flags); in __diag_ipl_functions()
197 trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags); in __diag_ipl_functions()
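DIAG 0x308 (IPL functions) cannot be completed inside the kernel: the handler only records the requested resets in run->s390_reset_flags and exits to userspace with KVM_EXIT_S390_RESET (line 194). A minimal userspace sketch of the receiving side, using only the flag names visible above; what a VMM actually does for each flag is up to the VMM:

/* Userspace sketch: react to the reset request signalled by the handler. */
#include <linux/kvm.h>

static void handle_s390_reset(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_S390_RESET)
		return;
	if (run->s390_reset_flags & KVM_S390_RESET_CLEAR) {
		/* clear guest memory before the re-IPL */
	}
	if (run->s390_reset_flags & KVM_S390_RESET_SUBSYSTEM) {
		/* reset the emulated I/O subsystem */
	}
	if (run->s390_reset_flags & KVM_S390_RESET_CPU_INIT) {
		/* reinitialize the vCPUs */
	}
	if (run->s390_reset_flags & KVM_S390_RESET_IPL) {
		/* reload and restart the guest (re-IPL) */
	}
}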
201 static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) in __diag_virtio_hypercall() argument
206 if (!vcpu->kvm->arch.css_support || in __diag_virtio_hypercall()
207 (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) in __diag_virtio_hypercall()
216 ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS, in __diag_virtio_hypercall()
217 vcpu->run->s.regs.gprs[2] & 0xffffffff, in __diag_virtio_hypercall()
218 8, &vcpu->run->s.regs.gprs[3], in __diag_virtio_hypercall()
219 vcpu->run->s.regs.gprs[4]); in __diag_virtio_hypercall()
226 vcpu->run->s.regs.gprs[2] = ret; in __diag_virtio_hypercall()
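DIAG 0x500 with gprs[1] == KVM_S390_VIRTIO_CCW_NOTIFY is the virtio-ccw "kick" hypercall: the subchannel id is in gprs[2], the 8-byte payload (virtqueue index) in gprs[3], the notification cookie in gprs[4], and the kvm_io_bus_write_cookie() result is passed back in gprs[2] (line 226). A guest-side sketch of the call; the exact inline-asm constraints are an approximation, not the driver source:

/* Guest-side sketch of the DIAG 0x500 notify hypercall, following the
 * register convention of the handler above. */
static inline long diag500_ccw_notify(unsigned long schid,
				      unsigned long queue_index,
				      unsigned long cookie)
{
	register unsigned long r1 asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register unsigned long r2 asm("2") = schid;
	register unsigned long r3 asm("3") = queue_index;
	register unsigned long r4 asm("4") = cookie;

	asm volatile("diag 2,4,0x500"
		     : "+d" (r2)
		     : "d" (r1), "d" (r3), "d" (r4)
		     : "memory", "cc");
	return r2;	/* host return value from line 226 */
}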
231 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) in kvm_s390_handle_diag() argument
233 int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff; in kvm_s390_handle_diag()
235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_diag()
236 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_diag()
238 trace_kvm_s390_handle_diag(vcpu, code); in kvm_s390_handle_diag()
241 return diag_release_pages(vcpu); in kvm_s390_handle_diag()
243 return __diag_time_slice_end(vcpu); in kvm_s390_handle_diag()
245 return __diag_time_slice_end_directed(vcpu); in kvm_s390_handle_diag()
247 return __diag_page_ref_service(vcpu); in kvm_s390_handle_diag()
249 return __diag_ipl_functions(vcpu); in kvm_s390_handle_diag()
251 return __diag_virtio_hypercall(vcpu); in kvm_s390_handle_diag()
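kvm_s390_handle_diag() extracts the DIAGNOSE function code from the instruction's base/displacement (line 233), rejects problem-state callers with a privileged-operation exception (lines 235-236), and dispatches by code. The case labels do not reference vcpu and are therefore filtered out of this listing; the mapping below is reconstructed from the kernel's s390 DIAGNOSE documentation:

/* Reconstructed dispatch table (the switch labels are not part of this
 * refs listing):
 *	0x010 -> diag_release_pages()              release guest pages
 *	0x044 -> __diag_time_slice_end()           voluntary time slice end
 *	0x09c -> __diag_time_slice_end_directed()  directed yield
 *	0x258 -> __diag_page_ref_service()         pfault token handling
 *	0x308 -> __diag_ipl_functions()            reset / re-IPL requests
 *	0x500 -> __diag_virtio_hypercall()         virtio-ccw notification
 * Any other code is refused; the default case returns an error (the exact
 * errno is not part of this listing). */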