This source file includes the following definitions:
- kvm_s390_fac_size
- kvm_arch_hardware_enable
- kvm_arch_check_processor_compat
- kvm_clock_sync_scb
- kvm_clock_sync
- kvm_arch_hardware_setup
- kvm_arch_hardware_unsetup
- allow_cpu_feat
- plo_test_bit
- __insn32_query
- kvm_s390_cpu_feat_init
- kvm_arch_init
- kvm_arch_exit
- kvm_arch_dev_ioctl
- kvm_vm_ioctl_check_extension
- kvm_s390_sync_dirty_log
- kvm_vm_ioctl_get_dirty_log
- icpt_operexc_on_all_vcpus
- kvm_vm_ioctl_enable_cap
- kvm_s390_get_mem_control
- kvm_s390_set_mem_control
- kvm_s390_vcpu_crypto_reset_all
- kvm_s390_vm_set_crypto
- kvm_s390_sync_request_broadcast
- kvm_s390_vm_start_migration
- kvm_s390_vm_stop_migration
- kvm_s390_vm_set_migration
- kvm_s390_vm_get_migration
- kvm_s390_set_tod_ext
- kvm_s390_set_tod_high
- kvm_s390_set_tod_low
- kvm_s390_set_tod
- kvm_s390_get_tod_clock
- kvm_s390_get_tod_ext
- kvm_s390_get_tod_high
- kvm_s390_get_tod_low
- kvm_s390_get_tod
- kvm_s390_set_processor
- kvm_s390_set_processor_feat
- kvm_s390_set_processor_subfunc
- kvm_s390_set_cpu_model
- kvm_s390_get_processor
- kvm_s390_get_machine
- kvm_s390_get_processor_feat
- kvm_s390_get_machine_feat
- kvm_s390_get_processor_subfunc
- kvm_s390_get_machine_subfunc
- kvm_s390_get_cpu_model
- kvm_s390_vm_set_attr
- kvm_s390_vm_get_attr
- kvm_s390_vm_has_attr
- kvm_s390_get_skeys
- kvm_s390_set_skeys
- gfn_to_memslot_approx
- kvm_s390_peek_cmma
- kvm_s390_next_dirty_cmma
- kvm_s390_get_cmma
- kvm_s390_get_cmma_bits
- kvm_s390_set_cmma_bits
- kvm_arch_vm_ioctl
- kvm_s390_apxa_installed
- kvm_s390_set_crycb_format
- kvm_arch_crypto_set_masks
- kvm_arch_crypto_clear_masks
- kvm_s390_get_initial_cpuid
- kvm_s390_crypto_init
- sca_dispose
- kvm_arch_init_vm
- kvm_arch_vcpu_destroy
- kvm_free_vcpus
- kvm_arch_destroy_vm
- __kvm_ucontrol_vcpu_init
- sca_del_vcpu
- sca_add_vcpu
- sca_copy_entry
- sca_copy_b_to_e
- sca_switch_to_extended
- sca_can_add_vcpu
- kvm_arch_vcpu_init
- __start_cpu_timer_accounting
- __stop_cpu_timer_accounting
- __enable_cpu_timer_accounting
- __disable_cpu_timer_accounting
- enable_cpu_timer_accounting
- disable_cpu_timer_accounting
- kvm_s390_set_cpu_timer
- kvm_s390_get_cpu_timer
- kvm_arch_vcpu_load
- kvm_arch_vcpu_put
- kvm_s390_vcpu_initial_reset
- kvm_arch_vcpu_postcreate
- kvm_has_pckmo_subfunc
- kvm_has_pckmo_ecc
- kvm_s390_vcpu_crypto_setup
- kvm_s390_vcpu_unsetup_cmma
- kvm_s390_vcpu_setup_cmma
- kvm_s390_vcpu_setup_model
- kvm_arch_vcpu_setup
- kvm_arch_vcpu_create
- kvm_arch_vcpu_runnable
- kvm_arch_vcpu_in_kernel
- kvm_s390_vcpu_block
- kvm_s390_vcpu_unblock
- kvm_s390_vcpu_request
- kvm_s390_vcpu_sie_inhibited
- kvm_s390_vcpu_request_handled
- exit_sie
- kvm_s390_sync_request
- kvm_gmap_notifier
- kvm_arch_no_poll
- kvm_arch_vcpu_should_kick
- kvm_arch_vcpu_ioctl_get_one_reg
- kvm_arch_vcpu_ioctl_set_one_reg
- kvm_arch_vcpu_ioctl_initial_reset
- kvm_arch_vcpu_ioctl_set_regs
- kvm_arch_vcpu_ioctl_get_regs
- kvm_arch_vcpu_ioctl_set_sregs
- kvm_arch_vcpu_ioctl_get_sregs
- kvm_arch_vcpu_ioctl_set_fpu
- kvm_arch_vcpu_ioctl_get_fpu
- kvm_arch_vcpu_ioctl_set_initial_psw
- kvm_arch_vcpu_ioctl_translate
- kvm_arch_vcpu_ioctl_set_guest_debug
- kvm_arch_vcpu_ioctl_get_mpstate
- kvm_arch_vcpu_ioctl_set_mpstate
- ibs_enabled
- kvm_s390_handle_requests
- kvm_s390_set_tod_clock
- kvm_arch_fault_in_page
- __kvm_inject_pfault_token
- kvm_arch_async_page_not_present
- kvm_arch_async_page_present
- kvm_arch_async_page_ready
- kvm_arch_can_inject_async_page_present
- kvm_arch_setup_async_pf
- vcpu_pre_run
- vcpu_post_run_fault_in_sie
- vcpu_post_run
- __vcpu_run
- sync_regs
- store_regs
- kvm_arch_vcpu_ioctl_run
- kvm_s390_store_status_unloaded
- kvm_s390_vcpu_store_status
- __disable_ibs_on_vcpu
- __disable_ibs_on_all_vcpus
- __enable_ibs_on_vcpu
- kvm_s390_vcpu_start
- kvm_s390_vcpu_stop
- kvm_vcpu_ioctl_enable_cap
- kvm_s390_guest_mem_op
- kvm_arch_vcpu_async_ioctl
- kvm_arch_vcpu_ioctl
- kvm_arch_vcpu_fault
- kvm_arch_create_memslot
- kvm_arch_prepare_memory_region
- kvm_arch_commit_memory_region
- nonhyp_mask
- kvm_arch_vcpu_block_finish
- kvm_s390_init
- kvm_s390_exit
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

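/*
 * Layout of the 16-byte extended TOD clock as read by get_tod_clock_ext()
 * below: an 8-bit epoch index (multiple-epoch facility, facility 139)
 * followed by the 64-bit TOD value.
 */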
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

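/*
 * Number of 64-bit words reserved internally for the facility mask and
 * facility list; kvm_s390_fac_size() below checks at build time that this
 * fits both the architectural sizes and the lowcore stfle_fac_list.
 */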
#define SIZE_INTERNAL 16

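/* facilities that KVM always offers to guests (FACILITIES_KVM is generated at build time) */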
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };

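/* additional facilities that can be enabled on top via the CPU model (FACILITIES_KVM_CPUMODEL) */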
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);

static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

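/*
 * Adjust the guest epoch (and, with the multiple-epoch facility, the epoch
 * index) of one SIE control block after the host TOD clock has changed by
 * @delta, including carry handling on epoch overflow.
 */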
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD clock traveled forward, so the guest epoch has to be
	 * adjusted in the opposite direction.
	 */
	delta = -delta;

	/* sign extension - we are adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

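/*
 * Probe availability of one PERFORM LOCKED OPERATION function code: GR0 is
 * loaded with the function code plus the test bit (0x100); condition code 0
 * after PLO indicates the function is installed.
 */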
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

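/*
 * Execute the query variant (GR0 == 0) of an RRF-format instruction and
 * store the result in the buffer @query points to; used below for SORTL
 * and DEFLATE CONVERSION CALL.
 */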
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

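/*
 * Determine which (sub)functions and SIE features of the host can be
 * offered to guests: probe PLO, PTFF and the CPACF crypto queries, then
 * translate SCLP and machine properties into KVM CPU features, including
 * the prerequisites for nested virtualization (vSIE).
 */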
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORT */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out_debug_unreg;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out_gib_destroy;

	return 0;

out_gib_destroy:
	kvm_s390_gib_destroy();
out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

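/*
 * Pull dirty bits for @memslot out of the gmap into the KVM dirty log,
 * one page table's worth (_PAGE_ENTRIES pages) at a time.
 */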
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* walk all guest frames of the memslot, _PAGE_ENTRIES at a time */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* do not use PFMF interpretation together with CMMA */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

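/* make the given request on all VCPUs of the VM and kick them out of SIE */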
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->slots_lock held to avoid races with ourselves
 * and with kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock held to avoid races with ourselves
 * and with kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

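/*
 * Read the current guest TOD clock: host TOD (via STCKE) plus the guest
 * epoch, with carry into the epoch index when facility 139 is available.
 */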
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

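/*
 * CPU-model attribute setters: userspace may only change cpuid, ibc,
 * facility lists, features and subfunctions before the first VCPU has been
 * created. The requested IBC value is clamped to the range the machine
 * (sclp.ibc) actually supports.
 */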
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

1717 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1718 {
1719 int ret;
1720
1721 switch (attr->group) {
1722 case KVM_S390_VM_MEM_CTRL:
1723 switch (attr->attr) {
1724 case KVM_S390_VM_MEM_ENABLE_CMMA:
1725 case KVM_S390_VM_MEM_CLR_CMMA:
1726 ret = sclp.has_cmma ? 0 : -ENXIO;
1727 break;
1728 case KVM_S390_VM_MEM_LIMIT_SIZE:
1729 ret = 0;
1730 break;
1731 default:
1732 ret = -ENXIO;
1733 break;
1734 }
1735 break;
1736 case KVM_S390_VM_TOD:
1737 switch (attr->attr) {
1738 case KVM_S390_VM_TOD_LOW:
1739 case KVM_S390_VM_TOD_HIGH:
1740 ret = 0;
1741 break;
1742 default:
1743 ret = -ENXIO;
1744 break;
1745 }
1746 break;
1747 case KVM_S390_VM_CPU_MODEL:
1748 switch (attr->attr) {
1749 case KVM_S390_VM_CPU_PROCESSOR:
1750 case KVM_S390_VM_CPU_MACHINE:
1751 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1752 case KVM_S390_VM_CPU_MACHINE_FEAT:
1753 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1754 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1755 ret = 0;
1756 break;
1757 default:
1758 ret = -ENXIO;
1759 break;
1760 }
1761 break;
1762 case KVM_S390_VM_CRYPTO:
1763 switch (attr->attr) {
1764 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1765 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1766 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1767 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1768 ret = 0;
1769 break;
1770 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1771 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1772 ret = ap_instructions_available() ? 0 : -ENXIO;
1773 break;
1774 default:
1775 ret = -ENXIO;
1776 break;
1777 }
1778 break;
1779 case KVM_S390_VM_MIGRATION:
1780 ret = 0;
1781 break;
1782 default:
1783 ret = -ENXIO;
1784 break;
1785 }
1786
1787 return ret;
1788 }
1789
1790 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1791 {
1792 uint8_t *keys;
1793 uint64_t hva;
1794 int srcu_idx, i, r = 0;
1795
1796 if (args->flags != 0)
1797 return -EINVAL;
1798
1799
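/* Is this guest using storage keys? */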
1800 if (!mm_uses_skeys(current->mm))
1801 return KVM_S390_GET_SKEYS_NONE;
1802
1803
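/* Enforce sane limit on memory allocation */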
1804 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1805 return -EINVAL;
1806
1807 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1808 if (!keys)
1809 return -ENOMEM;
1810
1811 down_read(&current->mm->mmap_sem);
1812 srcu_idx = srcu_read_lock(&kvm->srcu);
1813 for (i = 0; i < args->count; i++) {
1814 hva = gfn_to_hva(kvm, args->start_gfn + i);
1815 if (kvm_is_error_hva(hva)) {
1816 r = -EFAULT;
1817 break;
1818 }
1819
1820 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1821 if (r)
1822 break;
1823 }
1824 srcu_read_unlock(&kvm->srcu, srcu_idx);
1825 up_read(&current->mm->mmap_sem);
1826
1827 if (!r) {
1828 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1829 sizeof(uint8_t) * args->count);
1830 if (r)
1831 r = -EFAULT;
1832 }
1833
1834 kvfree(keys);
1835 return r;
1836 }
1837
1838 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1839 {
1840 uint8_t *keys;
1841 uint64_t hva;
1842 int srcu_idx, i, r = 0;
1843 bool unlocked;
1844
1845 if (args->flags != 0)
1846 return -EINVAL;
1847
1848
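/* Enforce sane limit on memory allocation */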
1849 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1850 return -EINVAL;
1851
1852 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1853 if (!keys)
1854 return -ENOMEM;
1855
1856 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1857 sizeof(uint8_t) * args->count);
1858 if (r) {
1859 r = -EFAULT;
1860 goto out;
1861 }
1862
1863
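/* Enable storage key handling for the guest */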
1864 r = s390_enable_skey();
1865 if (r)
1866 goto out;
1867
1868 i = 0;
1869 down_read(&current->mm->mmap_sem);
1870 srcu_idx = srcu_read_lock(&kvm->srcu);
1871 while (i < args->count) {
1872 unlocked = false;
1873 hva = gfn_to_hva(kvm, args->start_gfn + i);
1874 if (kvm_is_error_hva(hva)) {
1875 r = -EFAULT;
1876 break;
1877 }
1878
1879
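/* Lowest order bit is reserved */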
1880 if (keys[i] & 0x01) {
1881 r = -EINVAL;
1882 break;
1883 }
1884
1885 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1886 if (r) {
1887 r = fixup_user_fault(current, current->mm, hva,
1888 FAULT_FLAG_WRITE, &unlocked);
1889 if (r)
1890 break;
1891 }
1892 if (!r)
1893 i++;
1894 }
1895 srcu_read_unlock(&kvm->srcu, srcu_idx);
1896 up_read(&current->mm->mmap_sem);
1897 out:
1898 kvfree(keys);
1899 return r;
1900 }
1901
1902
1903
1904
1905
1906
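/*
 * kvm_s390_get_cmma() stops reporting once the next dirty page is more
 * than this many pages away from the current one.
 */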
1907 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1908
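/* a single CMMA request is capped at the same limit as storage key requests */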
1909 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1910
1911
1912
1913
1914
1915
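/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when
 * the address falls in a hole. In that case the index of one of the
 * memslots bordering the hole is returned.
 */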
1916 static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1917 {
1918 int start = 0, end = slots->used_slots;
1919 int slot = atomic_read(&slots->lru_slot);
1920 struct kvm_memory_slot *memslots = slots->memslots;
1921
1922 if (gfn >= memslots[slot].base_gfn &&
1923 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1924 return slot;
1925
1926 while (start < end) {
1927 slot = start + (end - start) / 2;
1928
1929 if (gfn >= memslots[slot].base_gfn)
1930 end = slot;
1931 else
1932 start = slot + 1;
1933 }
1934
1935 if (start >= slots->used_slots)
1936 return slots->used_slots - 1;
1937
1938 if (gfn >= memslots[start].base_gfn &&
1939 gfn < memslots[start].base_gfn + memslots[start].npages) {
1940 atomic_set(&slots->lru_slot, start);
1941 }
1942
1943 return start;
1944 }
1945
1946 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1947 u8 *res, unsigned long bufsize)
1948 {
1949 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1950
1951 args->count = 0;
1952 while (args->count < bufsize) {
1953 hva = gfn_to_hva(kvm, cur_gfn);
1954
1955
1956
1957
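/*
 * We return an error if the first value was invalid, but we
 * return successfully if at least one value was copied.
 */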
1958 if (kvm_is_error_hva(hva))
1959 return args->count ? 0 : -EFAULT;
1960 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1961 pgstev = 0;
1962 res[args->count++] = (pgstev >> 24) & 0x43;
1963 cur_gfn++;
1964 }
1965
1966 return 0;
1967 }
1968
1969 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1970 unsigned long cur_gfn)
1971 {
1972 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1973 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1974 unsigned long ofs = cur_gfn - ms->base_gfn;
1975
1976 if (ms->base_gfn + ms->npages <= cur_gfn) {
1977 slotidx--;
1978
1979 if (slotidx < 0)
1980 slotidx = slots->used_slots - 1;
1981
1982 ms = slots->memslots + slotidx;
1983 ofs = 0;
1984 }
1985 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1986 while ((slotidx > 0) && (ofs >= ms->npages)) {
1987 slotidx--;
1988 ms = slots->memslots + slotidx;
1989 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1990 }
1991 return ms->base_gfn + ofs;
1992 }
1993
1994 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1995 u8 *res, unsigned long bufsize)
1996 {
1997 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1998 struct kvm_memslots *slots = kvm_memslots(kvm);
1999 struct kvm_memory_slot *ms;
2000
2001 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2002 ms = gfn_to_memslot(kvm, cur_gfn);
2003 args->count = 0;
2004 args->start_gfn = cur_gfn;
2005 if (!ms)
2006 return 0;
2007 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2008 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2009
2010 while (args->count < bufsize) {
2011 hva = gfn_to_hva(kvm, cur_gfn);
2012 if (kvm_is_error_hva(hva))
2013 return 0;
2014
2015 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2016 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2017 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2018 pgstev = 0;
2019
2020 res[args->count++] = (pgstev >> 24) & 0x43;
2021
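/* if the next dirty bit is too far away, stop */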
2022 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2023 return 0;
2024
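/* if we reached the previous "next", find the next one */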
2025 if (cur_gfn == next_gfn)
2026 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2027
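/* reached the end of memory or of the buffer, stop */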
2028 if ((next_gfn >= mem_end) ||
2029 (next_gfn - args->start_gfn >= bufsize))
2030 return 0;
2031 cur_gfn++;
2032
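/* reached the end of the current memslot, take the next one */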
2033 if (cur_gfn - ms->base_gfn >= ms->npages) {
2034 ms = gfn_to_memslot(kvm, cur_gfn);
2035 if (!ms)
2036 return 0;
2037 }
2038 }
2039 return 0;
2040 }
2041
2042
2043
2044
2045
2046
2047
2048
2049
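/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean pages is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */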
2050 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2051 struct kvm_s390_cmma_log *args)
2052 {
2053 unsigned long bufsize;
2054 int srcu_idx, peek, ret;
2055 u8 *values;
2056
2057 if (!kvm->arch.use_cmma)
2058 return -ENXIO;
2059
2060 if (args->flags & ~KVM_S390_CMMA_PEEK)
2061 return -EINVAL;
2062
2063 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2064 if (!peek && !kvm->arch.migration_mode)
2065 return -EINVAL;
2066
2067 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2068 if (!bufsize || !kvm->mm->context.uses_cmm) {
2069 memset(args, 0, sizeof(*args));
2070 return 0;
2071 }
2072
2073 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2074 memset(args, 0, sizeof(*args));
2075 return 0;
2076 }
2077
2078 values = vmalloc(bufsize);
2079 if (!values)
2080 return -ENOMEM;
2081
2082 down_read(&kvm->mm->mmap_sem);
2083 srcu_idx = srcu_read_lock(&kvm->srcu);
2084 if (peek)
2085 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2086 else
2087 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2088 srcu_read_unlock(&kvm->srcu, srcu_idx);
2089 up_read(&kvm->mm->mmap_sem);
2090
2091 if (kvm->arch.migration_mode)
2092 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2093 else
2094 args->remaining = 0;
2095
2096 if (copy_to_user((void __user *)args->values, values, args->count))
2097 ret = -EFAULT;
2098
2099 vfree(values);
2100 return ret;
2101 }
2102
2103
2104
2105
2106
2107
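/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */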
2108 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2109 const struct kvm_s390_cmma_log *args)
2110 {
2111 unsigned long hva, mask, pgstev, i;
2112 uint8_t *bits;
2113 int srcu_idx, r = 0;
2114
2115 mask = args->mask;
2116
2117 if (!kvm->arch.use_cmma)
2118 return -ENXIO;
2119
2120 if (args->flags != 0)
2121 return -EINVAL;
2122
2123 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2124 return -EINVAL;
2125
2126 if (args->count == 0)
2127 return 0;
2128
2129 bits = vmalloc(array_size(sizeof(*bits), args->count));
2130 if (!bits)
2131 return -ENOMEM;
2132
2133 r = copy_from_user(bits, (void __user *)args->values, args->count);
2134 if (r) {
2135 r = -EFAULT;
2136 goto out;
2137 }
2138
2139 down_read(&kvm->mm->mmap_sem);
2140 srcu_idx = srcu_read_lock(&kvm->srcu);
2141 for (i = 0; i < args->count; i++) {
2142 hva = gfn_to_hva(kvm, args->start_gfn + i);
2143 if (kvm_is_error_hva(hva)) {
2144 r = -EFAULT;
2145 break;
2146 }
2147
2148 pgstev = bits[i];
2149 pgstev = pgstev << 24;
2150 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2151 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2152 }
2153 srcu_read_unlock(&kvm->srcu, srcu_idx);
2154 up_read(&kvm->mm->mmap_sem);
2155
2156 if (!kvm->mm->context.uses_cmm) {
2157 down_write(&kvm->mm->mmap_sem);
2158 kvm->mm->context.uses_cmm = 1;
2159 up_write(&kvm->mm->mmap_sem);
2160 }
2161 out:
2162 vfree(bits);
2163 return r;
2164 }
2165
2166 long kvm_arch_vm_ioctl(struct file *filp,
2167 unsigned int ioctl, unsigned long arg)
2168 {
2169 struct kvm *kvm = filp->private_data;
2170 void __user *argp = (void __user *)arg;
2171 struct kvm_device_attr attr;
2172 int r;
2173
2174 switch (ioctl) {
2175 case KVM_S390_INTERRUPT: {
2176 struct kvm_s390_interrupt s390int;
2177
2178 r = -EFAULT;
2179 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2180 break;
2181 r = kvm_s390_inject_vm(kvm, &s390int);
2182 break;
2183 }
2184 case KVM_CREATE_IRQCHIP: {
2185 struct kvm_irq_routing_entry routing;
2186
2187 r = -EINVAL;
2188 if (kvm->arch.use_irqchip) {
2189
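/* Set up dummy routing. */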
2190 memset(&routing, 0, sizeof(routing));
2191 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2192 }
2193 break;
2194 }
2195 case KVM_SET_DEVICE_ATTR: {
2196 r = -EFAULT;
2197 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2198 break;
2199 r = kvm_s390_vm_set_attr(kvm, &attr);
2200 break;
2201 }
2202 case KVM_GET_DEVICE_ATTR: {
2203 r = -EFAULT;
2204 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2205 break;
2206 r = kvm_s390_vm_get_attr(kvm, &attr);
2207 break;
2208 }
2209 case KVM_HAS_DEVICE_ATTR: {
2210 r = -EFAULT;
2211 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2212 break;
2213 r = kvm_s390_vm_has_attr(kvm, &attr);
2214 break;
2215 }
2216 case KVM_S390_GET_SKEYS: {
2217 struct kvm_s390_skeys args;
2218
2219 r = -EFAULT;
2220 if (copy_from_user(&args, argp,
2221 sizeof(struct kvm_s390_skeys)))
2222 break;
2223 r = kvm_s390_get_skeys(kvm, &args);
2224 break;
2225 }
2226 case KVM_S390_SET_SKEYS: {
2227 struct kvm_s390_skeys args;
2228
2229 r = -EFAULT;
2230 if (copy_from_user(&args, argp,
2231 sizeof(struct kvm_s390_skeys)))
2232 break;
2233 r = kvm_s390_set_skeys(kvm, &args);
2234 break;
2235 }
2236 case KVM_S390_GET_CMMA_BITS: {
2237 struct kvm_s390_cmma_log args;
2238
2239 r = -EFAULT;
2240 if (copy_from_user(&args, argp, sizeof(args)))
2241 break;
2242 mutex_lock(&kvm->slots_lock);
2243 r = kvm_s390_get_cmma_bits(kvm, &args);
2244 mutex_unlock(&kvm->slots_lock);
2245 if (!r) {
2246 r = copy_to_user(argp, &args, sizeof(args));
2247 if (r)
2248 r = -EFAULT;
2249 }
2250 break;
2251 }
2252 case KVM_S390_SET_CMMA_BITS: {
2253 struct kvm_s390_cmma_log args;
2254
2255 r = -EFAULT;
2256 if (copy_from_user(&args, argp, sizeof(args)))
2257 break;
2258 mutex_lock(&kvm->slots_lock);
2259 r = kvm_s390_set_cmma_bits(kvm, &args);
2260 mutex_unlock(&kvm->slots_lock);
2261 break;
2262 }
2263 default:
2264 r = -ENOTTY;
2265 }
2266
2267 return r;
2268 }
2269
2270 static int kvm_s390_apxa_installed(void)
2271 {
2272 struct ap_config_info info;
2273
2274 if (ap_instructions_available()) {
2275 if (ap_qci(&info) == 0)
2276 return info.apxa;
2277 }
2278
2279 return 0;
2280 }
2281
2282
2283
2284
2285
2286
2287
2288
2289
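/*
 * The CRYCB format is selected via the low-order bits of crycbd:
 * format 2 when APXA is installed, format 1 otherwise, and format 0
 * (no format bits set) when facility 76 (MSAX3) is unavailable.
 */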
2290 static void kvm_s390_set_crycb_format(struct kvm *kvm)
2291 {
2292 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2293
2294
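/* Clear the CRYCB format bits - i.e., set format 0 by default */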
2295 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2296
2297
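/* Check whether MSAX3 is installed */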
2298 if (!test_kvm_facility(kvm, 76))
2299 return;
2300
2301 if (kvm_s390_apxa_installed())
2302 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2303 else
2304 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2305 }
2306
2307 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2308 unsigned long *aqm, unsigned long *adm)
2309 {
2310 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2311
2312 mutex_lock(&kvm->lock);
2313 kvm_s390_vcpu_block_all(kvm);
2314
2315 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2316 case CRYCB_FORMAT2:
2317 memcpy(crycb->apcb1.apm, apm, 32);
2318 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2319 apm[0], apm[1], apm[2], apm[3]);
2320 memcpy(crycb->apcb1.aqm, aqm, 32);
2321 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2322 aqm[0], aqm[1], aqm[2], aqm[3]);
2323 memcpy(crycb->apcb1.adm, adm, 32);
2324 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2325 adm[0], adm[1], adm[2], adm[3]);
2326 break;
2327 case CRYCB_FORMAT1:
2328 case CRYCB_FORMAT0:
2329 memcpy(crycb->apcb0.apm, apm, 8);
2330 memcpy(crycb->apcb0.aqm, aqm, 2);
2331 memcpy(crycb->apcb0.adm, adm, 2);
2332 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2333 apm[0], *((unsigned short *)aqm),
2334 *((unsigned short *)adm));
2335 break;
2336 default:
2337 break;
2338 }
2339
2340
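/* recreate the shadow crycb for each vcpu */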
2341 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2342 kvm_s390_vcpu_unblock_all(kvm);
2343 mutex_unlock(&kvm->lock);
2344 }
2345 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2346
2347 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2348 {
2349 mutex_lock(&kvm->lock);
2350 kvm_s390_vcpu_block_all(kvm);
2351
2352 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2353 sizeof(kvm->arch.crypto.crycb->apcb0));
2354 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2355 sizeof(kvm->arch.crypto.crycb->apcb1));
2356
2357 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
2358
2359 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2360 kvm_s390_vcpu_unblock_all(kvm);
2361 mutex_unlock(&kvm->lock);
2362 }
2363 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2364
2365 static u64 kvm_s390_get_initial_cpuid(void)
2366 {
2367 struct cpuid cpuid;
2368
2369 get_cpu_id(&cpuid);
2370 cpuid.version = 0xff;
2371 return *((u64 *) &cpuid);
2372 }
2373
2374 static void kvm_s390_crypto_init(struct kvm *kvm)
2375 {
2376 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
2377 kvm_s390_set_crycb_format(kvm);
2378
2379 if (!test_kvm_facility(kvm, 76))
2380 return;
2381
2382
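/* Enable AES/DEA protected key functions by default */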
2383 kvm->arch.crypto.aes_kw = 1;
2384 kvm->arch.crypto.dea_kw = 1;
2385 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2386 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2387 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2388 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
2389 }
2390
2391 static void sca_dispose(struct kvm *kvm)
2392 {
2393 if (kvm->arch.use_esca)
2394 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
2395 else
2396 free_page((unsigned long)(kvm->arch.sca));
2397 kvm->arch.sca = NULL;
2398 }
2399
2400 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
2401 {
2402 gfp_t alloc_flags = GFP_KERNEL;
2403 int i, rc;
2404 char debug_name[16];
2405 static unsigned long sca_offset;
2406
2407 rc = -EINVAL;
2408 #ifdef CONFIG_KVM_S390_UCONTROL
2409 if (type & ~KVM_VM_S390_UCONTROL)
2410 goto out_err;
2411 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2412 goto out_err;
2413 #else
2414 if (type)
2415 goto out_err;
2416 #endif
2417
2418 rc = s390_enable_sie();
2419 if (rc)
2420 goto out_err;
2421
2422 rc = -ENOMEM;
2423
2424 if (!sclp.has_64bscao)
2425 alloc_flags |= GFP_DMA;
2426 rwlock_init(&kvm->arch.sca_lock);
2427
2428 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2429 if (!kvm->arch.sca)
2430 goto out_err;
2431 mutex_lock(&kvm_lock);
2432 sca_offset += 16;
2433 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
2434 sca_offset = 0;
2435 kvm->arch.sca = (struct bsca_block *)
2436 ((char *) kvm->arch.sca + sca_offset);
2437 mutex_unlock(&kvm_lock);
2438
2439 sprintf(debug_name, "kvm-%u", current->pid);
2440
2441 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2442 if (!kvm->arch.dbf)
2443 goto out_err;
2444
2445 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
2446 kvm->arch.sie_page2 =
2447 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2448 if (!kvm->arch.sie_page2)
2449 goto out_err;
2450
2451 kvm->arch.sie_page2->kvm = kvm;
2452 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2453
2454 for (i = 0; i < kvm_s390_fac_size(); i++) {
2455 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2456 (kvm_s390_fac_base[i] |
2457 kvm_s390_fac_ext[i]);
2458 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2459 kvm_s390_fac_base[i];
2460 }
2461 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2462
2463
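/* we are always in czam mode - even on pre z14 machines */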
2464 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2465 set_kvm_facility(kvm->arch.model.fac_list, 138);
2466
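/* we emulate STHYI in kvm */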
2467 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2468 set_kvm_facility(kvm->arch.model.fac_list, 74);
2469 if (MACHINE_HAS_TLB_GUEST) {
2470 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2471 set_kvm_facility(kvm->arch.model.fac_list, 147);
2472 }
2473
2474 if (css_general_characteristics.aiv && test_facility(65))
2475 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2476
2477 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
2478 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
2479
2480 kvm_s390_crypto_init(kvm);
2481
2482 mutex_init(&kvm->arch.float_int.ais_lock);
2483 spin_lock_init(&kvm->arch.float_int.lock);
2484 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2485 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
2486 init_waitqueue_head(&kvm->arch.ipte_wq);
2487 mutex_init(&kvm->arch.ipte_mutex);
2488
2489 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
2490 VM_EVENT(kvm, 3, "vm created with type %lu", type);
2491
2492 if (type & KVM_VM_S390_UCONTROL) {
2493 kvm->arch.gmap = NULL;
2494 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2495 } else {
2496 if (sclp.hamax == U64_MAX)
2497 kvm->arch.mem_limit = TASK_SIZE_MAX;
2498 else
2499 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
2500 sclp.hamax + 1);
2501 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2502 if (!kvm->arch.gmap)
2503 goto out_err;
2504 kvm->arch.gmap->private = kvm;
2505 kvm->arch.gmap->pfault_enabled = 0;
2506 }
2507
2508 kvm->arch.use_pfmfi = sclp.has_pfmfi;
2509 kvm->arch.use_skf = sclp.has_skey;
2510 spin_lock_init(&kvm->arch.start_stop_lock);
2511 kvm_s390_vsie_init(kvm);
2512 kvm_s390_gisa_init(kvm);
2513 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
2514
2515 return 0;
2516 out_err:
2517 free_page((unsigned long)kvm->arch.sie_page2);
2518 debug_unregister(kvm->arch.dbf);
2519 sca_dispose(kvm);
2520 KVM_EVENT(3, "creation of vm failed: %d", rc);
2521 return rc;
2522 }
2523
2524 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2525 {
2526 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
2527 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
2528 kvm_s390_clear_local_irqs(vcpu);
2529 kvm_clear_async_pf_completion_queue(vcpu);
2530 if (!kvm_is_ucontrol(vcpu->kvm))
2531 sca_del_vcpu(vcpu);
2532
2533 if (kvm_is_ucontrol(vcpu->kvm))
2534 gmap_remove(vcpu->arch.gmap);
2535
2536 if (vcpu->kvm->arch.use_cmma)
2537 kvm_s390_vcpu_unsetup_cmma(vcpu);
2538 free_page((unsigned long)(vcpu->arch.sie_block));
2539
2540 kvm_vcpu_uninit(vcpu);
2541 kmem_cache_free(kvm_vcpu_cache, vcpu);
2542 }
2543
2544 static void kvm_free_vcpus(struct kvm *kvm)
2545 {
2546 unsigned int i;
2547 struct kvm_vcpu *vcpu;
2548
2549 kvm_for_each_vcpu(i, vcpu, kvm)
2550 kvm_arch_vcpu_destroy(vcpu);
2551
2552 mutex_lock(&kvm->lock);
2553 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2554 kvm->vcpus[i] = NULL;
2555
2556 atomic_set(&kvm->online_vcpus, 0);
2557 mutex_unlock(&kvm->lock);
2558 }
2559
2560 void kvm_arch_destroy_vm(struct kvm *kvm)
2561 {
2562 kvm_free_vcpus(kvm);
2563 sca_dispose(kvm);
2564 debug_unregister(kvm->arch.dbf);
2565 kvm_s390_gisa_destroy(kvm);
2566 free_page((unsigned long)kvm->arch.sie_page2);
2567 if (!kvm_is_ucontrol(kvm))
2568 gmap_remove(kvm->arch.gmap);
2569 kvm_s390_destroy_adapters(kvm);
2570 kvm_s390_clear_float_irqs(kvm);
2571 kvm_s390_vsie_destroy(kvm);
2572 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
2573 }
2574
2575
2576 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2577 {
2578 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
2579 if (!vcpu->arch.gmap)
2580 return -ENOMEM;
2581 vcpu->arch.gmap->private = vcpu->kvm;
2582
2583 return 0;
2584 }
2585
2586 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2587 {
2588 if (!kvm_s390_use_sca_entries())
2589 return;
2590 read_lock(&vcpu->kvm->arch.sca_lock);
2591 if (vcpu->kvm->arch.use_esca) {
2592 struct esca_block *sca = vcpu->kvm->arch.sca;
2593
2594 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2595 sca->cpu[vcpu->vcpu_id].sda = 0;
2596 } else {
2597 struct bsca_block *sca = vcpu->kvm->arch.sca;
2598
2599 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2600 sca->cpu[vcpu->vcpu_id].sda = 0;
2601 }
2602 read_unlock(&vcpu->kvm->arch.sca_lock);
2603 }
2604
2605 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2606 {
2607 if (!kvm_s390_use_sca_entries()) {
2608 struct bsca_block *sca = vcpu->kvm->arch.sca;
2609
2610
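/* we still need the basic sca for the ipte control */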
2611 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2612 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2613 return;
2614 }
2615 read_lock(&vcpu->kvm->arch.sca_lock);
2616 if (vcpu->kvm->arch.use_esca) {
2617 struct esca_block *sca = vcpu->kvm->arch.sca;
2618
2619 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2620 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2621 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2622 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2623 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2624 } else {
2625 struct bsca_block *sca = vcpu->kvm->arch.sca;
2626
2627 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2628 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2629 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2630 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2631 }
2632 read_unlock(&vcpu->kvm->arch.sca_lock);
2633 }
2634
2635
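/* Basic SCA to Extended SCA data copy routines */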
2636 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2637 {
2638 d->sda = s->sda;
2639 d->sigp_ctrl.c = s->sigp_ctrl.c;
2640 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2641 }
2642
2643 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2644 {
2645 int i;
2646
2647 d->ipte_control = s->ipte_control;
2648 d->mcn[0] = s->mcn;
2649 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2650 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2651 }
2652
2653 static int sca_switch_to_extended(struct kvm *kvm)
2654 {
2655 struct bsca_block *old_sca = kvm->arch.sca;
2656 struct esca_block *new_sca;
2657 struct kvm_vcpu *vcpu;
2658 unsigned int vcpu_idx;
2659 u32 scaol, scaoh;
2660
2661 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2662 if (!new_sca)
2663 return -ENOMEM;
2664
2665 scaoh = (u32)((u64)(new_sca) >> 32);
2666 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2667
2668 kvm_s390_vcpu_block_all(kvm);
2669 write_lock(&kvm->arch.sca_lock);
2670
2671 sca_copy_b_to_e(new_sca, old_sca);
2672
2673 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2674 vcpu->arch.sie_block->scaoh = scaoh;
2675 vcpu->arch.sie_block->scaol = scaol;
2676 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2677 }
2678 kvm->arch.sca = new_sca;
2679 kvm->arch.use_esca = 1;
2680
2681 write_unlock(&kvm->arch.sca_lock);
2682 kvm_s390_vcpu_unblock_all(kvm);
2683
2684 free_page((unsigned long)old_sca);
2685
2686 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2687 old_sca, kvm->arch.sca);
2688 return 0;
2689 }
2690
2691 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2692 {
2693 int rc;
2694
2695 if (!kvm_s390_use_sca_entries()) {
2696 if (id < KVM_MAX_VCPUS)
2697 return true;
2698 return false;
2699 }
2700 if (id < KVM_S390_BSCA_CPU_SLOTS)
2701 return true;
2702 if (!sclp.has_esca || !sclp.has_64bscao)
2703 return false;
2704
2705 mutex_lock(&kvm->lock);
2706 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2707 mutex_unlock(&kvm->lock);
2708
2709 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
2710 }
2711
2712 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2713 {
2714 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2715 kvm_clear_async_pf_completion_queue(vcpu);
2716 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2717 KVM_SYNC_GPRS |
2718 KVM_SYNC_ACRS |
2719 KVM_SYNC_CRS |
2720 KVM_SYNC_ARCH0 |
2721 KVM_SYNC_PFAULT;
2722 kvm_s390_set_prefix(vcpu, 0);
2723 if (test_kvm_facility(vcpu->kvm, 64))
2724 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2725 if (test_kvm_facility(vcpu->kvm, 82))
2726 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
2727 if (test_kvm_facility(vcpu->kvm, 133))
2728 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2729 if (test_kvm_facility(vcpu->kvm, 156))
2730 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
2731
2732
2733
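/*
 * fprs can be synchronized via vrs, even if the guest has no vx. With
 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
 */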
2734 if (MACHINE_HAS_VX)
2735 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
2736 else
2737 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
2738
2739 if (kvm_is_ucontrol(vcpu->kvm))
2740 return __kvm_ucontrol_vcpu_init(vcpu);
2741
2742 return 0;
2743 }
2744
2745
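/* needs disabled preemption to protect from TOD sync and vcpu_load/put */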
2746 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2747 {
2748 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2749 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2750 vcpu->arch.cputm_start = get_tod_clock_fast();
2751 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2752 }
2753
2754
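/* needs disabled preemption to protect from TOD sync and vcpu_load/put */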
2755 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2756 {
2757 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2758 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2759 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2760 vcpu->arch.cputm_start = 0;
2761 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2762 }
2763
2764
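/* needs disabled preemption to protect from TOD sync and vcpu_load/put */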
2765 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2766 {
2767 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2768 vcpu->arch.cputm_enabled = true;
2769 __start_cpu_timer_accounting(vcpu);
2770 }
2771
2772
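/* needs disabled preemption to protect from TOD sync and vcpu_load/put */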
2773 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2774 {
2775 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2776 __stop_cpu_timer_accounting(vcpu);
2777 vcpu->arch.cputm_enabled = false;
2778 }
2779
2780 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2781 {
2782 preempt_disable();
2783 __enable_cpu_timer_accounting(vcpu);
2784 preempt_enable();
2785 }
2786
2787 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2788 {
2789 preempt_disable();
2790 __disable_cpu_timer_accounting(vcpu);
2791 preempt_enable();
2792 }
2793
2794
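/* set the cpu timer - may only be called from the VCPU thread itself */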
2795 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2796 {
2797 preempt_disable();
2798 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2799 if (vcpu->arch.cputm_enabled)
2800 vcpu->arch.cputm_start = get_tod_clock_fast();
2801 vcpu->arch.sie_block->cputm = cputm;
2802 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2803 preempt_enable();
2804 }
2805
2806
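/* update and get the cpu timer - can also be called from other VCPU threads */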
2807 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2808 {
2809 unsigned int seq;
2810 __u64 value;
2811
2812 if (unlikely(!vcpu->arch.cputm_enabled))
2813 return vcpu->arch.sie_block->cputm;
2814
2815 preempt_disable();
2816 do {
2817 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2818
2819
2820
2821
2822 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2823 value = vcpu->arch.sie_block->cputm;
2824
2825 if (likely(vcpu->arch.cputm_start))
2826 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2827 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2828 preempt_enable();
2829 return value;
2830 }
2831
2832 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2833 {
2834
2835 gmap_enable(vcpu->arch.enabled_gmap);
2836 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
2837 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2838 __start_cpu_timer_accounting(vcpu);
2839 vcpu->cpu = cpu;
2840 }
2841
2842 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2843 {
2844 vcpu->cpu = -1;
2845 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2846 __stop_cpu_timer_accounting(vcpu);
2847 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
2848 vcpu->arch.enabled_gmap = gmap_get_enabled();
2849 gmap_disable(vcpu->arch.enabled_gmap);
2850
2851 }
2852
2853 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2854 {
2855
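/* this equals initial cpu reset in pop, but we don't switch arch mode */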
2856 vcpu->arch.sie_block->gpsw.mask = 0UL;
2857 vcpu->arch.sie_block->gpsw.addr = 0UL;
2858 kvm_s390_set_prefix(vcpu, 0);
2859 kvm_s390_set_cpu_timer(vcpu, 0);
2860 vcpu->arch.sie_block->ckc = 0UL;
2861 vcpu->arch.sie_block->todpr = 0;
2862 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2863 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2864 CR0_INTERRUPT_KEY_SUBMASK |
2865 CR0_MEASUREMENT_ALERT_SUBMASK;
2866 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2867 CR14_UNUSED_33 |
2868 CR14_EXTERNAL_DAMAGE_SUBMASK;
2869 vcpu->run->s.regs.fpc = 0;
2870 vcpu->arch.sie_block->gbea = 1;
2871 vcpu->arch.sie_block->pp = 0;
2872 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2873 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2874 kvm_clear_async_pf_completion_queue(vcpu);
2875 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2876 kvm_s390_vcpu_stop(vcpu);
2877 kvm_s390_clear_local_irqs(vcpu);
2878 }
2879
2880 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
2881 {
2882 mutex_lock(&vcpu->kvm->lock);
2883 preempt_disable();
2884 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
2885 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
2886 preempt_enable();
2887 mutex_unlock(&vcpu->kvm->lock);
2888 if (!kvm_is_ucontrol(vcpu->kvm)) {
2889 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
2890 sca_add_vcpu(vcpu);
2891 }
2892 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2893 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2894
2895 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
2896 }
2897
2898 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2899 {
2900 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2901 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2902 return true;
2903 return false;
2904 }
2905
2906 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2907 {
2908
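/* At least one ECC subfunction must be present */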
2909 return kvm_has_pckmo_subfunc(kvm, 32) ||
2910 kvm_has_pckmo_subfunc(kvm, 33) ||
2911 kvm_has_pckmo_subfunc(kvm, 34) ||
2912 kvm_has_pckmo_subfunc(kvm, 40) ||
2913 kvm_has_pckmo_subfunc(kvm, 41);
2914
2915 }
2916
2917 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2918 {
2919
2920
2921
2922
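/*
 * If the AP instructions are not being interpreted and the MSAX3
 * facility is not configured for the guest, there is nothing to set up.
 */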
2923 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
2924 return;
2925
2926 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2927 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2928 vcpu->arch.sie_block->eca &= ~ECA_APIE;
2929 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
2930
2931 if (vcpu->kvm->arch.crypto.apie)
2932 vcpu->arch.sie_block->eca |= ECA_APIE;
2933
2934
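/* Set up protected key support */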
2935 if (vcpu->kvm->arch.crypto.aes_kw) {
2936 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2937
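/* ecc is also wrapped with AES key */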
2938 if (kvm_has_pckmo_ecc(vcpu->kvm))
2939 vcpu->arch.sie_block->ecd |= ECD_ECC;
2940 }
2941
2942 if (vcpu->kvm->arch.crypto.dea_kw)
2943 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2944 }
2945
2946 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2947 {
2948 free_page(vcpu->arch.sie_block->cbrlo);
2949 vcpu->arch.sie_block->cbrlo = 0;
2950 }
2951
2952 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2953 {
2954 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2955 if (!vcpu->arch.sie_block->cbrlo)
2956 return -ENOMEM;
2957 return 0;
2958 }
2959
2960 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2961 {
2962 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2963
2964 vcpu->arch.sie_block->ibc = model->ibc;
2965 if (test_kvm_facility(vcpu->kvm, 7))
2966 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
2967 }
2968
2969 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2970 {
2971 int rc = 0;
2972
2973 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2974 CPUSTAT_SM |
2975 CPUSTAT_STOPPED);
2976
2977 if (test_kvm_facility(vcpu->kvm, 78))
2978 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
2979 else if (test_kvm_facility(vcpu->kvm, 8))
2980 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
2981
2982 kvm_s390_vcpu_setup_model(vcpu);
2983
2984
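/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */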
2985 if (MACHINE_HAS_ESOP)
2986 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2987 if (test_kvm_facility(vcpu->kvm, 9))
2988 vcpu->arch.sie_block->ecb |= ECB_SRSI;
2989 if (test_kvm_facility(vcpu->kvm, 73))
2990 vcpu->arch.sie_block->ecb |= ECB_TE;
2991
2992 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
2993 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
2994 if (test_kvm_facility(vcpu->kvm, 130))
2995 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2996 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
2997 if (sclp.has_cei)
2998 vcpu->arch.sie_block->eca |= ECA_CEI;
2999 if (sclp.has_ib)
3000 vcpu->arch.sie_block->eca |= ECA_IB;
3001 if (sclp.has_siif)
3002 vcpu->arch.sie_block->eca |= ECA_SII;
3003 if (sclp.has_sigpif)
3004 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3005 if (test_kvm_facility(vcpu->kvm, 129)) {
3006 vcpu->arch.sie_block->eca |= ECA_VX;
3007 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3008 }
3009 if (test_kvm_facility(vcpu->kvm, 139))
3010 vcpu->arch.sie_block->ecd |= ECD_MEF;
3011 if (test_kvm_facility(vcpu->kvm, 156))
3012 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3013 if (vcpu->arch.sie_block->gd) {
3014 vcpu->arch.sie_block->eca |= ECA_AIV;
3015 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3016 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3017 }
3018 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3019 | SDNXC;
3020 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3021
3022 if (sclp.has_kss)
3023 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3024 else
3025 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3026
3027 if (vcpu->kvm->arch.use_cmma) {
3028 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3029 if (rc)
3030 return rc;
3031 }
3032 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3033 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3034
3035 vcpu->arch.sie_block->hpid = HPID_KVM;
3036
3037 kvm_s390_vcpu_crypto_setup(vcpu);
3038
3039 return rc;
3040 }
3041
3042 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3043 unsigned int id)
3044 {
3045 struct kvm_vcpu *vcpu;
3046 struct sie_page *sie_page;
3047 int rc = -EINVAL;
3048
3049 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3050 goto out;
3051
3052 rc = -ENOMEM;
3053
3054 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3055 if (!vcpu)
3056 goto out;
3057
3058 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3059 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3060 if (!sie_page)
3061 goto out_free_cpu;
3062
3063 vcpu->arch.sie_block = &sie_page->sie_block;
3064 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3065
3066
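/* the real guest size will always be smaller than msl */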
3067 vcpu->arch.sie_block->mso = 0;
3068 vcpu->arch.sie_block->msl = sclp.hamax;
3069
3070 vcpu->arch.sie_block->icpua = id;
3071 spin_lock_init(&vcpu->arch.local_int.lock);
3072 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
3073 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3074 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
3075 seqcount_init(&vcpu->arch.cputm_seqcount);
3076
3077 rc = kvm_vcpu_init(vcpu, kvm, id);
3078 if (rc)
3079 goto out_free_sie_block;
3080 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
3081 vcpu->arch.sie_block);
3082 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
3083
3084 return vcpu;
3085 out_free_sie_block:
3086 free_page((unsigned long)(vcpu->arch.sie_block));
3087 out_free_cpu:
3088 kmem_cache_free(kvm_vcpu_cache, vcpu);
3089 out:
3090 return ERR_PTR(rc);
3091 }
3092
3093 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3094 {
3095 return kvm_s390_vcpu_has_irq(vcpu, 0);
3096 }
3097
3098 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3099 {
3100 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3101 }
3102
3103 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3104 {
3105 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3106 exit_sie(vcpu);
3107 }
3108
3109 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3110 {
3111 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3112 }
3113
3114 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3115 {
3116 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3117 exit_sie(vcpu);
3118 }
3119
3120 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3121 {
3122 return atomic_read(&vcpu->arch.sie_block->prog20) &
3123 (PROG_BLOCK_SIE | PROG_REQUEST);
3124 }
3125
3126 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3127 {
3128 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3129 }
3130
3131
3132
3133
3134
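/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */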
3135 void exit_sie(struct kvm_vcpu *vcpu)
3136 {
3137 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3138 kvm_s390_vsie_kick(vcpu);
3139 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3140 cpu_relax();
3141 }
3142
3143
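/* Kick a guest cpu out of SIE to process a request synchronously */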
3144 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3145 {
3146 kvm_make_request(req, vcpu);
3147 kvm_s390_vcpu_request(vcpu);
3148 }
3149
3150 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3151 unsigned long end)
3152 {
3153 struct kvm *kvm = gmap->private;
3154 struct kvm_vcpu *vcpu;
3155 unsigned long prefix;
3156 int i;
3157
3158 if (gmap_is_shadow(gmap))
3159 return;
3160 if (start >= 1UL << 31)
3161
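/* We are only interested in prefix pages */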
3162 return;
3163 kvm_for_each_vcpu(i, vcpu, kvm) {
3164
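/* match against both prefix pages */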
3165 prefix = kvm_s390_get_prefix(vcpu);
3166 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3167 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3168 start, end);
3169 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
3170 }
3171 }
3172 }
3173
3174 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3175 {
3176
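/* do not poll with more than halt_poll_max_steal percent of steal time */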
3177 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3178 halt_poll_max_steal) {
3179 vcpu->stat.halt_no_poll_steal++;
3180 return true;
3181 }
3182 return false;
3183 }
3184
3185 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3186 {
3187
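/* kvm common code refers to this, but never calls it */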
3188 BUG();
3189 return 0;
3190 }
3191
3192 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3193 struct kvm_one_reg *reg)
3194 {
3195 int r = -EINVAL;
3196
3197 switch (reg->id) {
3198 case KVM_REG_S390_TODPR:
3199 r = put_user(vcpu->arch.sie_block->todpr,
3200 (u32 __user *)reg->addr);
3201 break;
3202 case KVM_REG_S390_EPOCHDIFF:
3203 r = put_user(vcpu->arch.sie_block->epoch,
3204 (u64 __user *)reg->addr);
3205 break;
3206 case KVM_REG_S390_CPU_TIMER:
3207 r = put_user(kvm_s390_get_cpu_timer(vcpu),
3208 (u64 __user *)reg->addr);
3209 break;
3210 case KVM_REG_S390_CLOCK_COMP:
3211 r = put_user(vcpu->arch.sie_block->ckc,
3212 (u64 __user *)reg->addr);
3213 break;
3214 case KVM_REG_S390_PFTOKEN:
3215 r = put_user(vcpu->arch.pfault_token,
3216 (u64 __user *)reg->addr);
3217 break;
3218 case KVM_REG_S390_PFCOMPARE:
3219 r = put_user(vcpu->arch.pfault_compare,
3220 (u64 __user *)reg->addr);
3221 break;
3222 case KVM_REG_S390_PFSELECT:
3223 r = put_user(vcpu->arch.pfault_select,
3224 (u64 __user *)reg->addr);
3225 break;
3226 case KVM_REG_S390_PP:
3227 r = put_user(vcpu->arch.sie_block->pp,
3228 (u64 __user *)reg->addr);
3229 break;
3230 case KVM_REG_S390_GBEA:
3231 r = put_user(vcpu->arch.sie_block->gbea,
3232 (u64 __user *)reg->addr);
3233 break;
3234 default:
3235 break;
3236 }
3237
3238 return r;
3239 }
3240
3241 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3242 struct kvm_one_reg *reg)
3243 {
3244 int r = -EINVAL;
3245 __u64 val;
3246
3247 switch (reg->id) {
3248 case KVM_REG_S390_TODPR:
3249 r = get_user(vcpu->arch.sie_block->todpr,
3250 (u32 __user *)reg->addr);
3251 break;
3252 case KVM_REG_S390_EPOCHDIFF:
3253 r = get_user(vcpu->arch.sie_block->epoch,
3254 (u64 __user *)reg->addr);
3255 break;
3256 case KVM_REG_S390_CPU_TIMER:
3257 r = get_user(val, (u64 __user *)reg->addr);
3258 if (!r)
3259 kvm_s390_set_cpu_timer(vcpu, val);
3260 break;
3261 case KVM_REG_S390_CLOCK_COMP:
3262 r = get_user(vcpu->arch.sie_block->ckc,
3263 (u64 __user *)reg->addr);
3264 break;
3265 case KVM_REG_S390_PFTOKEN:
3266 r = get_user(vcpu->arch.pfault_token,
3267 (u64 __user *)reg->addr);
3268 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3269 kvm_clear_async_pf_completion_queue(vcpu);
3270 break;
3271 case KVM_REG_S390_PFCOMPARE:
3272 r = get_user(vcpu->arch.pfault_compare,
3273 (u64 __user *)reg->addr);
3274 break;
3275 case KVM_REG_S390_PFSELECT:
3276 r = get_user(vcpu->arch.pfault_select,
3277 (u64 __user *)reg->addr);
3278 break;
3279 case KVM_REG_S390_PP:
3280 r = get_user(vcpu->arch.sie_block->pp,
3281 (u64 __user *)reg->addr);
3282 break;
3283 case KVM_REG_S390_GBEA:
3284 r = get_user(vcpu->arch.sie_block->gbea,
3285 (u64 __user *)reg->addr);
3286 break;
3287 default:
3288 break;
3289 }
3290
3291 return r;
3292 }
3293
3294 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3295 {
3296 kvm_s390_vcpu_initial_reset(vcpu);
3297 return 0;
3298 }
3299
3300 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3301 {
3302 vcpu_load(vcpu);
3303 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
3304 vcpu_put(vcpu);
3305 return 0;
3306 }
3307
3308 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3309 {
3310 vcpu_load(vcpu);
3311 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
3312 vcpu_put(vcpu);
3313 return 0;
3314 }
3315
3316 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3317 struct kvm_sregs *sregs)
3318 {
3319 vcpu_load(vcpu);
3320
3321 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
3322 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
3323
3324 vcpu_put(vcpu);
3325 return 0;
3326 }
3327
3328 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3329 struct kvm_sregs *sregs)
3330 {
3331 vcpu_load(vcpu);
3332
3333 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
3334 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
3335
3336 vcpu_put(vcpu);
3337 return 0;
3338 }
3339
3340 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3341 {
3342 int ret = 0;
3343
3344 vcpu_load(vcpu);
3345
3346 if (test_fp_ctl(fpu->fpc)) {
3347 ret = -EINVAL;
3348 goto out;
3349 }
3350 vcpu->run->s.regs.fpc = fpu->fpc;
3351 if (MACHINE_HAS_VX)
3352 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3353 (freg_t *) fpu->fprs);
3354 else
3355 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
3356
3357 out:
3358 vcpu_put(vcpu);
3359 return ret;
3360 }
3361
3362 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3363 {
3364 vcpu_load(vcpu);
3365
3366
3367 save_fpu_regs();
3368 if (MACHINE_HAS_VX)
3369 convert_vx_to_fp((freg_t *) fpu->fprs,
3370 (__vector128 *) vcpu->run->s.regs.vrs);
3371 else
3372 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
3373 fpu->fpc = vcpu->run->s.regs.fpc;
3374
3375 vcpu_put(vcpu);
3376 return 0;
3377 }
3378
3379 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3380 {
3381 int rc = 0;
3382
3383 if (!is_vcpu_stopped(vcpu))
3384 rc = -EBUSY;
3385 else {
3386 vcpu->run->psw_mask = psw.mask;
3387 vcpu->run->psw_addr = psw.addr;
3388 }
3389 return rc;
3390 }
3391
3392 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3393 struct kvm_translation *tr)
3394 {
3395 return -EINVAL;
3396 }
3397
3398 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3399 KVM_GUESTDBG_USE_HW_BP | \
3400 KVM_GUESTDBG_ENABLE)
3401
3402 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3403 struct kvm_guest_debug *dbg)
3404 {
3405 int rc = 0;
3406
3407 vcpu_load(vcpu);
3408
3409 vcpu->guest_debug = 0;
3410 kvm_s390_clear_bp_data(vcpu);
3411
3412 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3413 rc = -EINVAL;
3414 goto out;
3415 }
3416 if (!sclp.has_gpere) {
3417 rc = -EINVAL;
3418 goto out;
3419 }
3420
3421 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3422 vcpu->guest_debug = dbg->control;
3423
3424 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
3425
3426 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3427 rc = kvm_s390_import_bp_data(vcpu, dbg);
3428 } else {
3429 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3430 vcpu->arch.guestdbg.last_bp = 0;
3431 }
3432
3433 if (rc) {
3434 vcpu->guest_debug = 0;
3435 kvm_s390_clear_bp_data(vcpu);
3436 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3437 }
3438
3439 out:
3440 vcpu_put(vcpu);
3441 return rc;
3442 }
3443
3444 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3445 struct kvm_mp_state *mp_state)
3446 {
3447 int ret;
3448
3449 vcpu_load(vcpu);
3450
3451
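/* CHECK_STOP and LOAD are not supported yet */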
3452 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3453 KVM_MP_STATE_OPERATING;
3454
3455 vcpu_put(vcpu);
3456 return ret;
3457 }
3458
3459 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3460 struct kvm_mp_state *mp_state)
3461 {
3462 int rc = 0;
3463
3464 vcpu_load(vcpu);
3465
3466
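/* user space knows about this interface - let it control the state */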
3467 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3468
3469 switch (mp_state->mp_state) {
3470 case KVM_MP_STATE_STOPPED:
3471 kvm_s390_vcpu_stop(vcpu);
3472 break;
3473 case KVM_MP_STATE_OPERATING:
3474 kvm_s390_vcpu_start(vcpu);
3475 break;
3476 case KVM_MP_STATE_LOAD:
3477 case KVM_MP_STATE_CHECK_STOP:
3478
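/* fall through - CHECK_STOP and LOAD are not supported yet */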
3479 default:
3480 rc = -ENXIO;
3481 }
3482
3483 vcpu_put(vcpu);
3484 return rc;
3485 }
3486
3487 static bool ibs_enabled(struct kvm_vcpu *vcpu)
3488 {
3489 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
3490 }
3491
3492 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3493 {
3494 retry:
3495 kvm_s390_vcpu_request_handled(vcpu);
3496 if (!kvm_request_pending(vcpu))
3497 return 0;
3498
3499
3500
3501
3502
3503
3504
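/*
 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
 * This ensures that the ipte instruction for this request has
 * already finished. We might race against a second unmapper that
 * wants to set the blocking bit. Lets just retry the request loop.
 */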
3505 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
3506 int rc;
3507 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3508 kvm_s390_get_prefix(vcpu),
3509 PAGE_SIZE * 2, PROT_WRITE);
3510 if (rc) {
3511 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
3512 return rc;
3513 }
3514 goto retry;
3515 }
3516
3517 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3518 vcpu->arch.sie_block->ihcpu = 0xffff;
3519 goto retry;
3520 }
3521
3522 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3523 if (!ibs_enabled(vcpu)) {
3524 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
3525 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
3526 }
3527 goto retry;
3528 }
3529
3530 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3531 if (ibs_enabled(vcpu)) {
3532 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
3533 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
3534 }
3535 goto retry;
3536 }
3537
3538 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3539 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3540 goto retry;
3541 }
3542
3543 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3544
3545
3546
3547
3548
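/*
 * Disable CMM virtualization; we will emulate the ESSA
 * instruction manually, in order to provide additional
 * functionalities needed for live migration.
 */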
3549 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3550 goto retry;
3551 }
3552
3553 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3554
3555
3556
3557
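/*
 * Re-enable CMM virtualization if CMMA is available and
 * CMM has been used.
 */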
3558 if ((vcpu->kvm->arch.use_cmma) &&
3559 (vcpu->kvm->mm->context.uses_cmm))
3560 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3561 goto retry;
3562 }
3563
3564
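/* nothing to do, just clear the request */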
3565 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
3566
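/* we left the vsie handler, nothing to do, just clear the request */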
3567 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
3568
3569 return 0;
3570 }
3571
3572 void kvm_s390_set_tod_clock(struct kvm *kvm,
3573 const struct kvm_s390_vm_tod_clock *gtod)
3574 {
3575 struct kvm_vcpu *vcpu;
3576 struct kvm_s390_tod_clock_ext htod;
3577 int i;
3578
3579 mutex_lock(&kvm->lock);
3580 preempt_disable();
3581
3582 get_tod_clock_ext((char *)&htod);
3583
3584 kvm->arch.epoch = gtod->tod - htod.tod;
3585 kvm->arch.epdx = 0;
3586 if (test_kvm_facility(kvm, 139)) {
3587 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3588 if (kvm->arch.epoch > gtod->tod)
3589 kvm->arch.epdx -= 1;
3590 }
3591
3592 kvm_s390_vcpu_block_all(kvm);
3593 kvm_for_each_vcpu(i, vcpu, kvm) {
3594 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3595 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3596 }
3597
3598 kvm_s390_vcpu_unblock_all(kvm);
3599 preempt_enable();
3600 mutex_unlock(&kvm->lock);
3601 }
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
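/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */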
3613 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
3614 {
3615 return gmap_fault(vcpu->arch.gmap, gpa,
3616 writable ? FAULT_FLAG_WRITE : 0);
3617 }
3618
3619 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3620 unsigned long token)
3621 {
3622 struct kvm_s390_interrupt inti;
3623 struct kvm_s390_irq irq;
3624
3625 if (start_token) {
3626 irq.u.ext.ext_params2 = token;
3627 irq.type = KVM_S390_INT_PFAULT_INIT;
3628 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3629 } else {
3630 inti.type = KVM_S390_INT_PFAULT_DONE;
3631 inti.parm64 = token;
3632 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3633 }
3634 }
3635
3636 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3637 struct kvm_async_pf *work)
3638 {
3639 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3640 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3641 }
3642
3643 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3644 struct kvm_async_pf *work)
3645 {
3646 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3647 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3648 }
3649
3650 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3651 struct kvm_async_pf *work)
3652 {
3653
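/* s390 will always inject the page directly */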
3654 }
3655
3656 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3657 {
3658
3659
3660
3661
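/*
 * s390 will always inject the page directly,
 * but we still want check_async_completion to cleanup
 */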
3662 return true;
3663 }
3664
3665 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3666 {
3667 hva_t hva;
3668 struct kvm_arch_async_pf arch;
3669 int rc;
3670
3671 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3672 return 0;
3673 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3674 vcpu->arch.pfault_compare)
3675 return 0;
3676 if (psw_extint_disabled(vcpu))
3677 return 0;
3678 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3679 return 0;
3680 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
3681 return 0;
3682 if (!vcpu->arch.gmap->pfault_enabled)
3683 return 0;
3684
3685 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3686 hva += current->thread.gmap_addr & ~PAGE_MASK;
3687 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3688 return 0;
3689
3690 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3691 return rc;
3692 }
3693
3694 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
3695 {
3696 int rc, cpuflags;
3697
3698
3699
3700
3701
3702
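/*
 * On s390 notifications for arriving pages will be delivered directly
 * to the guest but the house keeping for completed pfaults is
 * handled outside the worker.
 */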
3703 kvm_check_async_pf_completion(vcpu);
3704
3705 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3706 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
3707
3708 if (need_resched())
3709 schedule();
3710
3711 if (test_cpu_flag(CIF_MCCK_PENDING))
3712 s390_handle_mcck();
3713
3714 if (!kvm_is_ucontrol(vcpu->kvm)) {
3715 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3716 if (rc)
3717 return rc;
3718 }
3719
3720 rc = kvm_s390_handle_requests(vcpu);
3721 if (rc)
3722 return rc;
3723
3724 if (guestdbg_enabled(vcpu)) {
3725 kvm_s390_backup_guest_per_regs(vcpu);
3726 kvm_s390_patch_guest_per_regs(vcpu);
3727 }
3728
3729 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3730
3731 vcpu->arch.sie_block->icptcode = 0;
3732 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3733 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3734 trace_kvm_s390_sie_enter(vcpu, cpuflags);
3735
3736 return 0;
3737 }
3738
3739 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3740 {
3741 struct kvm_s390_pgm_info pgm_info = {
3742 .code = PGM_ADDRESSING,
3743 };
3744 u8 opcode, ilen;
3745 int rc;
3746
3747 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3748 trace_kvm_s390_sie_fault(vcpu);
3749
3750
3751
3752
3753
3754
3755
3756
3757
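/*
 * We want to inject an addressing exception, which is defined as a
 * suppressing or terminating exception. However, since we came here
 * by a DAT access exception, the PSW still points to the faulting
 * instruction since DAT exceptions are nullifying. So we've got
 * to look up the current opcode to get the length of the instruction
 * to be able to forward the PSW.
 */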
3758 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
3759 ilen = insn_length(opcode);
3760 if (rc < 0) {
3761 return rc;
3762 } else if (rc) {
3763
3764
3765
3766
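/* Instruction-Fetching Exceptions - we can't detect the ilen.
 * Forward by arbitrary ilc, injection will take care of
 * nullification if necessary.
 */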
3767 pgm_info = vcpu->arch.pgm;
3768 ilen = 4;
3769 }
3770 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3771 kvm_s390_forward_psw(vcpu, ilen);
3772 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
3773 }
3774
3775 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3776 {
3777 struct mcck_volatile_info *mcck_info;
3778 struct sie_page *sie_page;
3779
3780 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3781 vcpu->arch.sie_block->icptcode);
3782 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3783
3784 if (guestdbg_enabled(vcpu))
3785 kvm_s390_restore_guest_per_regs(vcpu);
3786
3787 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3788 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
3789
3790 if (exit_reason == -EINTR) {
3791 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3792 sie_page = container_of(vcpu->arch.sie_block,
3793 struct sie_page, sie_block);
3794 mcck_info = &sie_page->mcck_info;
3795 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3796 return 0;
3797 }

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

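	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */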
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
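		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */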
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
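		/* some control register changes require a tlb flush */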
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
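	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */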
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
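	/*
	 * If userspace sets the gscb (e.g. after migration) to a valid state,
	 * we should enable GS here instead of doing the lazy enablement.
	 */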
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);

	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
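		/* User space provided an invalid FPC, let's clear it */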
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
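	/* SIE will load etoken directly from SDNX and therefore kvm_run */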
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);

	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
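	/* Restore will be done lazily at return */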
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
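	/* SIE will save etoken directly into SDNX and therefore kvm_run */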
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
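		/* userspace support is needed, kvm_run has been prepared */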
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}
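/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */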
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

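	/* manually convert vector registers if necessary */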
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
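	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area.
	 */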
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);

	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
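		/* we're the only active VCPU -> speed it up */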
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
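		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */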
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
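	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */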
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);

	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

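	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */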
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
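		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */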
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
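		/* do not use irq_state.flags, it will break old QEMUs */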
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
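		/* do not use irq_state.flags, it will break old QEMUs */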
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
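/* Section: memory related */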
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
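	/*
	 * A few sanity checks: the userspace address and the size of a
	 * memory slot have to be aligned to a segment boundary (1MB), and
	 * the slot must not extend beyond the configured memory limit of
	 * the guest.
	 */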
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
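		/* FALLTHROUGH */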
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
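/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */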
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");