This source file includes the following definitions:
- kvm_s390_set_cpuflags
- kvm_s390_clear_cpuflags
- kvm_s390_test_cpuflags
- is_vcpu_stopped
- is_vcpu_idle
- kvm_is_ucontrol
- kvm_s390_get_prefix
- kvm_s390_set_prefix
- kvm_s390_get_base_disp_s
- kvm_s390_get_base_disp_sse
- kvm_s390_get_regs_rre
- kvm_s390_get_base_disp_rsy
- kvm_s390_get_base_disp_rs
- kvm_s390_set_psw_cc
- test_kvm_facility
- set_kvm_facility
- test_kvm_cpu_feat
- kvm_s390_user_cpu_state_ctrl
- kvm_s390_inject_prog_irq
- kvm_s390_inject_program_int
- kvm_s390_rewind_psw
- kvm_s390_forward_psw
- kvm_s390_retry_instr
- kvm_s390_vcpu_block_all
- kvm_s390_vcpu_unblock_all
- kvm_s390_get_tod_clock_fast
- kvm_s390_inject_prog_cond
- kvm_s390_get_ipte_control
- kvm_s390_use_sca_entries

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
          "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
          d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
          d_args); \
} while (0)

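/*
 * Illustrative use of the event macros above (a sketch, not part of the
 * original header): VCPU_EVENT(vcpu, 3, "%s", "free cpu") records a
 * level-3 entry in the per-VM debug feature, prefixed with the vcpu id
 * and the guest PSW.
 */
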
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
        return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
        return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if (kvm->arch.gmap)
                return 0;
        return 1;
#else
        return 0;
#endif
}

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
        VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
                   prefix);
        vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}

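/*
 * Illustrative sketch (not part of the original header): the SET PREFIX
 * intercept handler in priv.c reads the new prefix from guest memory,
 * masks it to a valid prefix origin and then calls the helper above,
 * roughly:
 *
 *	address &= 0x7fffe000;
 *	kvm_s390_set_prefix(vcpu, address);
 */
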
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
                                              u64 *address1, u64 *address2,
                                              u8 *ar_b1, u8 *ar_b2)
{
        u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
        u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
        u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
        u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

        *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
        *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

        if (ar_b1)
                *ar_b1 = base1;
        if (ar_b2)
                *ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
        if (r1)
                *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        if (r2)
                *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
                        ((vcpu->arch.sie_block->ipb & 0xff00) << 4);

        /* The displacement is a 20bit _SIGNED_ value */
        if (disp2 & 0x80000)
                disp2 += 0xfff00000;

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

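/*
 * Worked example for the base/displacement decoders above (illustrative):
 * for an S-format instruction with base2 = 5 and disp2 = 0x008 encoded in
 * the ipb, kvm_s390_get_base_disp_s() returns gprs[5] + 8. A base register
 * number of 0 means "no base register" and contributes 0, not gprs[0].
 */
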
/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
        vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
        vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
        return __test_facility(nr, kvm->arch.model.fac_mask) &&
                __test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return -EINVAL;
        ptr = (unsigned char *) fac_list + (nr >> 3);
        *ptr |= (0x80UL >> (nr & 7));
        return 0;
}
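
/*
 * Note on the bit arithmetic above (illustrative): facility bits are
 * numbered MSB first within each byte of the facility list, hence
 * 0x80UL >> (nr & 7). Setting facility 2, for example, ORs 0x20 into
 * byte 0 of fac_list.
 */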

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
        WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
        return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
        return kvm->arch.user_cpu_state_ctrl != 0;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
                                    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm = *pgm_info,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm.code = code,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}
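
/*
 * Illustrative use of the injection helpers above (a sketch): instruction
 * handlers typically bail out like this when a problem-state guest issues
 * a privileged instruction:
 *
 *	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 *		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 */
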
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
                             struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

        sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
        kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
        /* don't inject PER events if we re-execute the instruction */
        vcpu->arch.sie_block->icptstatus &= ~0x02;
        kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
                                 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm,
                            const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        WARN_ON(!mutex_is_locked(&kvm->lock));
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_unblock(vcpu);
}

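/*
 * The guest TOD clock is the host TOD clock plus the per-VM epoch;
 * preemption is disabled below so the clock read and the epoch used
 * come from the same host context.
 */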
static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
        u64 rc;

        preempt_disable();
        rc = get_tod_clock_fast() + kvm->arch.epoch;
        preempt_enable();
        return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * ret = write_guest(vcpu, addr, data, len);
 * if (ret)
 *         return kvm_s390_inject_prog_cond(vcpu, ret);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *          - zero if @rc was already zero
 *          - zero or error code from injecting if @rc was positive
 *            (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
        if (rc <= 0)
                return rc;
        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
                       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
                           void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
                           __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
int kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
                            struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
        struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

        return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
        /*
         * Without SIGP interpretation, only SRS interpretation (if available)
         * might use the entries. By not setting the entries and keeping them
         * invalid, hardware will not access them but intercept.
         */
        return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
                                     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the
 * crypto attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);
#endif