This source file includes the following definitions:
- kvmppc_get_last_inst
- is_kvmppc_hv_enabled
- kvmppc_get_field
- kvmppc_set_field
- kvm_cma_reserve
- kvmppc_set_xive_tima
- kvmppc_get_xics_latch
- kvmppc_set_host_ipi
- kvmppc_clear_host_ipi
- kvmppc_fast_vcpu_kick
- kvm_cma_reserve
- kvmppc_set_xics_phys
- kvmppc_set_xive_tima
- kvmppc_get_xics_latch
- kvmppc_set_host_ipi
- kvmppc_clear_host_ipi
- kvmppc_fast_vcpu_kick
- kvm_hv_mode_active
- kvmppc_xics_enabled
- kvmppc_get_passthru_irqmap
- kvmppc_get_passthru_irqmap
- kvmppc_alloc_host_rm_ops
- kvmppc_free_host_rm_ops
- kvmppc_free_pimap
- kvmppc_xics_rm_complete
- kvmppc_xics_enabled
- kvmppc_xics_free_icp
- kvmppc_xics_hcall
- kvmppc_xive_enabled
- kvmppc_xive_set_xive
- kvmppc_xive_get_xive
- kvmppc_xive_int_on
- kvmppc_xive_int_off
- kvmppc_xive_init_module
- kvmppc_xive_exit_module
- kvmppc_xive_connect_vcpu
- kvmppc_xive_cleanup_vcpu
- kvmppc_xive_set_mapped
- kvmppc_xive_clr_mapped
- kvmppc_xive_get_icp
- kvmppc_xive_set_icp
- kvmppc_xive_set_irq
- kvmppc_xive_push_vcpu
- kvmppc_xive_enabled
- kvmppc_xive_native_connect_vcpu
- kvmppc_xive_native_cleanup_vcpu
- kvmppc_xive_native_init_module
- kvmppc_xive_native_exit_module
- kvmppc_xive_native_get_vp
- kvmppc_xive_native_set_vp
- xics_on_xive
- xics_on_xive
- kvmppc_get_epr
- kvmppc_set_epr
- kvmppc_mpic_set_epr
- kvmppc_mpic_connect_vcpu
- kvmppc_mpic_disconnect_vcpu
- kvmppc_mmu_flush_icache
- kvmppc_shared_big_endian
- SHARED_WRAPPER
- SHARED_WRAPPER
- kvmppc_set_sr
- kvmppc_fix_ee_before_entry
- kvmppc_get_ea_indexed
#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

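/*
 * A reserved, otherwise-invalid opcode used to implement KVM software
 * breakpoints: executing it traps back into KVM for debug handling.
 */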
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,
	EMULATE_DO_MMIO,
	EMULATE_FAIL,
	EMULATE_AGAIN,
	EMULATE_EXIT_USER,
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,
};

enum xlate_instdata {
	XLATE_INST,
	XLATE_DATA
};

enum xlate_readwrite {
	XLATE_READ,
	XLATE_WRITE
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

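/* Core-specific hooks */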
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,         \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

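	/* Load the instruction manually if it failed to do so in the
	 * exit path */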
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

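	/* Byte-swap only on a successful fetch; a failed fetch is passed
	 * through unswapped so KVM_INST_FETCH_FAILED stays recognizable. */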
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

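/*
 * Extract the bit field [msb..lsb] of an instruction, using the PowerPC
 * ISA convention that bit 0 is the most-significant bit of the doubleword
 * (so msb <= lsb numerically).
 */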
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

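/*
 * Replace the bit field [msb..lsb] of an instruction with @value, using
 * the same big-endian bit numbering as kvmppc_get_field().
 */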
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

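/*
 * ONE_REG helpers: the register size is encoded in the high bits of the
 * register id, and values are marshalled through union kvmppc_one_reg,
 * e.g. get_reg_val(reg->id, some_u32_field) builds a value for a
 * 32-bit register.
 */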
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

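/*
 * host_ipi marks an IPI sent to a CPU that is running (or napping in) a
 * guest as a genuine host interrupt, forcing a full exit to the host
 * kernel instead of a mere guest wakeup, so the IPI is processed there.
 */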
static inline void kvmppc_set_host_ipi(int cpu)
{
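	/*
	 * Order the stores of any IPI messages against the store that
	 * sets host_ipi; pairs with the barrier in kvmppc_clear_host_ipi().
	 */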
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
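	/*
	 * Order the clearing of host_ipi against the subsequent processing
	 * of IPI messages; pairs with the barrier in kvmppc_set_host_ipi().
	 */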
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
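/*
 * Naming note: "XIVE" here is the POWER9 eXternal Interrupt Virtualization
 * Engine, while the "xive" in the set_xive/get_xive calls below is the
 * legacy XICS "eXternal Interrupt Vector Entry" (a server/priority pair)
 * that these routines expose to the guest.
 */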
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
				      u32 irq, int level, bool line_status)
	{ return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return -ENOENT; }

#endif

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

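/*
 * Prototypes for the real-mode hypercall handlers; these are called only
 * from assembler code, so having prototypes here mainly quiets sparse.
 */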
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

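/*
 * Host-side state used while running in real mode on behalf of the
 * guest's XICS accesses; currently only vCPU wakeup is supported as an
 * rm_action.
 */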
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;

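	/*
	 * Only pages that the kernel maps as memory can be touched here;
	 * bail out for anything without a valid struct page.
	 */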
	if (!pfn_valid(pfn))
		return;

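	/* Flush the i-cache only once per newly-mapped page (PG_arch_1). */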
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

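/*
 * Shared-struct helpers: the kvmppc shared (magic) page can be little- or
 * big-endian depending on the guest, so accesses go through these wrappers.
 */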
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
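	/* Only Book3S_64 PR supports bi-endian guests for now */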
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
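	/* Book3S_64 HV on a little-endian host is always little-endian */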
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

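/*
 * Call this after kvmppc_prepare_to_enter(): it puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode without actually
 * enabling interrupts.
 */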
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
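	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully enabled to hard-disabled.
	 */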
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

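	/* Only hard-enabling of interrupts is needed after this point */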
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

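	/* Truncate the EA to 32 bits when the guest is not in 64-bit mode */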
	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */