This source file includes the following definitions:
- kvmppc_save_tm_pr
- kvmppc_restore_tm_pr
- kvmppc_save_tm_sprs
- kvmppc_restore_tm_sprs
- to_book3s
- kvmppc_set_gpr
- kvmppc_get_gpr
- kvmppc_set_cr
- kvmppc_get_cr
- kvmppc_set_xer
- kvmppc_get_xer
- kvmppc_set_ctr
- kvmppc_get_ctr
- kvmppc_set_lr
- kvmppc_get_lr
- kvmppc_set_pc
- kvmppc_get_pc
- kvmppc_need_byteswap
- kvmppc_get_fault_dar
- is_kvmppc_resume_guest
- kvmppc_supports_magic_page
- kvmppc_pack_vcpu_id
#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

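/*
 * Shadow copy of one Block Address Translation (BAT) register pair:
 * bepi/brpn are the block effective/real page numbers, wimg and pp the
 * storage-attribute and protection bits, and vs/vp flag validity in
 * supervisor and user (problem) state respectively.
 */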
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

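/*
 * One entry of the shadow segment map: caches which host VSID backs a
 * given guest ESID/VSID pair.
 */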
struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

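/*
 * Cached shadow HPTE: one guest PTE that has been mapped into the host
 * hash table, chained on several hash lists so it can be found and
 * invalidated by effective address, virtual page or physical page.
 */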
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

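/*
 * A "virtual core" groups the vcpus that run together as the SMT
 * threads of one physical core under HV KVM.  The runner vcpu drives
 * entry/exit for the whole core; stolen_tb/preempt_tb account for
 * timebase ticks lost while the vcore was preempted.
 */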
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;	/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

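/* Radix (ISA v3.0) guest MMU support */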
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
			gva_t eaddr, void *to, void *from,
			unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
			bool writing, unsigned long gpa,
			unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
			unsigned long gpa,
			struct kvm_memory_slot *memslot,
			bool writing, bool kvm_ro,
			pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
			unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

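/* Hypervisor-mode transactional memory emulation for POWER9 */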
extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

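/*
 * TM register state save/restore for PR KVM; these compile to no-op
 * stubs when CONFIG_PPC_TRANSACTIONAL_MEM is not set.
 */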
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

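/* Nested (HV-on-HV) virtualization support */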
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

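/* Also pull in the subarch-specific defines */
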
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

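/*
 * Accessors for the guest register image held in vcpu->arch.regs.
 * Emulation code is expected to go through these helpers rather than
 * touch the struct directly; an illustrative (hypothetical) use:
 *
 *	kvmppc_set_gpr(vcpu, rt, kvmppc_get_gpr(vcpu, ra) + simm);
 */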
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

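/*
 * Load/store emulation must byteswap when the guest's endianness
 * (MSR_LE) differs from the endianness the host kernel was built for.
 */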
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic r3/r4 values that mark a MOL (Mac-on-Linux) OSI hypercall */
#define OSI_SC_MAGIC_R3		0x113724FA
#define OSI_SC_MAGIC_R4		0x77810F9B

#define INS_DCBZ		0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW			0x7fe00008

#define SPLIT_HACK_MASK		0xff000000
#define SPLIT_HACK_OFFS		0xfb000000
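
/*
 * Pack a vcpu ID that may exceed KVM_MAX_VCPUS into the range
 * 0..KVM_MAX_VCPUS-1.  IDs above KVM_MAX_VCPUS can occur when userspace
 * emulates a smaller SMT mode than the host core provides; each
 * successive block of KVM_MAX_VCPUS IDs is folded onto a different
 * first-vcpu thread offset within the core so packed IDs do not collide.
 */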
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}

#endif /* __ASM_KVM_BOOK3S_H__ */