This source file includes the following definitions:
- kvm_init_host_cpu_context
- __cpu_init_hyp_mode
- kvm_arch_requires_vhe
- kvm_arch_hardware_unsetup
- kvm_arch_sync_events
- kvm_arch_sched_in
- kvm_arch_vcpu_block_finish
- __cpu_init_stage2
- kvm_pmu_counter_deferred
- kvm_arch_vcpu_run_pid_change
- kvm_set_pmu_events
- kvm_clr_pmu_events
- kvm_arm_vhe_guest_enter
- kvm_arm_vhe_guest_exit
- kvm_arm_harden_branch_predictor
- kvm_arm_have_ssbd
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
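
/*
 * Illustrative usage sketch (not part of the original header): these arch
 * request bits are raised and consumed through the generic vcpu request
 * API from <linux/kvm_host.h>; the handler name below is hypothetical:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 *		handle_vcpu_sleep(vcpu);	// hypothetical handler
 */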

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;
};

struct kvm_arch {
	struct kvm_vmid vmid;

	/* stage2 entry level table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* VTCR_EL2 value for this VM */
	u64 vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of VCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist vgic;

	/* Mandated version of PSCI */
	u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* 32bit specific registers. Keep them at the end of the enum */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
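
/*
 * Illustrative sketch (not from the original header): AArch32 trap code
 * can address the two 32-bit halves of a 64-bit EL1 register through the
 * paired indices above, e.g. for TTBR0; vcpu_cp15() is defined further
 * down, and lower/upper_32_bits() are the generic kernel helpers:
 *
 *	vcpu_cp15(vcpu, c2_TTBR0) = lower_32_bits(val);
 *	vcpu_cp15(vcpu, c2_TTBR0_high) = upper_32_bits(val);
 */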

struct kvm_cpu_context {
	struct kvm_regs gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

typedef struct kvm_host_data kvm_host_data_t;

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be
	 * loaded onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	struct kvm_cpu_context *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
	bool sysregs_loaded_on_cpu;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
			     sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
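
/*
 * Illustrative sketch (assumption, not part of this header): the SVE
 * finalization path allocates the per-vcpu SVE register storage sized
 * by the macro above, roughly:
 *
 *	void *buf = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	vcpu->arch.sve_state = buf;
 */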

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
				  system_supports_generic_auth()) && \
				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
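
/*
 * Illustrative sketch: guarding an SVE-only path with the predicate
 * above, roughly as the world-switch FP/SIMD code does when loading
 * guest state:
 *
 *	if (vcpu_has_sve(vcpu))
 *		sve_load_state(vcpu_sve_pffr(vcpu),
 *			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
 *			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 */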

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)

/*
 * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 * register, and not the one most recently accessed by a running VCPU.  For
 * example, for userspace access or for system registers that are never
 * context switched, but only emulated.
 */
#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)

#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
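
/*
 * Illustrative sketch (not from the original header): prefer the
 * accessor functions for registers that may be live on the CPU, and the
 * memory-backed macro only where the comment above allows it:
 *
 *	u64 val = vcpu_read_sys_reg(vcpu, SCTLR_EL1);	// live or in-memory
 *	vcpu_write_sys_reg(vcpu, val, SCTLR_EL1);
 *	__vcpu_sys_reg(vcpu, MPIDR_EL1) = mpidr;	// in-memory only
 */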

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__);	\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
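
/*
 * Illustrative sketch: callers elsewhere in KVM/arm64 invoke hyp entry
 * points through these wrappers, with and without a return value, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 */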

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
			 (u64)kvm_ksym_ref(kvm_host_data));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any of the cpucaps
	 * that has been finalized after full blown initialization.
	 */
	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	/* Some implementations have defects that confine them to VHE */
	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
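
/*
 * Illustrative sketch (assumption): the host PMU driver can use this
 * predicate to skip enabling counters that must only count while the
 * guest runs, leaving them to the world-switch code; counter_bits and
 * the enable helper below are hypothetical:
 *
 *	kvm_set_pmu_events(counter_bits, attr);
 *	if (!kvm_pmu_counter_deferred(attr))
 *		enable_counters(counter_bits);	// hypothetical helper
 */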

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, so we just
	 * need a dsb to ensure the redistributor forwards EL2 IRQs to the
	 * CPU.
	 */
	if (system_uses_irq_prio_masking())
		dsb(sy);
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();
}

#define KVM_BP_HARDEN_UNKNOWN		-1
#define KVM_BP_HARDEN_WA_NEEDED		0
#define KVM_BP_HARDEN_NOT_REQUIRED	1

static inline int kvm_arm_harden_branch_predictor(void)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_WA_NEEDED:
		return KVM_BP_HARDEN_WA_NEEDED;
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return KVM_BP_HARDEN_NOT_REQUIRED;
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return KVM_BP_HARDEN_UNKNOWN;
	}
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}
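
/*
 * Illustrative sketch: the PSCI/SMCCC discovery path maps this state to
 * the SMCCC_ARCH_WORKAROUND_2 query result, roughly:
 *
 *	switch (kvm_arm_have_ssbd()) {
 *	case KVM_SSBD_KERNEL:
 *		val = SMCCC_RET_SUCCESS;
 *		break;
 *	case KVM_SSBD_FORCE_DISABLE:
 *	default:
 *		val = SMCCC_RET_NOT_SUPPORTED;
 *		break;
 *	}
 */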

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

void kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_arm_vcpu_loaded(vcpu)	((vcpu)->arch.sysregs_loaded_on_cpu)

#endif /* __ARM64_KVM_HOST_H__ */