This source file includes following definitions.
- read_from_write_only
- write_to_read_only
- vcpu_read_sys_reg
- vcpu_write_sys_reg
- get_ccsidr
- access_dcsw
- access_vm_reg
- access_gic_sgi
- access_gic_sre
- trap_raz_wi
- trap_loregion
- trap_oslsr_el1
- trap_dbgauthstatus_el1
- trap_debug_regs
- reg_to_dbg
- dbg_to_reg
- trap_bvr
- set_bvr
- get_bvr
- reset_bvr
- trap_bcr
- set_bcr
- get_bcr
- reset_bcr
- trap_wvr
- set_wvr
- get_wvr
- reset_wvr
- trap_wcr
- set_wcr
- get_wcr
- reset_wcr
- reset_amair_el1
- reset_mpidr
- reset_pmcr
- check_pmu_access_disabled
- pmu_access_el0_disabled
- pmu_write_swinc_el0_disabled
- pmu_access_cycle_counter_el0_disabled
- pmu_access_event_counter_el0_disabled
- access_pmcr
- access_pmselr
- access_pmceid
- pmu_counter_idx_valid
- access_pmu_evcntr
- access_pmu_evtyper
- access_pmcnten
- access_pminten
- access_pmovs
- access_pmswinc
- access_pmuserenr
- trap_ptrauth
- ptrauth_visibility
- access_arch_timer
- read_id_reg
- __access_id_reg
- access_id_reg
- access_raz_id_reg
- sve_visibility
- sve_id_visibility
- guest_id_aa64zfr0_el1
- access_id_aa64zfr0_el1
- get_id_aa64zfr0_el1
- set_id_aa64zfr0_el1
- __get_id_reg
- __set_id_reg
- get_id_reg
- set_id_reg
- get_raz_id_reg
- set_raz_id_reg
- access_ctr
- access_clidr
- access_csselr
- access_ccsidr
- trap_dbgidr
- trap_debug32
- trap_xvr
- kvm_register_target_sys_reg_table
- get_target_table
- match_sys_reg
- find_reg
- kvm_handle_cp14_load_store
- perform_access
- emulate_cp
- unhandled_cp_access
- kvm_handle_cp_64
- kvm_handle_cp_32
- kvm_handle_cp15_64
- kvm_handle_cp15_32
- kvm_handle_cp14_64
- kvm_handle_cp14_32
- emulate_sys_reg
- reset_sys_reg_descs
- kvm_handle_sys_reg
- index_to_params
- find_reg_by_id
- index_to_sys_reg_desc
- FUNCTION_INVARIANT
- reg_from_user
- reg_to_user
- get_invariant_sys_reg
- set_invariant_sys_reg
- is_valid_cache
- demux_c15_get
- demux_c15_set
- kvm_arm_sys_reg_get_reg
- kvm_arm_sys_reg_set_reg
- num_demux_regs
- write_demux_regids
- sys_reg_to_index
- copy_reg_to_user
- walk_one_sys_reg
- walk_sys_regs
- kvm_arm_num_sys_reg_descs
- kvm_arm_copy_sys_reg_indices
- check_sysreg_table
- kvm_sys_reg_table_init
- kvm_reset_sys_regs
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/bsearch.h>
13 #include <linux/kvm_host.h>
14 #include <linux/mm.h>
15 #include <linux/printk.h>
16 #include <linux/uaccess.h>
17
18 #include <asm/cacheflush.h>
19 #include <asm/cputype.h>
20 #include <asm/debug-monitors.h>
21 #include <asm/esr.h>
22 #include <asm/kvm_arm.h>
23 #include <asm/kvm_coproc.h>
24 #include <asm/kvm_emulate.h>
25 #include <asm/kvm_host.h>
26 #include <asm/kvm_hyp.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/perf_event.h>
29 #include <asm/sysreg.h>
30
31 #include <trace/events/kvm.h>
32
33 #include "sys_regs.h"
34
35 #include "trace.h"
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Guest attempted to read a write-only system register. This should only
 * happen if the trap table is wrong, hence the WARN_ONCE. An UNDEF is
 * injected into the guest and false is returned to the emulation core.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
57
/*
 * Guest attempted to write a read-only system register. Mirror image of
 * read_from_write_only(): warn once, log the instruction, inject UNDEF.
 */
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
67
/*
 * Read a guest system register. If the vcpu's sysregs are currently
 * loaded on the physical CPU, the listed registers are live in hardware
 * and must be read from there; EL1 registers go through their _EL12
 * aliases (NOTE(review): presumably the VHE redirection to the guest's
 * EL1 copies -- confirm against the save/restore code). Anything not
 * listed, or any access while the state is in memory, falls back to the
 * in-memory shadow copy.
 */
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/* Registers live in hardware while the vcpu state is loaded. */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(SYS_SCTLR_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(SYS_CPACR_EL12);
	case TTBR0_EL1:		return read_sysreg_s(SYS_TTBR0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(SYS_TTBR1_EL12);
	case TCR_EL1:		return read_sysreg_s(SYS_TCR_EL12);
	case ESR_EL1:		return read_sysreg_s(SYS_ESR_EL12);
	case AFSR0_EL1:		return read_sysreg_s(SYS_AFSR0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(SYS_AFSR1_EL12);
	case FAR_EL1:		return read_sysreg_s(SYS_FAR_EL12);
	case MAIR_EL1:		return read_sysreg_s(SYS_MAIR_EL12);
	case VBAR_EL1:		return read_sysreg_s(SYS_VBAR_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(SYS_CONTEXTIDR_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(SYS_AMAIR_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(SYS_CNTKCTL_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

	/* State is in memory (or the register is never kept in hardware). */
immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
111
/*
 * Write a guest system register. Exact dual of vcpu_read_sys_reg():
 * while the vcpu's sysregs are loaded on the CPU, the listed registers
 * are written directly to hardware (EL1 registers via their _EL12
 * aliases); otherwise the in-memory shadow copy is updated.
 */
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/* Registers live in hardware while the vcpu state is loaded. */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	return;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	return;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	return;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	return;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

	/* State is in memory (or the register is never kept in hardware). */
immediate_write:
	 __vcpu_sys_reg(vcpu, reg) = val;
}
154
155
/* Cached copy of the host's CLIDR_EL1 cache level hierarchy. */
static u32 cache_levels;

/* CSSELR values; used to validate cache level selection from userspace. */
#define CSSELR_MAX 12
160
161
/*
 * Read the host CCSIDR_EL1 for the cache selected by @csselr. The
 * CSSELR write, ISB and CCSIDR read must not be interleaved with a
 * preempting context doing the same dance, hence the IRQ-off section.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during the read. */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();	/* ensure the CSSELR write is visible to the CCSIDR read */
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
175
176
177
178
/*
 * Trap handler for data cache maintenance by set/way (DC ISW/CSW/CISW).
 * These are write-only ops; set/way operations are emulated by flushing
 * the whole VM instead (unless FWB makes that unnecessary).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * With FWB the final cacheability is under host control, so the
	 * set/way flush can be elided entirely.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
198
199
200
201
202
203
/*
 * Trap handler for virtual-memory control registers (write-only trap:
 * reads are handled without trapping). For AArch32 accesses, two 32-bit
 * guest registers are banked into one 64-bit register: odd encodings
 * hold the upper half, even ones the lower half. After the write, the
 * cache-enable state is re-evaluated so MMU toggles are caught.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* AArch32 banks pairs of 32-bit registers into one 64-bit slot. */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		/* Merge the 32-bit write into the right half of the slot. */
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
233
234
235
236
237
238
239
/*
 * Trap handler for GICv3 SGI generation registers (write-only). The
 * encoding determines which interrupt group the SGI targets: Group 1
 * for ICC_SGI1R (AArch32 Op1==0 / AArch64 Op2==5), Group 0 for
 * ICC_(A)SGI0R-style encodings. The default cases make unknown
 * encodings behave as Group 1.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Derive the target group from the trapped encoding. */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Op1 identifies the SGI register */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Op2 identifies the SGI register */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
284
285 static bool access_gic_sre(struct kvm_vcpu *vcpu,
286 struct sys_reg_params *p,
287 const struct sys_reg_desc *r)
288 {
289 if (p->is_write)
290 return ignore_write(vcpu, p);
291
292 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
293 return true;
294 }
295
296 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
297 struct sys_reg_params *p,
298 const struct sys_reg_desc *r)
299 {
300 if (p->is_write)
301 return ignore_write(vcpu, p);
302 else
303 return read_zero(vcpu, p);
304 }
305
306
307
308
309
310
311
/*
 * Trap handler for the LORegion registers. If the host does not
 * implement LORegions (ID_AA64MMFR1_EL1.LO == 0), accesses UNDEF.
 * Otherwise LORID_EL1 is read-only and everything is RAZ/WI.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
330
331 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
332 struct sys_reg_params *p,
333 const struct sys_reg_desc *r)
334 {
335 if (p->is_write) {
336 return ignore_write(vcpu, p);
337 } else {
338 p->regval = (1 << 3);
339 return true;
340 }
341 }
342
343 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
344 struct sys_reg_params *p,
345 const struct sys_reg_desc *r)
346 {
347 if (p->is_write) {
348 return ignore_write(vcpu, p);
349 } else {
350 p->regval = read_sysreg(dbgauthstatus_el1);
351 return true;
352 }
353 }
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
/*
 * Generic trap handler for debug registers kept in the vcpu sysreg
 * file. A write updates the shadow copy and marks the debug state
 * dirty so it gets restored on the next world switch.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
397
398
399
400
401
402
403
404
405
406
/*
 * Store a trapped write into a 64-bit debug register shadow. For a
 * 32-bit access only the low half is replaced; the upper half of the
 * existing value is preserved. Marks the debug state dirty.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);	/* keep the top half */
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}
421
/*
 * Return a debug register shadow to the guest, truncating to 32 bits
 * for a 32-bit access.
 */
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
430
/* Trap handler for DBGBVRn_EL1: route to the per-vcpu breakpoint value. */
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
446
447 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
448 const struct kvm_one_reg *reg, void __user *uaddr)
449 {
450 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
451
452 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
453 return -EFAULT;
454 return 0;
455 }
456
457 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
458 const struct kvm_one_reg *reg, void __user *uaddr)
459 {
460 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
461
462 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
463 return -EFAULT;
464 return 0;
465 }
466
467 static void reset_bvr(struct kvm_vcpu *vcpu,
468 const struct sys_reg_desc *rd)
469 {
470 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
471 }
472
/* Trap handler for DBGBCRn_EL1: route to the per-vcpu breakpoint control. */
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
488
489 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
490 const struct kvm_one_reg *reg, void __user *uaddr)
491 {
492 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
493
494 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
495 return -EFAULT;
496
497 return 0;
498 }
499
500 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
501 const struct kvm_one_reg *reg, void __user *uaddr)
502 {
503 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
504
505 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
506 return -EFAULT;
507 return 0;
508 }
509
510 static void reset_bcr(struct kvm_vcpu *vcpu,
511 const struct sys_reg_desc *rd)
512 {
513 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
514 }
515
/* Trap handler for DBGWVRn_EL1: route to the per-vcpu watchpoint value. */
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}
532
533 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
534 const struct kvm_one_reg *reg, void __user *uaddr)
535 {
536 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
537
538 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
539 return -EFAULT;
540 return 0;
541 }
542
543 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
544 const struct kvm_one_reg *reg, void __user *uaddr)
545 {
546 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
547
548 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
549 return -EFAULT;
550 return 0;
551 }
552
553 static void reset_wvr(struct kvm_vcpu *vcpu,
554 const struct sys_reg_desc *rd)
555 {
556 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
557 }
558
/* Trap handler for DBGWCRn_EL1: route to the per-vcpu watchpoint control. */
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
574
575 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
576 const struct kvm_one_reg *reg, void __user *uaddr)
577 {
578 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
579
580 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
581 return -EFAULT;
582 return 0;
583 }
584
585 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
586 const struct kvm_one_reg *reg, void __user *uaddr)
587 {
588 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
589
590 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
591 return -EFAULT;
592 return 0;
593 }
594
595 static void reset_wcr(struct kvm_vcpu *vcpu,
596 const struct sys_reg_desc *rd)
597 {
598 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
599 }
600
601 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
602 {
603 u64 amair = read_sysreg(amair_el1);
604 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
605 }
606
/*
 * Synthesize MPIDR_EL1 from the vcpu id: Aff0 holds the low 4 bits,
 * Aff1 the next 8, Aff2 the next 8. Bit 31 (RES1 in MPIDR) is always
 * set. NOTE(review): bits 4-11 of vcpu_id appear to be skipped by the
 * Aff1/Aff2 shifts below (>> 4 then >> 12) -- this mirrors the
 * upstream layout of 16 vcpus per Aff0 cluster; confirm against
 * kvm_vcpu_to_be_mpidr users if changing.
 */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
623
/*
 * Reset PMCR_EL0: keep the host's read-only fields (e.g. the counter
 * count in PMCR.N), fill the writable bits with an arbitrary pattern
 * (0xdecafbad) as they reset to UNKNOWN, and make sure PMCR.E starts
 * clear so counting is disabled. Force 64-bit cycle counting (LC) when
 * 32-bit EL0 is not supported.
 */
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);

	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}
639
640 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
641 {
642 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
643 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
644
645 if (!enabled)
646 kvm_inject_undefined(vcpu);
647
648 return !enabled;
649 }
650
/* Deny EL0 PMU access unless PMUSERENR_EL0.EN is set. */
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}
655
/* Deny EL0 software-increment writes unless SW or EN is set. */
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}
660
/* Deny EL0 cycle counter access unless CR or EN is set. */
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}
665
/* Deny EL0 event counter access unless ER or EN is set. */
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
670
/*
 * Trap handler for PMCR_EL0. Writes only affect the architecturally
 * writable bits and are propagated to the PMU emulation; reads hide
 * the write-only P and C reset bits (they read as zero).
 */
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
701
/*
 * Trap handler for PMSELR_EL0. The raw value is stored on write; reads
 * return only the counter-index field.
 */
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
720
/*
 * Trap handler for PMCEID{0,1}_EL0 (read-only): report the host's
 * supported common event identification registers. Op2 bit 0 selects
 * PMCEID1 over PMCEID0.
 */
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);	/* the trap table only routes reads here */

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
743
/*
 * Check that @idx names an implemented counter: either below PMCR.N or
 * the dedicated cycle counter index. Injects an UNDEF otherwise.
 */
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
757
/*
 * Trap handler for the PMU event counter value registers, reached via
 * several encodings:
 *  - PMXEVCNTR_EL0 (CRn==9, CRm==13, Op2==2): counter chosen by PMSELR
 *  - PMCCNTR_EL0   (CRn==9, CRm==13, Op2==0): the cycle counter
 *  - 64-bit PMCCNTR alias (CRn==0, CRm==9)
 *  - PMEVCNTRn_EL0 (CRn==14, CRm in {8..11}): index encoded in CRm/Op2
 * After the EL0 permission checks, reads/writes are forwarded to the
 * PMU emulation.
 */
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
814
/*
 * Trap handler for the PMU event type registers: PMXEVTYPER_EL0
 * (counter chosen by PMSELR), PMEVTYPERn_EL0 (index in CRm/Op2), and
 * PMCCFILTR_EL0 for the cycle counter. Writes program the counter's
 * event via the PMU emulation and update the shadow register.
 */
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();	/* no other encoding routes here */
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
854
/*
 * Trap handler for PMCNTENSET_EL0/PMCNTENCLR_EL0. Both encodings share
 * the PMCNTENSET shadow: Op2 bit 0 distinguishes set (enable counters)
 * from clear (disable counters). Values are restricted to the valid
 * counter mask.
 */
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
885
/*
 * Trap handler for PMINTENSET_EL1/PMINTENCLR_EL1 (overflow interrupt
 * enables). EL0 access always UNDEFs; Op2 bit 0 distinguishes set from
 * clear, both backed by the PMINTENSET shadow.
 */
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
914
/*
 * Trap handler for PMOVSSET_EL0/PMOVSCLR_EL0 (overflow status). CRm
 * bit 1 distinguishes set from clear, both backed by the PMOVSSET
 * shadow and restricted to the valid counter mask.
 */
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
939
/*
 * Trap handler for PMSWINC_EL0 (write-only software increment):
 * forwards the masked counter set to the PMU emulation.
 */
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
958
/*
 * Trap handler for PMUSERENR_EL0. Writes require a privileged mode
 * (EL0 writes UNDEF); only the architecturally defined bits are kept.
 */
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
980
/*
 * Build the 32-bit system register encoding from a sys_reg_desc.
 * Note: no trailing semicolon -- the macro must expand to an
 * expression so it can be used in initializers and conditions.
 */
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
984
985
/*
 * Expand to the four per-index debug register descriptors
 * (DBGBVRn_EL1, DBGBCRn_EL1, DBGWVRn_EL1, DBGWCRn_EL1) sharing the
 * same index n.
 */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
995
996
/* Descriptor for one PMEVCNTRn_EL0 event counter register. */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
1000
1001
/* Descriptor for one PMEVTYPERn_EL0 event type register. */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1005
/*
 * Trap handler for the pointer authentication key registers: hand the
 * trap to the ptrauth lazy-enable machinery and return false so the
 * faulting instruction is re-executed (presumably without trapping
 * once the keys are loaded -- see kvm_arm_vcpu_ptrauth_trap()).
 */
static bool trap_ptrauth(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *rd)
{
	kvm_arm_vcpu_ptrauth_trap(vcpu);

	return false;
}
1022
1023 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1024 const struct sys_reg_desc *rd)
1025 {
1026 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1027 }
1028
/* Descriptor for one half of a ptrauth key register. */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

/* Descriptors for both the LO and HI halves of a ptrauth key. */
#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
1036
/*
 * Trap handler for the EL1 physical timer registers (CNTP_TVAL/CTL/
 * CVAL, AArch64 and AArch32 encodings). Any other encoding reaching
 * this handler is a trap-table bug, hence the BUG(). Accesses are
 * forwarded to the arch timer emulation.
 */
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
1072
1073
/*
 * Compute the value the guest sees for an ID register: zero when @raz,
 * otherwise the host's sanitised value with features the vcpu lacks
 * (SVE in ID_AA64PFR0, pointer auth in ID_AA64ISAR1) masked out.
 */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	}

	return val;
}
1092
1093
1094
1095 static bool __access_id_reg(struct kvm_vcpu *vcpu,
1096 struct sys_reg_params *p,
1097 const struct sys_reg_desc *r,
1098 bool raz)
1099 {
1100 if (p->is_write)
1101 return write_to_read_only(vcpu, p, r);
1102
1103 p->regval = read_id_reg(vcpu, r, raz);
1104 return true;
1105 }
1106
/* Trap handler for an ID register that reads its real (masked) value. */
static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}
1113
/* Trap handler for an ID register that is RAZ for this guest. */
static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
1120
/* Forward declarations for the userspace accessors used below. */
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1124
1125
1126 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1127 const struct sys_reg_desc *rd)
1128 {
1129 if (vcpu_has_sve(vcpu))
1130 return 0;
1131
1132 return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1133 }
1134
1135
1136 static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
1137 const struct sys_reg_desc *rd)
1138 {
1139 if (vcpu_has_sve(vcpu))
1140 return 0;
1141
1142 return REG_HIDDEN_USER;
1143 }
1144
1145
1146 static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
1147 {
1148 if (!vcpu_has_sve(vcpu))
1149 return 0;
1150
1151 return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
1152 }
1153
1154 static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1155 struct sys_reg_params *p,
1156 const struct sys_reg_desc *rd)
1157 {
1158 if (p->is_write)
1159 return write_to_read_only(vcpu, p, rd);
1160
1161 p->regval = guest_id_aa64zfr0_el1(vcpu);
1162 return true;
1163 }
1164
/*
 * Userspace read of ID_AA64ZFR0_EL1. The register is hidden from
 * userspace without SVE (sve_id_visibility), so reaching here without
 * SVE is a bug.
 */
static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	val = guest_id_aa64zfr0_el1(vcpu);
	return reg_to_user(uaddr, &val, reg->id);
}
1177
/*
 * Userspace write of ID_AA64ZFR0_EL1. The register is effectively
 * invariant: only a write of the exact current value is accepted.
 */
static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != guest_id_aa64zfr0_el1(vcpu))
		return -EINVAL;

	return 0;
}
1199
1200
1201
1202
1203
1204
1205
1206
1207 static int __get_id_reg(const struct kvm_vcpu *vcpu,
1208 const struct sys_reg_desc *rd, void __user *uaddr,
1209 bool raz)
1210 {
1211 const u64 id = sys_reg_to_index(rd);
1212 const u64 val = read_id_reg(vcpu, rd, raz);
1213
1214 return reg_to_user(uaddr, &val, id);
1215 }
1216
/*
 * Userspace write of an ID register. ID registers are invariant: only
 * a write of the exact current emulated value succeeds.
 */
static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}
1235
/* Userspace read of a (non-RAZ) ID register. */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, false);
}
1241
/* Userspace write of a normally-visible (non-RAZ) ID register. */
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, false);
}
1247
/* Userspace read of an ID register that is exposed read-as-zero. */
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}
1253
/* Userspace write of a RAZ ID register: only zero is accepted. */
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}
1259
1260 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1261 const struct sys_reg_desc *r)
1262 {
1263 if (p->is_write)
1264 return write_to_read_only(vcpu, p, r);
1265
1266 p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1267 return true;
1268 }
1269
1270 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1271 const struct sys_reg_desc *r)
1272 {
1273 if (p->is_write)
1274 return write_to_read_only(vcpu, p, r);
1275
1276 p->regval = read_sysreg(clidr_el1);
1277 return true;
1278 }
1279
/*
 * CSSELR: forwarded to/from the shadow sys_reg file.
 */
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	/*
	 * AArch32 accesses use a halved index — presumably because two
	 * 32-bit copro registers map onto one 64-bit shadow slot; TODO
	 * confirm against the cp15 register layout in kvm_host.h.
	 */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}
1295
/*
 * CCSIDR_EL1 is read-only; reads return the host CCSIDR for the cache
 * level currently selected by the guest's CSSELR.
 */
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * For non-instruction caches (CSSELR.InD == 0), clear bits
	 * [27:3] (associativity/set geometry) before returning the
	 * value to the guest — presumably so the guest cannot derive
	 * a cache geometry that differs across heterogeneous CPUs;
	 * NOTE(review): confirm intent against the upstream rationale.
	 */
	if (!(csselr & 1))
		p->regval &= ~GENMASK(27, 3);
	return true;
}
1323
1324
/*
 * sys_reg_desc initialiser for a cpufeature-sanitised ID register:
 * guest reads and userspace accesses all go through the ID-register
 * accessors, which consult the system-wide sanitised value.
 */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for an unallocated encoding in the ID
 * register space (Op0=3, Op1=0, CRn=0): architecturally RAZ, and
 * exposed to userspace as RAZ as well.
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for an allocated ID register that we
 * deliberately hide from the guest: reads return zero, writes are
 * ignored, and userspace sees RAZ too.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
/*
 * Architected system registers.
 *
 * Important: entries must be sorted ascending by encoding
 * (Op0, Op1, CRn, CRm, Op2) — find_reg() looks them up with bsearch().
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* Set/Way cache maintenance, trapped and emulated */
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	/* Debug breakpoint/watchpoint value+control registers 0-15 */
	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* External debug/OS-lock registers: mostly RAZ/WI for the guest */
	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	/* DBGDTR[TR]X_EL0 share the same encoding */
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have
	 * corresponding entries in the cpufeature sanitisation tables.
	 */
	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	/* Pointer authentication keys */
	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	/* RAS error records: not exposed to the guest (RAZ/WI) */
	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	/* PMINTENCLR shares the PMINTENSET shadow state */
	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* LORegion registers */
	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	/* GICv3 CPU interface registers */
	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	/* Cache identification */
	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	/* PMU registers; the *CLR variants share the *SET shadow state */
	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 is reset to zero here; NOTE(review): the
	 * architectural reset value is UNKNOWN — presumably zero was
	 * chosen deliberately, confirm against upstream rationale.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	/* EL0 physical timer */
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMU event counters */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),

	/* PMU event type selectors */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),

	/* PMCCFILTR_EL0 is handled by the event-typer accessor */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	/* AArch32 state registers visible at EL2 */
	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
1652
/*
 * Synthesise a 32-bit DBGDIDR value for AArch32 guests from the
 * sanitised AArch64 feature registers. Writes are ignored.
 */
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		/* 1 if EL3 is implemented, 0 otherwise */
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		/*
		 * Pack watchpoint count, breakpoint count and context
		 * comparators from ID_AA64DFR0 into the DBGDIDR layout.
		 * The (6 << 16) constant is the debug architecture
		 * version field — presumably v8 debug (0b0110); the el3
		 * bits feed the SE/NSUHD-style fields at [14] and [12].
		 * NOTE(review): confirm against the DBGDIDR layout.
		 */
		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
1671
/*
 * Trap handler for 32-bit cp14 debug registers backed by the vcpu's
 * cp14 shadow array. A write also marks the debug state dirty so it
 * gets synced back to hardware.
 */
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
/*
 * AArch32 DBGBXVRn accessor: DBGBXVRn maps to the upper 32 bits of
 * the 64-bit DBGBVRn_EL1 breakpoint value register (the lower half is
 * handled by trap_bvr). Writes merge the new high word with the
 * existing low word and mark the debug state dirty.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		/* Preserve bits [31:0], replace bits [63:32] */
		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
1719
/*
 * cp14 descriptors for breakpoint/watchpoint n: value and control
 * registers routed to the common trap_b{v,c}r/trap_w{v,c}r handlers,
 * with r->reg carrying the b/w point index.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, 	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, 	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

/* DBGBXVRn: high half of the 64-bit breakpoint value register */
#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1732
1733
1734
1735
1736
1737
/*
 * Trapped cp14 (AArch32 debug) registers. Most of the external-debug
 * interface is exposed RAZ/WI; the breakpoint/watchpoint registers go
 * through the shared trap handlers. Entries must stay sorted by
 * encoding for find_reg()'s bsearch.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
1821
1822
/* Trapped cp14 64-bit registers, all RAZ/WI. */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
1830
1831
/*
 * cp15 descriptor for PMEVCNTRn: event counter index n is encoded in
 * CRm[1:0] (n >> 3) and Op2 (n & 7), base CRm = 0b1000.
 */
#define PMU_PMEVCNTR(n)							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/*
 * cp15 descriptor for PMEVTYPERn: same index encoding as above, base
 * CRm = 0b1100.
 */
#define PMU_PMEVTYPER(n)						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
1844
1845
1846
1847
1848
1849
/*
 * Trapped cp15 registers. TTBR0/TTBR1/TTBCR and friends are routed to
 * the common VM-register handler; PMU registers share the AArch64
 * accessors. Entries must stay sorted by encoding for find_reg().
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/* Set/Way cache maintenance (DC ISW/CSW/CISW equivalents) */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* Arch timer, EL0 physical */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),

	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),

	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	/* Cache ID registers */
	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
};
1973
/* Trapped cp15 64-bit (MCRR/MRRC) registers. */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	/* PMCCNTR (64bit) */
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	/* ICC_SGI1R */
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	/* ICC_ASGI1R */
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	/* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
1983
1984
/* Per-target register tables, indexed by vcpu target CPU type. */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/* Register a target-specific sys_reg table for the given CPU target. */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
1992
1993
1994 static const struct sys_reg_desc *get_target_table(unsigned target,
1995 bool mode_is_64,
1996 size_t *num)
1997 {
1998 struct kvm_sys_reg_target_table *table;
1999
2000 table = target_tables[target];
2001 if (mode_is_64) {
2002 *num = table->table64.num;
2003 return table->table64.table;
2004 } else {
2005 *num = table->table32.num;
2006 return table->table32.table;
2007 }
2008 }
2009
/*
 * bsearch comparator: orders by packed register encoding. The key is
 * the encoding itself smuggled through the pointer argument.
 */
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}
2017
/*
 * Binary-search @table (which must be sorted by encoding) for the
 * descriptor matching @params; NULL if absent.
 */
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
2026
/* LDC/STC to cp14 are not supported: inject an UNDEF into the guest. */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
2032
/*
 * Run the accessor for a trapped register and, on success, skip the
 * trapping instruction.
 */
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Registers hidden by runtime configuration UNDEF in the guest */
	if (sysreg_hidden_from_guest(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * A descriptor without an accessor means we have configured a
	 * trap we do not know how to handle — a kernel bug, not a
	 * guest-triggerable condition.
	 */
	BUG_ON(!r->access);

	/* Skip the trapping instruction if it was handled */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067 static int emulate_cp(struct kvm_vcpu *vcpu,
2068 struct sys_reg_params *params,
2069 const struct sys_reg_desc *table,
2070 size_t num)
2071 {
2072 const struct sys_reg_desc *r;
2073
2074 if (!table)
2075 return -1;
2076
2077 r = find_reg(params, table, num);
2078
2079 if (r) {
2080 perform_access(vcpu, params, r);
2081 return 0;
2082 }
2083
2084
2085 return -1;
2086 }
2087
/*
 * A coprocessor access we have no handler for: log it and inject an
 * UNDEF into the guest. The coprocessor number is derived from the
 * trap class for the error message.
 */
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		/* Only reachable from the cp14/cp15 trap paths */
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
		cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
2112
2113
2114
2115
2116
2117
2118 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2119 const struct sys_reg_desc *global,
2120 size_t nr_global,
2121 const struct sys_reg_desc *target_specific,
2122 size_t nr_specific)
2123 {
2124 struct sys_reg_params params;
2125 u32 hsr = kvm_vcpu_get_hsr(vcpu);
2126 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2127 int Rt2 = (hsr >> 10) & 0x1f;
2128
2129 params.is_aarch32 = true;
2130 params.is_32bit = false;
2131 params.CRm = (hsr >> 1) & 0xf;
2132 params.is_write = ((hsr & 1) == 0);
2133
2134 params.Op0 = 0;
2135 params.Op1 = (hsr >> 16) & 0xf;
2136 params.Op2 = 0;
2137 params.CRn = 0;
2138
2139
2140
2141
2142
2143 if (params.is_write) {
2144 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2145 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2146 }
2147
2148
2149
2150
2151
2152
2153
2154
2155 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) ||
2156 !emulate_cp(vcpu, ¶ms, global, nr_global)) {
2157
2158 if (!params.is_write) {
2159 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2160 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2161 }
2162
2163 return 1;
2164 }
2165
2166 unhandled_cp_access(vcpu, ¶ms);
2167 return 1;
2168 }
2169
2170
2171
2172
2173
2174
2175 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2176 const struct sys_reg_desc *global,
2177 size_t nr_global,
2178 const struct sys_reg_desc *target_specific,
2179 size_t nr_specific)
2180 {
2181 struct sys_reg_params params;
2182 u32 hsr = kvm_vcpu_get_hsr(vcpu);
2183 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2184
2185 params.is_aarch32 = true;
2186 params.is_32bit = true;
2187 params.CRm = (hsr >> 1) & 0xf;
2188 params.regval = vcpu_get_reg(vcpu, Rt);
2189 params.is_write = ((hsr & 1) == 0);
2190 params.CRn = (hsr >> 10) & 0xf;
2191 params.Op0 = 0;
2192 params.Op1 = (hsr >> 14) & 0x7;
2193 params.Op2 = (hsr >> 17) & 0x7;
2194
2195 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) ||
2196 !emulate_cp(vcpu, ¶ms, global, nr_global)) {
2197 if (!params.is_write)
2198 vcpu_set_reg(vcpu, Rt, params.regval);
2199 return 1;
2200 }
2201
2202 unhandled_cp_access(vcpu, ¶ms);
2203 return 1;
2204 }
2205
/* Exit handler for 64-bit cp15 (MCRR/MRRC) traps. */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}
2216
/* Exit handler for 32-bit cp15 (MCR/MRC) traps. */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}
2227
/* Exit handler for 64-bit cp14 traps; no target-specific table. */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}
2234
/* Exit handler for 32-bit cp14 traps; no target-specific table. */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
2241
/*
 * Emulate a trapped AArch64 system register access: look the register
 * up in the target-specific table first, then the generic table, and
 * run its accessor; inject an UNDEF if nobody claims it.
 */
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}
2265
/*
 * Run the reset hook of every descriptor in @table that has one, and
 * record in @bmap which shadow sys_reg slots were reset (so callers
 * can detect registers that were never initialised).
 */
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num,
				unsigned long *bmap)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset) {
			int reg = table[i].reg;

			table[i].reset(vcpu, &table[i]);
			/* reg == 0 means "no shadow slot" — skip it */
			if (reg > 0 && reg < NR_SYS_REGS)
				set_bit(reg, bmap);
		}
}
2281
2282
2283
2284
2285
2286
2287 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2288 {
2289 struct sys_reg_params params;
2290 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2291 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2292 int ret;
2293
2294 trace_kvm_handle_sys_reg(esr);
2295
2296 params.is_aarch32 = false;
2297 params.is_32bit = false;
2298 params.Op0 = (esr >> 20) & 3;
2299 params.Op1 = (esr >> 14) & 0x7;
2300 params.CRn = (esr >> 10) & 0xf;
2301 params.CRm = (esr >> 1) & 0xf;
2302 params.Op2 = (esr >> 17) & 0x7;
2303 params.regval = vcpu_get_reg(vcpu, Rt);
2304 params.is_write = !(esr & 1);
2305
2306 ret = emulate_sys_reg(vcpu, ¶ms);
2307
2308 if (!params.is_write)
2309 vcpu_set_reg(vcpu, Rt, params.regval);
2310 return ret;
2311 }
2312
2313
2314
2315
2316
/*
 * Decode a KVM_{GET,SET}_ONE_REG index into sys_reg_params. Only
 * 64-bit indices are accepted, and only if no bits outside the known
 * encoding fields are set.
 */
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean a malformed id */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
2345
/*
 * Decode a userspace register id into @params and look it up in
 * @table; NULL if the id is malformed or absent from the table.
 */
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
2356
2357
2358 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2359 u64 id)
2360 {
2361 size_t num;
2362 const struct sys_reg_desc *table, *r;
2363 struct sys_reg_params params;
2364
2365
2366 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2367 return NULL;
2368
2369 if (!index_to_params(id, ¶ms))
2370 return NULL;
2371
2372 table = get_target_table(vcpu->arch.target, true, &num);
2373 r = find_reg(¶ms, table, num);
2374 if (!r)
2375 r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2376
2377
2378 if (r && !(r->reg || r->get_user))
2379 r = NULL;
2380
2381 return r;
2382 }
2383
2384
2385
2386
2387
2388
2389
2390
2391
/*
 * FUNCTION_INVARIANT(reg) generates get_<reg>(), which snapshots the
 * host's current value of <reg> into the descriptor's ->val field.
 * The cast away from const is deliberate: the invariant table below is
 * only logically const and is filled in once at init time.
 */
#define FUNCTION_INVARIANT(reg) \
	static void get_##reg(struct kvm_vcpu *v, \
			      const struct sys_reg_desc *r) \
	{ \
		((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
	}

/* Generate the host-value snapshot helpers used by invariant_sys_regs[]. */
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
2403
/*
 * CTR_EL0 is taken from the sanitised feature register value rather
 * than read directly from hardware, so every vcpu observes the same
 * (system-wide safe) cache type register.
 */
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}
2408
2409
/*
 * Invariant registers: the guest sees the host's values, and userspace
 * may only "write" back the identical value.  Each entry's ->val is
 * filled in at init time via its get_*() snapshot function (stored in
 * the reset slot); the access pointer is NULL.
 */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
2417
2418 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2419 {
2420 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2421 return -EFAULT;
2422 return 0;
2423 }
2424
2425 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2426 {
2427 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2428 return -EFAULT;
2429 return 0;
2430 }
2431
2432 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2433 {
2434 struct sys_reg_params params;
2435 const struct sys_reg_desc *r;
2436
2437 r = find_reg_by_id(id, ¶ms, invariant_sys_regs,
2438 ARRAY_SIZE(invariant_sys_regs));
2439 if (!r)
2440 return -ENOENT;
2441
2442 return reg_to_user(uaddr, &r->val, id);
2443 }
2444
2445 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2446 {
2447 struct sys_reg_params params;
2448 const struct sys_reg_desc *r;
2449 int err;
2450 u64 val = 0;
2451
2452 r = find_reg_by_id(id, ¶ms, invariant_sys_regs,
2453 ARRAY_SIZE(invariant_sys_regs));
2454 if (!r)
2455 return -ENOENT;
2456
2457 err = reg_from_user(&val, uaddr, id);
2458 if (err)
2459 return err;
2460
2461
2462 if (r->val != val)
2463 return -EINVAL;
2464
2465 return 0;
2466 }
2467
2468 static bool is_valid_cache(u32 val)
2469 {
2470 u32 level, ctype;
2471
2472 if (val >= CSSELR_MAX)
2473 return false;
2474
2475
2476 level = (val >> 1);
2477 ctype = (cache_levels >> (level * 3)) & 7;
2478
2479 switch (ctype) {
2480 case 0:
2481 return false;
2482 case 1:
2483 return (val & 1);
2484 case 2:
2485 case 4:
2486 return !(val & 1);
2487 case 3:
2488 return true;
2489 default:
2490 return false;
2491 }
2492 }
2493
2494 static int demux_c15_get(u64 id, void __user *uaddr)
2495 {
2496 u32 val;
2497 u32 __user *uval = uaddr;
2498
2499
2500 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2501 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2502 return -ENOENT;
2503
2504 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2505 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2506 if (KVM_REG_SIZE(id) != 4)
2507 return -ENOENT;
2508 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2509 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2510 if (!is_valid_cache(val))
2511 return -ENOENT;
2512
2513 return put_user(get_ccsidr(val), uval);
2514 default:
2515 return -ENOENT;
2516 }
2517 }
2518
2519 static int demux_c15_set(u64 id, void __user *uaddr)
2520 {
2521 u32 val, newval;
2522 u32 __user *uval = uaddr;
2523
2524
2525 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2526 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2527 return -ENOENT;
2528
2529 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2530 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2531 if (KVM_REG_SIZE(id) != 4)
2532 return -ENOENT;
2533 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2534 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2535 if (!is_valid_cache(val))
2536 return -ENOENT;
2537
2538 if (get_user(newval, uval))
2539 return -EFAULT;
2540
2541
2542 if (newval != get_ccsidr(val))
2543 return -EINVAL;
2544 return 0;
2545 default:
2546 return -ENOENT;
2547 }
2548 }
2549
/*
 * KVM_GET_ONE_REG entry point for system registers.
 *
 * Dispatch order: demux (CCSIDR) indices first, then 64-bit sysreg
 * indices, falling back to the invariant registers when no descriptor
 * matches.  Returns 0, -ENOENT, -EFAULT, or whatever a descriptor's
 * ->get_user accessor returns.
 */
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	/* All non-demux sysregs are exposed as 64-bit indices. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	/* No accessor: copy straight out of the shadow register file. */
	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
2574
/*
 * KVM_SET_ONE_REG entry point for system registers.
 *
 * Mirrors kvm_arm_sys_reg_get_reg(): demux indices first, then 64-bit
 * sysreg indices, then the invariant registers.  Returns 0, -ENOENT,
 * -EFAULT/-EINVAL, or whatever a descriptor's ->set_user returns.
 */
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	/* All non-demux sysregs are exposed as 64-bit indices. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	/* No accessor: copy straight into the shadow register file. */
	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
2599
2600 static unsigned int num_demux_regs(void)
2601 {
2602 unsigned int i, count = 0;
2603
2604 for (i = 0; i < CSSELR_MAX; i++)
2605 if (is_valid_cache(i))
2606 count++;
2607
2608 return count;
2609 }
2610
2611 static int write_demux_regids(u64 __user *uindices)
2612 {
2613 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2614 unsigned int i;
2615
2616 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2617 for (i = 0; i < CSSELR_MAX; i++) {
2618 if (!is_valid_cache(i))
2619 continue;
2620 if (put_user(val | i, uindices))
2621 return -EFAULT;
2622 uindices++;
2623 }
2624 return 0;
2625 }
2626
2627 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2628 {
2629 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2630 KVM_REG_ARM64_SYSREG |
2631 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2632 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2633 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2634 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2635 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2636 }
2637
2638 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2639 {
2640 if (!*uind)
2641 return true;
2642
2643 if (put_user(sys_reg_to_index(reg), *uind))
2644 return false;
2645
2646 (*uind)++;
2647 return true;
2648 }
2649
2650 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2651 const struct sys_reg_desc *rd,
2652 u64 __user **uind,
2653 unsigned int *total)
2654 {
2655
2656
2657
2658
2659 if (!(rd->reg || rd->get_user))
2660 return 0;
2661
2662 if (sysreg_hidden_from_user(vcpu, rd))
2663 return 0;
2664
2665 if (!copy_reg_to_user(rd, uind))
2666 return -EFAULT;
2667
2668 (*total)++;
2669 return 0;
2670 }
2671
2672
/*
 * Merge-walk the target-specific and generic sysreg tables in sorted
 * order, emitting each user-visible register exactly once.  When both
 * tables contain the same encoding (cmp == 0), the target-specific
 * entry wins and both cursors advance.  A cursor is set to NULL once
 * its table is exhausted.
 *
 * Returns the number of registers emitted, or a negative error.
 * With @uind == NULL this only counts.
 */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	/* Both tables are assumed non-empty below. */
	BUG_ON(i1 == end1 || i2 == end2);

	while (i1 || i2) {
		/*
		 * NOTE(review): once one table is exhausted its cursor is
		 * NULL; cmp_sys_reg() is presumably written to order a NULL
		 * argument after any real entry — confirm in its definition.
		 */
		int cmp = cmp_sys_reg(i1, i2);

		/* target-specific table takes precedence on ties */
		if (cmp <= 0)
			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
		else
			err = walk_one_sys_reg(vcpu, i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
2707
2708 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2709 {
2710 return ARRAY_SIZE(invariant_sys_regs)
2711 + num_demux_regs()
2712 + walk_sys_regs(vcpu, (u64 __user *)NULL);
2713 }
2714
2715 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2716 {
2717 unsigned int i;
2718 int err;
2719
2720
2721 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2722 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2723 return -EFAULT;
2724 uindices++;
2725 }
2726
2727 err = walk_sys_regs(vcpu, uindices);
2728 if (err < 0)
2729 return err;
2730 uindices += err;
2731
2732 return write_demux_regids(uindices);
2733 }
2734
2735 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2736 {
2737 unsigned int i;
2738
2739 for (i = 1; i < n; i++) {
2740 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2741 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2742 return 1;
2743 }
2744 }
2745
2746 return 0;
2747 }
2748
/*
 * One-time setup for the sysreg machinery: sanity-check that every
 * trap table is strictly sorted, snapshot the invariant registers from
 * the host, and cache a cleaned-up copy of CLIDR_EL1 in cache_levels.
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/*
	 * The invariant entries store their get_*() snapshot helper in the
	 * reset slot; calling it here fills in each entry's ->val with the
	 * host's value.
	 */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * Cache CLIDR_EL1, truncated at the first level whose 3-bit
	 * cache-type field is 0 (no cache implemented there), so that
	 * is_valid_cache() never sees stale bits above the last real level.
	 */
	get_clidr_el1(NULL, &clidr);
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
2784
2785
2786
2787
2788
2789
2790
2791
/*
 * Reset every shadow system register of @vcpu via the descriptor
 * tables, then verify with a bitmap that no register slot was missed.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;
	/* Each bit records that the corresponding sysreg slot was reset. */
	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };

	/* Generic table first, so the target-specific one can override. */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num, bmap);

	/*
	 * Slot 0 is deliberately skipped — presumably a reserved/invalid
	 * index; confirm against the NR_SYS_REGS enum.  Warn once on the
	 * first slot no table reset.
	 */
	for (num = 1; num < NR_SYS_REGS; num++) {
		if (WARN(!test_bit(num, bmap),
			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
			break;
	}
}