This source file includes the following definitions:
- vcpu_reg
- __vcpu_spsr
- kvm_inject_vabt
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS registers: r0-r14 all come from the usr bank */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ registers: r8-r14 are banked */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ registers: r13-r14 are banked */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC registers: r13-r14 are banked */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT registers: r13-r14 are banked */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND registers: r13-r14 are banked */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
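
/*
 * Illustration (not part of the original file): the table above is indexed
 * first by the mode offset and then by the architectural register number.
 * For example, looking up r8 while the guest is in FIQ mode resolves to the
 * banked copy held in struct kvm_regs:
 *
 *	vcpu_reg_offsets[VCPU_REG_OFFSET_FIQ][8] == REG_OFFSET(fiq_regs[0])
 *
 * i.e. the u32-sized word offset of fiq_regs[0] within struct kvm_regs.
 */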

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* USR..SVC map to offsets 0..3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
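
/*
 * Usage sketch (illustrative, not part of the original file): an MMIO load
 * emulation path might write the value obtained from the device model back
 * into the guest's destination register, where 'rd' and 'data' are
 * hypothetical locals of the caller:
 *
 *	*vcpu_reg(vcpu, rd) = data;
 *
 * Because the returned pointer is selected through the banked-register table
 * above, the same 'rd' can refer to different storage depending on the mode
 * bits in *vcpu_cpsr(vcpu).
 */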

/*
 * Return a pointer to the SPSR for the current mode of the virtual CPU.
 */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
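
/*
 * Usage sketch (illustrative, not part of the original file): code that
 * emulates exception entry into the guest typically saves the old CPSR into
 * the SPSR of the mode being entered, roughly:
 *
 *	unsigned long cpsr = *vcpu_cpsr(vcpu);
 *	... switch *vcpu_cpsr(vcpu) to the target exception mode ...
 *	*__vcpu_spsr(vcpu) = cpsr;
 *
 * Note that __vcpu_spsr() looks at the *current* mode, so it must be called
 * after the new mode has been written to the CPSR.
 */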

/*
 * Inject exceptions into the guest
 */

/**
 * kvm_inject_vabt - inject a virtual abort (asynchronous external abort)
 *		     into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * Setting HCR_VA makes the hardware deliver a virtual data abort to the
 * guest the next time it runs.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	*vcpu_hcr(vcpu) |= HCR_VA;
}
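
/*
 * Usage sketch (illustrative, not part of the original file): a fault
 * handling path might make this kind of call when a guest access cannot be
 * handled or emulated sanely, where 'emulation_failed' is a hypothetical
 * condition of the caller:
 *
 *	if (emulation_failed)
 *		kvm_inject_vabt(vcpu);
 *
 * The abort is virtual and asynchronous: nothing is delivered immediately;
 * the guest takes the abort the next time it runs with HCR_VA set.
 */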