This source file includes the following definitions:
- vgic_v4_doorbell_handler
- vgic_v4_init
- vgic_v4_teardown
- vgic_v4_sync_hwstate
- vgic_v4_flush_hwstate
- vgic_get_its
- kvm_vgic_v4_set_forwarding
- kvm_vgic_v4_unset_forwarding
- kvm_vgic_v4_enable_doorbell
- kvm_vgic_v4_disable_doorbell

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

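/*
 * With GICv4, the ITS can deliver virtual LPIs directly to a running
 * vCPU. While the vCPU is not running, a per-vCPU host "doorbell"
 * interrupt fires instead; its handler below records that a vLPI is
 * pending and kicks the vCPU so that it gets scheduled again.
 */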
#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

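/**
 * vgic_v4_init - GICv4 support initialization
 * @kvm:	the VM being initialized
 *
 * Allocate one vPE per vCPU, hook them into the ITS layer and request
 * the corresponding doorbell interrupts. Returns 0 on success (or when
 * GICv4 is not available), a negative error code otherwise.
 */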
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0;

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;

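		/*
		 * The doorbell is requested with IRQ_NOAUTOEN, so it
		 * stays disabled until kvm_vgic_v4_enable_doorbell()
		 * explicitly enables it, and with IRQ_NO_BALANCING, as
		 * its affinity is driven by vgic_v4_flush_hwstate().
		 */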
		irq_set_status_flags(irq, DB_IRQ_FLAGS);
		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
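
			/*
			 * Only the first 'i' doorbells were requested
			 * successfully; trim nr_vpes so that the
			 * teardown path below frees exactly those.
			 */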
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

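/**
 * vgic_v4_teardown - free the GICv4 resources
 * @kvm:	the VM being torn down
 *
 * Releases the doorbell interrupts and the vPE array set up by
 * vgic_v4_init().
 */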
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

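/*
 * vgic_v4_sync_hwstate() makes the vPE non-resident again, while
 * vgic_v4_flush_hwstate() makes it resident on the current CPU. Both
 * are no-ops when direct MSI injection is not in use.
 */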
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!vgic_supports_direct_msis(vcpu->kvm))
		return 0;

	return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
}

int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
{
	int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm))
		return 0;

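	/*
	 * Before making the vPE resident, move its doorbell to the CPU
	 * that is about to run the vCPU.
	 */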
	err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
	if (err)
		return err;

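	/*
	 * Now that the vPE is resident, discard any doorbell interrupt
	 * that may still be pending.
	 */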
	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo = irq_entry->msi.address_lo,
		.address_hi = irq_entry->msi.address_hi,
		.data       = irq_entry->msi.data,
		.flags      = irq_entry->msi.flags,
		.devid      = irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

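/*
 * kvm_vgic_v4_set_forwarding() maps a host interrupt (virq) directly
 * onto the virtual LPI described by the MSI routing entry, so that the
 * ITS can deliver it to the guest without software injection.
 */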
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

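	/*
	 * If the MSI does not resolve to one of our vITSes, there is
	 * nothing to forward here; report success and leave injection
	 * to the existing software path.
	 */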
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

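	/*
	 * Describe the vLPI mapping: target the vPE of the vCPU this
	 * LPI is routed to, and mirror the LPI's intid, priority, group
	 * and enable state. The doorbell is enabled so that the vCPU
	 * gets kicked if the vLPI arrives while it is not resident.
	 */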
	map = (struct its_vlpi_map) {
		.vm = &kvm->arch.vgic.its_vm,
		.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid = irq->intid,
		.properties = ((irq->priority & 0xfc) |
			       (irq->enabled ? LPI_PROP_ENABLED : 0) |
			       LPI_PROP_GROUP1),
		.db_enabled = true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

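/*
 * kvm_vgic_v4_unset_forwarding() undoes the mapping established by
 * kvm_vgic_v4_set_forwarding(), detaching the host interrupt from its
 * vLPI.
 */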
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

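	/* As above: no vITS for this MSI means there is nothing to undo. */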
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

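/*
 * The two helpers below let the rest of KVM enable or disable a vCPU's
 * doorbell interrupt; they only act when direct MSI injection is
 * supported and the doorbell IRQ has actually been allocated.
 */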
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
{
	if (vgic_supports_direct_msis(vcpu->kvm)) {
		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

		if (irq)
			enable_irq(irq);
	}
}

void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
{
	if (vgic_supports_direct_msis(vcpu->kvm)) {
		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;

		if (irq)
			disable_irq(irq);
	}
}