1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  */
18 
19 #include <linux/cpu.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 
30 #include <asm/kvm_emulate.h>
31 #include <asm/kvm_arm.h>
32 #include <asm/kvm_mmu.h>
33 #include <trace/events/kvm.h>
34 #include <asm/kvm.h>
35 #include <kvm/iodev.h>
36 
37 #define CREATE_TRACE_POINTS
38 #include "trace.h"
39 
40 /*
41  * How the whole thing works (courtesy of Christoffer Dall):
42  *
43  * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
44  *   something is pending on the CPU interface.
45  * - Interrupts that are pending on the distributor are stored on the
46  *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
47  *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
48  *   arch. timers).
49  * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
50  *   recalculated
51  * - To calculate the oracle, we need info for each cpu from
52  *   compute_pending_for_cpu, which considers:
53  *   - PPI: dist->irq_pending & dist->irq_enable
54  *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
55  *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
56  *     registers, stored on each vcpu. We only keep one bit of
57  *     information per interrupt, making sure that only one vcpu can
58  *     accept the interrupt.
59  * - If any of the above state changes, we must recalculate the oracle.
60  * - The same is true when injecting an interrupt, except that we only
61  *   consider a single interrupt at a time. The irq_spi_cpu array
62  *   contains the target CPU for each SPI.
63  *
64  * The handling of level interrupts adds some extra complexity. We
65  * need to track when the interrupt has been EOIed, so we can sample
66  * the 'line' again. This is achieved as such:
67  *
68  * - When a level interrupt is moved onto a vcpu, the corresponding
69  *   bit in irq_queued is set. As long as this bit is set, the line
70  *   will be ignored for further interrupts. The interrupt is injected
71  *   into the vcpu with the GICH_LR_EOI bit set (generate a
72  *   maintenance interrupt on EOI).
73  * - When the interrupt is EOIed, the maintenance interrupt fires,
74  *   and clears the corresponding bit in irq_queued. This allows the
75  *   interrupt line to be sampled again.
76  * - Note that level-triggered interrupts can also be set to pending from
77  *   writes to GICD_ISPENDRn and lowering the external input line does not
78  *   cause the interrupt to become inactive in such a situation.
79  *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
80  *   inactive as long as the external input line is held high.
81  *
82  *
83  * Initialization rules: there are multiple stages to the vgic
84  * initialization, both for the distributor and the CPU interfaces.
85  *
86  * Distributor:
87  *
88  * - kvm_vgic_early_init(): initialization of static data that doesn't
89  *   depend on any sizing information or emulation type. No allocation
90  *   is allowed there.
91  *
92  * - vgic_init(): allocation and initialization of the generic data
93  *   structures that depend on sizing information (number of CPUs,
94  *   number of interrupts). Also initializes the vcpu specific data
95  *   structures. Can be executed lazily for GICv2.
96  *   [to be renamed to kvm_vgic_init??]
97  *
98  * CPU Interface:
99  *
100  * - kvm_vgic_cpu_early_init(): initialization of static data that
101  *   doesn't depend on any sizing information or emulation type. No
102  *   allocation is allowed there.
103  */
104 
105 #include "vgic.h"
106 
107 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
108 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
109 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
110 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
111 static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
112 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
113 						int virt_irq);
114 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
115 
116 static const struct vgic_ops *vgic_ops;
117 static const struct vgic_params *vgic;
118 
119 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
120 {
121 	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
122 }
123 
124 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
125 {
126 	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
127 }
128 
129 int kvm_vgic_map_resources(struct kvm *kvm)
130 {
131 	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
132 }
133 
134 /*
135  * struct vgic_bitmap contains a bitmap made of unsigned longs, but
136  * extracts u32s out of them.
137  *
138  * This does not work on 64-bit BE systems, because the bitmap access
139  * will store two consecutive 32-bit words with the higher-addressed
140  * register's bits at the lower index and the lower-addressed register's
141  * bits at the higher index.
142  *
143  * Therefore, swizzle the register index when accessing the 32-bit word
144  * registers to access the right register's value.
145  */
146 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
147 #define REG_OFFSET_SWIZZLE	1
148 #else
149 #define REG_OFFSET_SWIZZLE	0
150 #endif
151 
152 static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
153 {
154 	int nr_longs;
155 
156 	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
157 
158 	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
159 	if (!b->private)
160 		return -ENOMEM;
161 
162 	b->shared = b->private + nr_cpus;
163 
164 	return 0;
165 }
166 
167 static void vgic_free_bitmap(struct vgic_bitmap *b)
168 {
169 	kfree(b->private);
170 	b->private = NULL;
171 	b->shared = NULL;
172 }
173 
174 /*
175  * Call this function to convert a u64 value to an unsigned long * bitmask
176  * in a way that works on both 32-bit and 64-bit LE and BE platforms.
177  *
178  * Warning: Calling this function may modify *val.
179  */
180 static unsigned long *u64_to_bitmask(u64 *val)
181 {
182 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
183 	*val = (*val >> 32) | (*val << 32);
184 #endif
185 	return (unsigned long *)val;
186 }
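
/*
 * Illustrative note (added): values such as ELRSR and EISR are read as a
 * single u64 but are then walked as an array of unsigned long by the bitmap
 * helpers. On a 32-bit big-endian host the u64 stores bits 32-63 in the
 * lower-addressed word, so without the swap above the bits for LR0-31 would
 * be scanned at the wrong word index.
 */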
187 
188 u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
189 {
190 	offset >>= 2;
191 	if (!offset)
192 		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
193 	else
194 		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
195 }
196 
197 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
198 				   int cpuid, int irq)
199 {
200 	if (irq < VGIC_NR_PRIVATE_IRQS)
201 		return test_bit(irq, x->private + cpuid);
202 
203 	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
204 }
205 
206 void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
207 			     int irq, int val)
208 {
209 	unsigned long *reg;
210 
211 	if (irq < VGIC_NR_PRIVATE_IRQS) {
212 		reg = x->private + cpuid;
213 	} else {
214 		reg = x->shared;
215 		irq -= VGIC_NR_PRIVATE_IRQS;
216 	}
217 
218 	if (val)
219 		set_bit(irq, reg);
220 	else
221 		clear_bit(irq, reg);
222 }
223 
224 static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
225 {
226 	return x->private + cpuid;
227 }
228 
229 unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
230 {
231 	return x->shared;
232 }
233 
234 static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
235 {
236 	int size;
237 
238 	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
239 	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
240 
241 	x->private = kzalloc(size, GFP_KERNEL);
242 	if (!x->private)
243 		return -ENOMEM;
244 
245 	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
246 	return 0;
247 }
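
/*
 * Clarifying note (added): a bytemap stores one byte per interrupt (e.g. the
 * per-interrupt priority bytes). ->private is declared as a u32 pointer, so
 * the shared area must start nr_cpus * VGIC_NR_PRIVATE_IRQS bytes in, which
 * is why the offset above is divided by sizeof(u32) for the pointer
 * arithmetic.
 */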
248 
249 static void vgic_free_bytemap(struct vgic_bytemap *b)
250 {
251 	kfree(b->private);
252 	b->private = NULL;
253 	b->shared = NULL;
254 }
255 
256 u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
257 {
258 	u32 *reg;
259 
260 	if (offset < VGIC_NR_PRIVATE_IRQS) {
261 		reg = x->private;
262 		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
263 	} else {
264 		reg = x->shared;
265 		offset -= VGIC_NR_PRIVATE_IRQS;
266 	}
267 
268 	return reg + (offset / sizeof(u32));
269 }
270 
271 #define VGIC_CFG_LEVEL	0
272 #define VGIC_CFG_EDGE	1
273 
274 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
275 {
276 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
277 	int irq_val;
278 
279 	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
280 	return irq_val == VGIC_CFG_EDGE;
281 }
282 
283 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
284 {
285 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
286 
287 	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
288 }
289 
290 static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
291 {
292 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
293 
294 	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
295 }
296 
297 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
298 {
299 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
300 
301 	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
302 }
303 
304 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
305 {
306 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
307 
308 	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
309 }
310 
311 static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
312 {
313 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
314 
315 	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
316 }
317 
318 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
319 {
320 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
321 
322 	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
323 }
324 
325 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
326 {
327 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
328 
329 	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
330 }
331 
332 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
333 {
334 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
335 
336 	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
337 }
338 
339 static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
340 {
341 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
342 
343 	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
344 }
345 
346 static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
347 {
348 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
349 
350 	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
351 }
352 
353 static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
354 {
355 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
356 
357 	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
358 }
359 
360 static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
361 {
362 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
363 
364 	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
365 	if (!vgic_dist_irq_get_level(vcpu, irq)) {
366 		vgic_dist_irq_clear_pending(vcpu, irq);
367 		if (!compute_pending_for_cpu(vcpu))
368 			clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
369 	}
370 }
371 
372 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
373 {
374 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
375 
376 	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
377 }
378 
379 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
380 {
381 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
382 
383 	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
384 }
385 
386 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
387 {
388 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
389 
390 	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
391 }
392 
393 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
394 {
395 	if (irq < VGIC_NR_PRIVATE_IRQS)
396 		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
397 	else
398 		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
399 			vcpu->arch.vgic_cpu.pending_shared);
400 }
401 
402 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
403 {
404 	if (irq < VGIC_NR_PRIVATE_IRQS)
405 		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
406 	else
407 		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
408 			  vcpu->arch.vgic_cpu.pending_shared);
409 }
410 
411 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
412 {
413 	return !vgic_irq_is_queued(vcpu, irq);
414 }
415 
416 /**
417  * vgic_reg_access - access vgic register
418  * @mmio:   pointer to the data describing the mmio access
419  * @reg:    pointer to the virtual backing of vgic distributor data
420  * @offset: least significant 2 bits used for word offset
421  * @mode:   ACCESS_ mode (see defines above)
422  *
423  * Helper to make vgic register access easier using one of the access
424  * modes defined for vgic register access
425  * (read,raz,write-ignored,setbit,clearbit,write)
426  */
427 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
428 		     phys_addr_t offset, int mode)
429 {
430 	int word_offset = (offset & 3) * 8;
431 	u32 mask = (1UL << (mmio->len * 8)) - 1;
432 	u32 regval;
433 
434 	/*
435 	 * Any alignment fault should have been delivered to the guest
436 	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
437 	 */
438 
439 	if (reg) {
440 		regval = *reg;
441 	} else {
442 		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
443 		regval = 0;
444 	}
445 
446 	if (mmio->is_write) {
447 		u32 data = mmio_data_read(mmio, mask) << word_offset;
448 		switch (ACCESS_WRITE_MASK(mode)) {
449 		case ACCESS_WRITE_IGNORED:
450 			return;
451 
452 		case ACCESS_WRITE_SETBIT:
453 			regval |= data;
454 			break;
455 
456 		case ACCESS_WRITE_CLEARBIT:
457 			regval &= ~data;
458 			break;
459 
460 		case ACCESS_WRITE_VALUE:
461 			regval = (regval & ~(mask << word_offset)) | data;
462 			break;
463 		}
464 		*reg = regval;
465 	} else {
466 		switch (ACCESS_READ_MASK(mode)) {
467 		case ACCESS_READ_RAZ:
468 			regval = 0;
469 			/* fall through */
470 
471 		case ACCESS_READ_VALUE:
472 			mmio_data_write(mmio, mask, regval >> word_offset);
473 		}
474 	}
475 }
476 
477 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
478 			phys_addr_t offset)
479 {
480 	vgic_reg_access(mmio, NULL, offset,
481 			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
482 	return false;
483 }
484 
485 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
486 			    phys_addr_t offset, int vcpu_id, int access)
487 {
488 	u32 *reg;
489 	int mode = ACCESS_READ_VALUE | access;
490 	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
491 
492 	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
493 	vgic_reg_access(mmio, reg, offset, mode);
494 	if (mmio->is_write) {
495 		if (access & ACCESS_WRITE_CLEARBIT) {
496 			if (offset < 4) /* Force SGI enabled */
497 				*reg |= 0xffff;
498 			vgic_retire_disabled_irqs(target_vcpu);
499 		}
500 		vgic_update_state(kvm);
501 		return true;
502 	}
503 
504 	return false;
505 }
506 
507 bool vgic_handle_set_pending_reg(struct kvm *kvm,
508 				 struct kvm_exit_mmio *mmio,
509 				 phys_addr_t offset, int vcpu_id)
510 {
511 	u32 *reg, orig;
512 	u32 level_mask;
513 	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
514 	struct vgic_dist *dist = &kvm->arch.vgic;
515 
516 	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
517 	level_mask = (~(*reg));
518 
519 	/* Mark both level and edge triggered irqs as pending */
520 	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
521 	orig = *reg;
522 	vgic_reg_access(mmio, reg, offset, mode);
523 
524 	if (mmio->is_write) {
525 		/* Set the soft-pending flag only for level-triggered irqs */
526 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
527 					  vcpu_id, offset);
528 		vgic_reg_access(mmio, reg, offset, mode);
529 		*reg &= level_mask;
530 
531 		/* Ignore writes to SGIs */
532 		if (offset < 2) {
533 			*reg &= ~0xffff;
534 			*reg |= orig & 0xffff;
535 		}
536 
537 		vgic_update_state(kvm);
538 		return true;
539 	}
540 
541 	return false;
542 }
543 
544 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
545 				   struct kvm_exit_mmio *mmio,
546 				   phys_addr_t offset, int vcpu_id)
547 {
548 	u32 *level_active;
549 	u32 *reg, orig;
550 	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
551 	struct vgic_dist *dist = &kvm->arch.vgic;
552 
553 	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
554 	orig = *reg;
555 	vgic_reg_access(mmio, reg, offset, mode);
556 	if (mmio->is_write) {
557 		/* Re-set level triggered level-active interrupts */
558 		level_active = vgic_bitmap_get_reg(&dist->irq_level,
559 					  vcpu_id, offset);
560 		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
561 		*reg |= *level_active;
562 
563 		/* Ignore writes to SGIs */
564 		if (offset < 2) {
565 			*reg &= ~0xffff;
566 			*reg |= orig & 0xffff;
567 		}
568 
569 		/* Clear soft-pending flags */
570 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
571 					  vcpu_id, offset);
572 		vgic_reg_access(mmio, reg, offset, mode);
573 
574 		vgic_update_state(kvm);
575 		return true;
576 	}
577 	return false;
578 }
579 
580 bool vgic_handle_set_active_reg(struct kvm *kvm,
581 				struct kvm_exit_mmio *mmio,
582 				phys_addr_t offset, int vcpu_id)
583 {
584 	u32 *reg;
585 	struct vgic_dist *dist = &kvm->arch.vgic;
586 
587 	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
588 	vgic_reg_access(mmio, reg, offset,
589 			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
590 
591 	if (mmio->is_write) {
592 		vgic_update_state(kvm);
593 		return true;
594 	}
595 
596 	return false;
597 }
598 
599 bool vgic_handle_clear_active_reg(struct kvm *kvm,
600 				  struct kvm_exit_mmio *mmio,
601 				  phys_addr_t offset, int vcpu_id)
602 {
603 	u32 *reg;
604 	struct vgic_dist *dist = &kvm->arch.vgic;
605 
606 	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
607 	vgic_reg_access(mmio, reg, offset,
608 			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
609 
610 	if (mmio->is_write) {
611 		vgic_update_state(kvm);
612 		return true;
613 	}
614 
615 	return false;
616 }
617 
618 static u32 vgic_cfg_expand(u16 val)
619 {
620 	u32 res = 0;
621 	int i;
622 
623 	/*
624 	 * Turn a 16bit value like abcd...mnop into a 32bit word
625 	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
626 	 */
627 	for (i = 0; i < 16; i++)
628 		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
629 
630 	return res;
631 }
632 
633 static u16 vgic_cfg_compress(u32 val)
634 {
635 	u16 res = 0;
636 	int i;
637 
638 	/*
639 	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
640 	 * abcd...mnop which is what we really care about.
641 	 */
642 	for (i = 0; i < 16; i++)
643 		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
644 
645 	return res;
646 }
647 
648 /*
649  * The distributor uses 2 bits per IRQ for the CFG register, but the
650  * LSB is always 0. As such, we only keep the upper bit, and use the
651  * two above functions to compress/expand the bits
652  */
653 bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
654 			 phys_addr_t offset)
655 {
656 	u32 val;
657 
658 	if (offset & 4)
659 		val = *reg >> 16;
660 	else
661 		val = *reg & 0xffff;
662 
663 	val = vgic_cfg_expand(val);
664 	vgic_reg_access(mmio, &val, offset,
665 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
666 	if (mmio->is_write) {
667 		/* Ignore writes to read-only SGI and PPI bits */
668 		if (offset < 8)
669 			return false;
670 
671 		val = vgic_cfg_compress(val);
672 		if (offset & 4) {
673 			*reg &= 0xffff;
674 			*reg |= val << 16;
675 		} else {
676 			*reg &= 0xffff << 16;
677 			*reg |= val;
678 		}
679 	}
680 
681 	return false;
682 }
683 
684 /**
685  * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
686  * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
687  *
688  * Move any IRQs that have already been assigned to LRs back to the
689  * emulated distributor state so that the complete emulated state can be read
690  * from the main emulation structures without investigating the LRs.
691  */
692 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
693 {
694 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
695 	u64 elrsr = vgic_get_elrsr(vcpu);
696 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
697 	int i;
698 
699 	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
700 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
701 
702 		/*
703 		 * There are three options for the state bits:
704 		 *
705 		 * 01: pending
706 		 * 10: active
707 		 * 11: pending and active
708 		 */
709 		BUG_ON(!(lr.state & LR_STATE_MASK));
710 
711 		/* Reestablish SGI source for pending and active IRQs */
712 		if (lr.irq < VGIC_NR_SGIS)
713 			add_sgi_source(vcpu, lr.irq, lr.source);
714 
715 		/*
716 		 * If the LR holds an active (10) or a pending and active (11)
717 		 * interrupt then move the active state to the
718 		 * distributor tracking bit.
719 		 */
720 		if (lr.state & LR_STATE_ACTIVE)
721 			vgic_irq_set_active(vcpu, lr.irq);
722 
723 		/*
724 		 * Reestablish the pending state on the distributor and the
725 		 * CPU interface and mark the LR as free for other use.
726 		 */
727 		vgic_retire_lr(i, vcpu);
728 
729 		/* Finally update the VGIC state. */
730 		vgic_update_state(vcpu->kvm);
731 	}
732 }
733 
734 const
735 struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
736 				      int len, gpa_t offset)
737 {
738 	while (ranges->len) {
739 		if (offset >= ranges->base &&
740 		    (offset + len) <= (ranges->base + ranges->len))
741 			return ranges;
742 		ranges++;
743 	}
744 
745 	return NULL;
746 }
747 
748 static bool vgic_validate_access(const struct vgic_dist *dist,
749 				 const struct vgic_io_range *range,
750 				 unsigned long offset)
751 {
752 	int irq;
753 
754 	if (!range->bits_per_irq)
755 		return true;	/* Not an irq-based access */
756 
757 	irq = offset * 8 / range->bits_per_irq;
758 	if (irq >= dist->nr_irqs)
759 		return false;
760 
761 	return true;
762 }
763 
764 /*
765  * Call the respective handler function for the given range.
766  * We split up any 64 bit accesses into two consecutive 32 bit
767  * handler calls and merge the result afterwards.
768  * We do this in a little endian fashion regardless of the host's
769  * or guest's endianness, because the GIC is always LE and the rest of
770  * the code (vgic_reg_access) also puts it in an LE fashion already.
771  * At this point we have already identified the handle function, so
772  * range points to that one entry and offset is relative to this.
773  */
774 static bool call_range_handler(struct kvm_vcpu *vcpu,
775 			       struct kvm_exit_mmio *mmio,
776 			       unsigned long offset,
777 			       const struct vgic_io_range *range)
778 {
779 	struct kvm_exit_mmio mmio32;
780 	bool ret;
781 
782 	if (likely(mmio->len <= 4))
783 		return range->handle_mmio(vcpu, mmio, offset);
784 
785 	/*
786 	 * Any access bigger than 4 bytes (that we currently handle in KVM)
787 	 * is actually 8 bytes long, caused by a 64-bit access
788 	 */
789 
790 	mmio32.len = 4;
791 	mmio32.is_write = mmio->is_write;
792 	mmio32.private = mmio->private;
793 
794 	mmio32.phys_addr = mmio->phys_addr + 4;
795 	mmio32.data = &((u32 *)mmio->data)[1];
796 	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
797 
798 	mmio32.phys_addr = mmio->phys_addr;
799 	mmio32.data = &((u32 *)mmio->data)[0];
800 	ret |= range->handle_mmio(vcpu, &mmio32, offset);
801 
802 	return ret;
803 }
804 
805 /**
806  * vgic_handle_mmio_access - handle an in-kernel MMIO access
807  * This is called by the read/write KVM IO device wrappers below.
808  * @vcpu:	pointer to the vcpu performing the access
809  * @this:	pointer to the KVM IO device in charge
810  * @addr:	guest physical address of the access
811  * @len:	size of the access
812  * @val:	pointer to the data region
813  * @is_write:	read or write access
814  *
815  * returns true if the MMIO access could be performed
816  */
817 static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
818 				   struct kvm_io_device *this, gpa_t addr,
819 				   int len, void *val, bool is_write)
820 {
821 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
822 	struct vgic_io_device *iodev = container_of(this,
823 						    struct vgic_io_device, dev);
824 	struct kvm_run *run = vcpu->run;
825 	const struct vgic_io_range *range;
826 	struct kvm_exit_mmio mmio;
827 	bool updated_state;
828 	gpa_t offset;
829 
830 	offset = addr - iodev->addr;
831 	range = vgic_find_range(iodev->reg_ranges, len, offset);
832 	if (unlikely(!range || !range->handle_mmio)) {
833 		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
834 		return -ENXIO;
835 	}
836 
837 	mmio.phys_addr = addr;
838 	mmio.len = len;
839 	mmio.is_write = is_write;
840 	mmio.data = val;
841 	mmio.private = iodev->redist_vcpu;
842 
843 	spin_lock(&dist->lock);
844 	offset -= range->base;
845 	if (vgic_validate_access(dist, range, offset)) {
846 		updated_state = call_range_handler(vcpu, &mmio, offset, range);
847 	} else {
848 		if (!is_write)
849 			memset(val, 0, len);
850 		updated_state = false;
851 	}
852 	spin_unlock(&dist->lock);
853 	run->mmio.is_write	= is_write;
854 	run->mmio.len		= len;
855 	run->mmio.phys_addr	= addr;
856 	memcpy(run->mmio.data, val, len);
857 
858 	kvm_handle_mmio_return(vcpu, run);
859 
860 	if (updated_state)
861 		vgic_kick_vcpus(vcpu->kvm);
862 
863 	return 0;
864 }
865 
866 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
867 				 struct kvm_io_device *this,
868 				 gpa_t addr, int len, void *val)
869 {
870 	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
871 }
872 
873 static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
874 				  struct kvm_io_device *this,
875 				  gpa_t addr, int len, const void *val)
876 {
877 	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
878 				       true);
879 }
880 
881 struct kvm_io_device_ops vgic_io_ops = {
882 	.read	= vgic_handle_mmio_read,
883 	.write	= vgic_handle_mmio_write,
884 };
885 
886 /**
887  * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
888  * @kvm:            The VM structure pointer
889  * @base:           The (guest) base address for the register frame
890  * @len:            Length of the register frame window
891  * @ranges:         Describing the handler functions for each register
892  * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
893  * @iodev:          Points to memory to be passed on to the handler
894  *
895  * @iodev stores the parameters of this function so that they are usable by
896  * the handler and the dispatcher function (since the KVM I/O bus framework lacks
897  * an opaque parameter). Initialization is done in this function, but the
898  * reference should be valid and unique for the whole VGIC lifetime.
899  * If the register frame is not mapped for a specific VCPU, pass -1 to
900  * @redist_vcpu_id.
901  */
902 int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
903 			     const struct vgic_io_range *ranges,
904 			     int redist_vcpu_id,
905 			     struct vgic_io_device *iodev)
906 {
907 	struct kvm_vcpu *vcpu = NULL;
908 	int ret;
909 
910 	if (redist_vcpu_id >= 0)
911 		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
912 
913 	iodev->addr		= base;
914 	iodev->len		= len;
915 	iodev->reg_ranges	= ranges;
916 	iodev->redist_vcpu	= vcpu;
917 
918 	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
919 
920 	mutex_lock(&kvm->slots_lock);
921 
922 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
923 				      &iodev->dev);
924 	mutex_unlock(&kvm->slots_lock);
925 
926 	/* Mark the iodev as invalid if registration fails. */
927 	if (ret)
928 		iodev->dev.ops = NULL;
929 
930 	return ret;
931 }
932 
933 static int vgic_nr_shared_irqs(struct vgic_dist *dist)
934 {
935 	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
936 }
937 
938 static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
939 {
940 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
941 	unsigned long *active, *enabled, *act_percpu, *act_shared;
942 	unsigned long active_private, active_shared;
943 	int nr_shared = vgic_nr_shared_irqs(dist);
944 	int vcpu_id;
945 
946 	vcpu_id = vcpu->vcpu_id;
947 	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
948 	act_shared = vcpu->arch.vgic_cpu.active_shared;
949 
950 	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
951 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
952 	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
953 
954 	active = vgic_bitmap_get_shared_map(&dist->irq_active);
955 	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
956 	bitmap_and(act_shared, active, enabled, nr_shared);
957 	bitmap_and(act_shared, act_shared,
958 		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
959 		   nr_shared);
960 
961 	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
962 	active_shared = find_first_bit(act_shared, nr_shared);
963 
964 	return (active_private < VGIC_NR_PRIVATE_IRQS ||
965 		active_shared < nr_shared);
966 }
967 
968 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
969 {
970 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
971 	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
972 	unsigned long pending_private, pending_shared;
973 	int nr_shared = vgic_nr_shared_irqs(dist);
974 	int vcpu_id;
975 
976 	vcpu_id = vcpu->vcpu_id;
977 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
978 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
979 
980 	if (!dist->enabled) {
981 		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
982 		bitmap_zero(pend_shared, nr_shared);
983 		return 0;
984 	}
985 
986 	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
987 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
988 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
989 
990 	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
991 	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
992 	bitmap_and(pend_shared, pending, enabled, nr_shared);
993 	bitmap_and(pend_shared, pend_shared,
994 		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
995 		   nr_shared);
996 
997 	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
998 	pending_shared = find_first_bit(pend_shared, nr_shared);
999 	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
1000 		pending_shared < vgic_nr_shared_irqs(dist));
1001 }
1002 
1003 /*
1004  * Update the interrupt state and determine which CPUs have pending
1005  * or active interrupts. Must be called with distributor lock held.
1006  */
1007 void vgic_update_state(struct kvm *kvm)
1008 {
1009 	struct vgic_dist *dist = &kvm->arch.vgic;
1010 	struct kvm_vcpu *vcpu;
1011 	int c;
1012 
1013 	kvm_for_each_vcpu(c, vcpu, kvm) {
1014 		if (compute_pending_for_cpu(vcpu))
1015 			set_bit(c, dist->irq_pending_on_cpu);
1016 
1017 		if (compute_active_for_cpu(vcpu))
1018 			set_bit(c, dist->irq_active_on_cpu);
1019 		else
1020 			clear_bit(c, dist->irq_active_on_cpu);
1021 	}
1022 }
1023 
1024 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
1025 {
1026 	return vgic_ops->get_lr(vcpu, lr);
1027 }
1028 
1029 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
1030 			       struct vgic_lr vlr)
1031 {
1032 	vgic_ops->set_lr(vcpu, lr, vlr);
1033 }
1034 
1035 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
1036 {
1037 	return vgic_ops->get_elrsr(vcpu);
1038 }
1039 
1040 static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
1041 {
1042 	return vgic_ops->get_eisr(vcpu);
1043 }
1044 
1045 static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
1046 {
1047 	vgic_ops->clear_eisr(vcpu);
1048 }
1049 
1050 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
1051 {
1052 	return vgic_ops->get_interrupt_status(vcpu);
1053 }
1054 
1055 static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
1056 {
1057 	vgic_ops->enable_underflow(vcpu);
1058 }
1059 
1060 static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
1061 {
1062 	vgic_ops->disable_underflow(vcpu);
1063 }
1064 
1065 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1066 {
1067 	vgic_ops->get_vmcr(vcpu, vmcr);
1068 }
1069 
1070 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1071 {
1072 	vgic_ops->set_vmcr(vcpu, vmcr);
1073 }
1074 
1075 static inline void vgic_enable(struct kvm_vcpu *vcpu)
1076 {
1077 	vgic_ops->enable(vcpu);
1078 }
1079 
1080 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
1081 {
1082 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
1083 
1084 	vgic_irq_clear_queued(vcpu, vlr.irq);
1085 
1086 	/*
1087 	 * We must transfer the pending state back to the distributor before
1088 	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
1089 	 */
1090 	if (vlr.state & LR_STATE_PENDING) {
1091 		vgic_dist_irq_set_pending(vcpu, vlr.irq);
1092 		vlr.hwirq = 0;
1093 	}
1094 
1095 	vlr.state = 0;
1096 	vgic_set_lr(vcpu, lr_nr, vlr);
1097 }
1098 
1099 static bool dist_active_irq(struct kvm_vcpu *vcpu)
1100 {
1101 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1102 
1103 	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
1104 }
1105 
1106 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
1107 {
1108 	int i;
1109 
1110 	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
1111 		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
1112 
1113 		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
1114 			return true;
1115 	}
1116 
1117 	return vgic_irq_is_active(vcpu, map->virt_irq);
1118 }
1119 
1120 /*
1121  * An interrupt may have been disabled after being made pending on the
1122  * CPU interface (the classic case is a timer running while we're
1123  * rebooting the guest - the interrupt would kick as soon as the CPU
1124  * interface gets enabled, with deadly consequences).
1125  *
1126  * The solution is to examine already active LRs, and check the
1127  * interrupt is still enabled. If not, just retire it.
1128  */
1129 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1130 {
1131 	u64 elrsr = vgic_get_elrsr(vcpu);
1132 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
1133 	int lr;
1134 
1135 	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
1136 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1137 
1138 		if (!vgic_irq_is_enabled(vcpu, vlr.irq))
1139 			vgic_retire_lr(lr, vcpu);
1140 	}
1141 }
1142 
1143 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1144 				 int lr_nr, struct vgic_lr vlr)
1145 {
1146 	if (vgic_irq_is_active(vcpu, irq)) {
1147 		vlr.state |= LR_STATE_ACTIVE;
1148 		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1149 		vgic_irq_clear_active(vcpu, irq);
1150 		vgic_update_state(vcpu->kvm);
1151 	} else {
1152 		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
1153 		vlr.state |= LR_STATE_PENDING;
1154 		kvm_debug("Set pending: 0x%x\n", vlr.state);
1155 	}
1156 
1157 	if (!vgic_irq_is_edge(vcpu, irq))
1158 		vlr.state |= LR_EOI_INT;
1159 
1160 	if (vlr.irq >= VGIC_NR_SGIS) {
1161 		struct irq_phys_map *map;
1162 		map = vgic_irq_map_search(vcpu, irq);
1163 
1164 		if (map) {
1165 			vlr.hwirq = map->phys_irq;
1166 			vlr.state |= LR_HW;
1167 			vlr.state &= ~LR_EOI_INT;
1168 
1169 			/*
1170 			 * Make sure we're not going to sample this
1171 			 * again, as a HW-backed interrupt cannot be
1172 			 * in the PENDING_ACTIVE stage.
1173 			 */
1174 			vgic_irq_set_queued(vcpu, irq);
1175 		}
1176 	}
1177 
1178 	vgic_set_lr(vcpu, lr_nr, vlr);
1179 }
1180 
1181 /*
1182  * Queue an interrupt to a CPU virtual interface. Return true on success,
1183  * or false if it wasn't possible to queue it.
1184  * sgi_source must be zero for any non-SGI interrupts.
1185  */
1186 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1187 {
1188 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1189 	u64 elrsr = vgic_get_elrsr(vcpu);
1190 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
1191 	struct vgic_lr vlr;
1192 	int lr;
1193 
1194 	/* Sanitize the input... */
1195 	BUG_ON(sgi_source_id & ~7);
1196 	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
1197 	BUG_ON(irq >= dist->nr_irqs);
1198 
1199 	kvm_debug("Queue IRQ%d\n", irq);
1200 
1201 	/* Do we have an active interrupt for the same CPUID? */
1202 	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
1203 		vlr = vgic_get_lr(vcpu, lr);
1204 		if (vlr.irq == irq && vlr.source == sgi_source_id) {
1205 			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
1206 			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1207 			return true;
1208 		}
1209 	}
1210 
1211 	/* Try to use another LR for this interrupt */
1212 	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
1213 	if (lr >= vgic->nr_lr)
1214 		return false;
1215 
1216 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
1217 
1218 	vlr.irq = irq;
1219 	vlr.source = sgi_source_id;
1220 	vlr.state = 0;
1221 	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1222 
1223 	return true;
1224 }
1225 
1226 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
1227 {
1228 	if (!vgic_can_sample_irq(vcpu, irq))
1229 		return true; /* level interrupt, already queued */
1230 
1231 	if (vgic_queue_irq(vcpu, 0, irq)) {
1232 		if (vgic_irq_is_edge(vcpu, irq)) {
1233 			vgic_dist_irq_clear_pending(vcpu, irq);
1234 			vgic_cpu_irq_clear(vcpu, irq);
1235 		} else {
1236 			vgic_irq_set_queued(vcpu, irq);
1237 		}
1238 
1239 		return true;
1240 	}
1241 
1242 	return false;
1243 }
1244 
1245 /*
1246  * Fill the list registers with pending interrupts before running the
1247  * guest.
1248  */
1249 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1250 {
1251 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1252 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1253 	unsigned long *pa_percpu, *pa_shared;
1254 	int i, vcpu_id;
1255 	int overflow = 0;
1256 	int nr_shared = vgic_nr_shared_irqs(dist);
1257 
1258 	vcpu_id = vcpu->vcpu_id;
1259 
1260 	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
1261 	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
1262 
1263 	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
1264 		  VGIC_NR_PRIVATE_IRQS);
1265 	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
1266 		  nr_shared);
1267 	/*
1268 	 * We may not have any pending interrupt, or the interrupts
1269 	 * may have been serviced from another vcpu. In all cases,
1270 	 * move along.
1271 	 */
1272 	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
1273 		goto epilog;
1274 
1275 	/* SGIs */
1276 	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
1277 		if (!queue_sgi(vcpu, i))
1278 			overflow = 1;
1279 	}
1280 
1281 	/* PPIs */
1282 	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
1283 		if (!vgic_queue_hwirq(vcpu, i))
1284 			overflow = 1;
1285 	}
1286 
1287 	/* SPIs */
1288 	for_each_set_bit(i, pa_shared, nr_shared) {
1289 		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
1290 			overflow = 1;
1291 	}
1292 
1293 
1294 
1295 
1296 epilog:
1297 	if (overflow) {
1298 		vgic_enable_underflow(vcpu);
1299 	} else {
1300 		vgic_disable_underflow(vcpu);
1301 		/*
1302 		 * We're about to run this VCPU, and we've consumed
1303 		 * everything the distributor had in store for
1304 		 * us. Claim we don't have anything pending. We'll
1305 		 * adjust that if needed while exiting.
1306 		 */
1307 		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
1308 	}
1309 }
1310 
1311 static int process_queued_irq(struct kvm_vcpu *vcpu,
1312 				   int lr, struct vgic_lr vlr)
1313 {
1314 	int pending = 0;
1315 
1316 	/*
1317 	 * If the IRQ was EOIed (called from vgic_process_maintenance) or it
1318 	 * went from active to non-active (called from vgic_sync_hwirq) it was
1319 	 * also ACKed and we therefore assume we can clear the soft pending
1320 	 * state (should it have been set) for this interrupt.
1321 	 *
1322 	 * Note: if the IRQ soft pending state was set after the IRQ was
1323 	 * acked, it actually shouldn't be cleared, but we have no way of
1324 	 * knowing that unless we start trapping ACKs when the soft-pending
1325 	 * state is set.
1326 	 */
1327 	vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1328 
1329 	/*
1330 	 * Tell the gic to start sampling this interrupt again.
1331 	 */
1332 	vgic_irq_clear_queued(vcpu, vlr.irq);
1333 
1334 	/* Any additional pending interrupt? */
1335 	if (vgic_irq_is_edge(vcpu, vlr.irq)) {
1336 		BUG_ON(!(vlr.state & LR_HW));
1337 		pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
1338 	} else {
1339 		if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1340 			vgic_cpu_irq_set(vcpu, vlr.irq);
1341 			pending = 1;
1342 		} else {
1343 			vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1344 			vgic_cpu_irq_clear(vcpu, vlr.irq);
1345 		}
1346 	}
1347 
1348 	/*
1349 	 * Despite being EOIed, the LR may not have
1350 	 * been marked as empty.
1351 	 */
1352 	vlr.state = 0;
1353 	vlr.hwirq = 0;
1354 	vgic_set_lr(vcpu, lr, vlr);
1355 
1356 	return pending;
1357 }
1358 
1359 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1360 {
1361 	u32 status = vgic_get_interrupt_status(vcpu);
1362 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1363 	struct kvm *kvm = vcpu->kvm;
1364 	int level_pending = 0;
1365 
1366 	kvm_debug("STATUS = %08x\n", status);
1367 
1368 	if (status & INT_STATUS_EOI) {
1369 		/*
1370 		 * Some level interrupts have been EOIed. Clear their
1371 		 * active bit.
1372 		 */
1373 		u64 eisr = vgic_get_eisr(vcpu);
1374 		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
1375 		int lr;
1376 
1377 		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1378 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1379 
1380 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1381 			WARN_ON(vlr.state & LR_STATE_MASK);
1382 
1383 
1384 			/*
1385 			 * kvm_notify_acked_irq calls kvm_set_irq()
1386 			 * to reset the IRQ level, which grabs the dist->lock
1387 			 * so we call this before taking the dist->lock.
1388 			 */
1389 			kvm_notify_acked_irq(kvm, 0,
1390 					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
1391 
1392 			spin_lock(&dist->lock);
1393 			level_pending |= process_queued_irq(vcpu, lr, vlr);
1394 			spin_unlock(&dist->lock);
1395 		}
1396 	}
1397 
1398 	if (status & INT_STATUS_UNDERFLOW)
1399 		vgic_disable_underflow(vcpu);
1400 
1401 	/*
1402 	 * In the next iterations of the vcpu loop, if we sync the vgic state
1403 	 * after flushing it, but before entering the guest (this happens for
1404 	 * pending signals and vmid rollovers), then make sure we don't pick
1405 	 * up any old maintenance interrupts here.
1406 	 */
1407 	vgic_clear_eisr(vcpu);
1408 
1409 	return level_pending;
1410 }
1411 
1412 /*
1413  * Save the physical active state, and reset it to inactive.
1414  *
1415  * Return true if there's a pending forwarded interrupt to queue.
1416  */
1417 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
1418 {
1419 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1420 	bool level_pending;
1421 
1422 	if (!(vlr.state & LR_HW))
1423 		return false;
1424 
1425 	if (vlr.state & LR_STATE_ACTIVE)
1426 		return false;
1427 
1428 	spin_lock(&dist->lock);
1429 	level_pending = process_queued_irq(vcpu, lr, vlr);
1430 	spin_unlock(&dist->lock);
1431 	return level_pending;
1432 }
1433 
1434 /* Sync back the VGIC state after a guest run */
1435 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1436 {
1437 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1438 	u64 elrsr;
1439 	unsigned long *elrsr_ptr;
1440 	int lr, pending;
1441 	bool level_pending;
1442 
1443 	level_pending = vgic_process_maintenance(vcpu);
1444 
1445 	/* Deal with HW interrupts, and clear mappings for empty LRs */
1446 	for (lr = 0; lr < vgic->nr_lr; lr++) {
1447 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1448 
1449 		level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
1450 		BUG_ON(vlr.irq >= dist->nr_irqs);
1451 	}
1452 
1453 	/* Check if we still have something up our sleeve... */
1454 	elrsr = vgic_get_elrsr(vcpu);
1455 	elrsr_ptr = u64_to_bitmask(&elrsr);
1456 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
1457 	if (level_pending || pending < vgic->nr_lr)
1458 		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1459 }
1460 
1461 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1462 {
1463 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1464 
1465 	if (!irqchip_in_kernel(vcpu->kvm))
1466 		return;
1467 
1468 	spin_lock(&dist->lock);
1469 	__kvm_vgic_flush_hwstate(vcpu);
1470 	spin_unlock(&dist->lock);
1471 }
1472 
1473 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1474 {
1475 	if (!irqchip_in_kernel(vcpu->kvm))
1476 		return;
1477 
1478 	__kvm_vgic_sync_hwstate(vcpu);
1479 }
1480 
1481 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1482 {
1483 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1484 
1485 	if (!irqchip_in_kernel(vcpu->kvm))
1486 		return 0;
1487 
1488 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1489 }
1490 
1491 void vgic_kick_vcpus(struct kvm *kvm)
1492 {
1493 	struct kvm_vcpu *vcpu;
1494 	int c;
1495 
1496 	/*
1497 	 * We've injected an interrupt, time to find out who deserves
1498 	 * a good kick...
1499 	 */
1500 	kvm_for_each_vcpu(c, vcpu, kvm) {
1501 		if (kvm_vgic_vcpu_pending_irq(vcpu))
1502 			kvm_vcpu_kick(vcpu);
1503 	}
1504 }
1505 
1506 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1507 {
1508 	int edge_triggered = vgic_irq_is_edge(vcpu, irq);
1509 
1510 	/*
1511 	 * Only inject an interrupt if:
1512 	 * - edge triggered and we have a rising edge
1513 	 * - level triggered and we change level
1514 	 */
1515 	if (edge_triggered) {
1516 		int state = vgic_dist_irq_is_pending(vcpu, irq);
1517 		return level > state;
1518 	} else {
1519 		int state = vgic_dist_irq_get_level(vcpu, irq);
1520 		return level != state;
1521 	}
1522 }
1523 
1524 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
1525 				   struct irq_phys_map *map,
1526 				   unsigned int irq_num, bool level)
1527 {
1528 	struct vgic_dist *dist = &kvm->arch.vgic;
1529 	struct kvm_vcpu *vcpu;
1530 	int edge_triggered, level_triggered;
1531 	int enabled;
1532 	bool ret = true, can_inject = true;
1533 
1534 	trace_vgic_update_irq_pending(cpuid, irq_num, level);
1535 
1536 	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
1537 		return -EINVAL;
1538 
1539 	spin_lock(&dist->lock);
1540 
1541 	vcpu = kvm_get_vcpu(kvm, cpuid);
1542 	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
1543 	level_triggered = !edge_triggered;
1544 
1545 	if (!vgic_validate_injection(vcpu, irq_num, level)) {
1546 		ret = false;
1547 		goto out;
1548 	}
1549 
1550 	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1551 		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1552 		if (cpuid == VCPU_NOT_ALLOCATED) {
1553 			/* Pretend we use CPU0, and prevent injection */
1554 			cpuid = 0;
1555 			can_inject = false;
1556 		}
1557 		vcpu = kvm_get_vcpu(kvm, cpuid);
1558 	}
1559 
1560 	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1561 
1562 	if (level) {
1563 		if (level_triggered)
1564 			vgic_dist_irq_set_level(vcpu, irq_num);
1565 		vgic_dist_irq_set_pending(vcpu, irq_num);
1566 	} else {
1567 		if (level_triggered) {
1568 			vgic_dist_irq_clear_level(vcpu, irq_num);
1569 			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
1570 				vgic_dist_irq_clear_pending(vcpu, irq_num);
1571 				vgic_cpu_irq_clear(vcpu, irq_num);
1572 				if (!compute_pending_for_cpu(vcpu))
1573 					clear_bit(cpuid, dist->irq_pending_on_cpu);
1574 			}
1575 		}
1576 
1577 		ret = false;
1578 		goto out;
1579 	}
1580 
1581 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
1582 
1583 	if (!enabled || !can_inject) {
1584 		ret = false;
1585 		goto out;
1586 	}
1587 
1588 	if (!vgic_can_sample_irq(vcpu, irq_num)) {
1589 		/*
1590 		 * Level interrupt in progress, will be picked up
1591 		 * when EOId.
1592 		 */
1593 		ret = false;
1594 		goto out;
1595 	}
1596 
1597 	if (level) {
1598 		vgic_cpu_irq_set(vcpu, irq_num);
1599 		set_bit(cpuid, dist->irq_pending_on_cpu);
1600 	}
1601 
1602 out:
1603 	spin_unlock(&dist->lock);
1604 
1605 	if (ret) {
1606 		/* kick the specified vcpu */
1607 		kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
1608 	}
1609 
1610 	return 0;
1611 }
1612 
1613 static int vgic_lazy_init(struct kvm *kvm)
1614 {
1615 	int ret = 0;
1616 
1617 	if (unlikely(!vgic_initialized(kvm))) {
1618 		/*
1619 		 * We only provide the automatic initialization of the VGIC
1620 		 * for the legacy case of a GICv2. Any other type must
1621 		 * be explicitly initialized once setup with the respective
1622 		 * KVM device call.
1623 		 */
1624 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
1625 			return -EBUSY;
1626 
1627 		mutex_lock(&kvm->lock);
1628 		ret = vgic_init(kvm);
1629 		mutex_unlock(&kvm->lock);
1630 	}
1631 
1632 	return ret;
1633 }
1634 
1635 /**
1636  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1637  * @kvm:     The VM structure pointer
1638  * @cpuid:   The CPU for PPIs
1639  * @irq_num: The IRQ number that is assigned to the device. This IRQ
1640  *           must not be mapped to a HW interrupt.
1641  * @level:   Edge-triggered:  true:  to trigger the interrupt
1642  *			      false: to ignore the call
1643  *	     Level-sensitive  true:  raise the input signal
1644  *			      false: lower the input signal
1645  *
1646  * The GIC is not concerned with devices being active-LOW or active-HIGH for
1647  * level-sensitive interrupts.  You can think of the level parameter as 1
1648  * being HIGH and 0 being LOW and all devices being active-HIGH.
1649  */
1650 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1651 			bool level)
1652 {
1653 	struct irq_phys_map *map;
1654 	int ret;
1655 
1656 	ret = vgic_lazy_init(kvm);
1657 	if (ret)
1658 		return ret;
1659 
1660 	map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
1661 	if (map)
1662 		return -EINVAL;
1663 
1664 	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
1665 }
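
/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * modelling a level-sensitive SPI would raise and later lower the line
 * with two calls such as
 *
 *	kvm_vgic_inject_irq(kvm, 0, spi_num, true);
 *	kvm_vgic_inject_irq(kvm, 0, spi_num, false);
 *
 * while for an edge-triggered interrupt a "false" call is simply ignored,
 * as documented above.
 */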

/**
 * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @map:     Pointer to an irq_phys_map structure describing the mapping
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
			       struct irq_phys_map *map, bool level)
{
	int ret;

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
						    int virt_irq)
{
	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
	else
		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
}

/**
 * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
 * @vcpu: The VCPU pointer
 * @virt_irq: The virtual irq number
 * @irq: The Linux IRQ number
 *
 * Establish a mapping between a guest visible irq (@virt_irq) and a
 * Linux irq (@irq). On injection, @virt_irq will be associated with
 * the physical interrupt represented by @irq. This mapping can be
 * established multiple times as long as the parameters are the same.
 *
 * Returns a valid pointer on success, and an error pointer otherwise
 */
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
					   int virt_irq, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map *map;
	struct irq_phys_map_entry *entry;
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;

	desc = irq_to_desc(irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/* Create a new mapping */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dist->irq_phys_map_lock);

	/* Try to match an existing mapping */
	map = vgic_irq_map_search(vcpu, virt_irq);
	if (map) {
		/* Make sure this mapping matches */
		if (map->phys_irq != phys_irq	||
		    map->irq      != irq)
			map = ERR_PTR(-EINVAL);

		/* Found an existing, valid mapping */
		goto out;
	}

	map           = &entry->map;
	map->virt_irq = virt_irq;
	map->phys_irq = phys_irq;
	map->irq      = irq;

	list_add_tail_rcu(&entry->entry, root);

out:
	spin_unlock(&dist->irq_phys_map_lock);
	/*
	 * If we've found a hit in the existing list, free the useless
	 * entry.
	 */
	if (IS_ERR(map) || map != &entry->map)
		kfree(entry);
	return map;
}
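
/*
 * Illustrative sketch (an assumption, not code from this file): a user
 * such as a per-vcpu timer emulation would typically establish the
 * mapping once and keep the returned pointer around:
 *
 *	struct irq_phys_map *map;
 *
 *	map = kvm_vgic_map_phys_irq(vcpu, virt_ppi, host_irq);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 * injecting later through kvm_vgic_inject_mapped_irq() and tearing the
 * mapping down again with kvm_vgic_unmap_phys_irq().
 */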

static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq)
{
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map_entry *entry;
	struct irq_phys_map *map;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, root, entry) {
		map = &entry->map;
		if (map->virt_irq == virt_irq) {
			rcu_read_unlock();
			return map;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
{
	struct irq_phys_map_entry *entry;

	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
	kfree(entry);
}

/**
 * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
 * @vcpu: The VCPU pointer
 * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
 *
 * Remove an existing mapping between virtual and physical interrupts.
 */
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct irq_phys_map_entry *entry;
	struct list_head *root;

	if (!map)
		return -EINVAL;

	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		if (&entry->map == map) {
			list_del_rcu(&entry->entry);
			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
			break;
		}
	}

	spin_unlock(&dist->irq_phys_map_lock);

	return 0;
}
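
/*
 * Note on irq_phys_map_list protection: lookups in vgic_irq_map_search()
 * walk the lists under rcu_read_lock(), while updaters
 * (kvm_vgic_map_phys_irq(), kvm_vgic_unmap_phys_irq() and
 * vgic_destroy_irq_phys_map() below) serialize on
 * dist->irq_phys_map_lock and only free entries after a grace period
 * via call_rcu().
 */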

static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct irq_phys_map_entry *entry;

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		list_del_rcu(&entry->entry);
		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
	}

	spin_unlock(&dist->irq_phys_map_lock);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
	int sz = nr_longs * sizeof(unsigned long);
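
	/*
	 * Note: sz only covers the shared (SPI) interrupts; the first
	 * VGIC_NR_PRIVATE_IRQS interrupts (SGIs and PPIs) are deliberately
	 * not included in these allocations.
	 */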
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);

	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}

/**
 * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
 * @vcpu: The VCPU pointer
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
}

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
	dist->nr_cpus = 0;
}

/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		/*
		 * Enable and configure all SGIs to be edge-triggered and
		 * configure all PPIs as level-triggered.
		 */
		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			if (i < VGIC_NR_SGIS) {
				/* SGIs */
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
			} else if (i < VGIC_NR_PRIVATE_IRQS) {
				/* PPIs */
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_LEVEL);
			}
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}

static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}

/**
 * kvm_vgic_early_init - Earliest possible vgic initialization stage
 * @kvm: The VM structure pointer
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->arch.vgic.lock);
	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
}

int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
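
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * these addresses are normally programmed through the KVM device attribute
 * interface, e.g.
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr	= KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr	= (u64)(unsigned long)&dist_base,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which reaches vgic_set_common_attr() below and then this function with
 * write == true.
 */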

int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}

int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}
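
/*
 * Minimal irq routing glue: no routing table is kept here, so mapping a
 * GSI yields zero entries and a "chip pin" is passed through unchanged;
 * kvm_set_irq() below then offsets it by VGIC_NR_PRIVATE_IRQS to obtain
 * the SPI number.
 */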

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return 0;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	return kvm_vgic_inject_irq(kvm, 0, spi, level);
}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}