/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2		0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
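/*
 * Return a pointer to the byte tracking which source VCPUs have the given
 * SGI pending for the target VCPU vcpu_id (one bit per possible sender).
 */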
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

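/*
 * Handle the first distributor registers: GICD_CTLR, GICD_TYPER and
 * GICD_IIDR. Only GICD_CTLR is writable; the other two are read-only.
 */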
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
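/*
 * Assemble the 32-bit GICD_ITARGETSR value (one target byte per interrupt)
 * for the four SPIs starting at @irq, from the per-SPI target bookkeeping.
 */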
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupt targets as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

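/* Handle writes of GICD_CPENDSGIRn and GICD_SPENDSGIRn */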
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear the pending SGI source bits on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

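/*
 * Distributor MMIO dispatch table, mapping GICD_* register offsets to
 * the handlers above.
 */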
static const struct vgic_io_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_active_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_active_reg,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};

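/*
 * Decode a write to GICD_SGIR: extract the SGI number, the target list
 * and the filter mode, then mark the SGI pending on every targeted VCPU
 * and record the sending CPU in the per-target source bitmap.
 */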
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

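/*
 * Try to queue the pending SGI onto a list register, once per source
 * CPU. Returns true when every source could be queued and the SGI is
 * therefore no longer pending at the distributor level.
 */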
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 * @params: host VGIC parameters; params->vcpu_base is the physical address
 *          of the GIC virtual CPU interface
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				 KVM_VGIC_V2_DIST_SIZE,
				 vgic_dist_ranges, -1, &dist->dist_iodev);

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out_unregister;
	}

	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out_unregister;
	}

	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}

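/* Wire up the GICv2-specific emulation callbacks and VCPU limit for this VM */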
void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

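/*
 * Map userspace accesses to GICC_CTLR, GICC_PMR, GICC_BPR and GICC_ABPR
 * onto the corresponding fields of the virtual machine control register
 * (VMCR) state.
 */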
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};

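/*
 * Perform a userspace read or write of a single 32-bit distributor or
 * CPU interface register via the device attribute API, funnelled through
 * the same MMIO handlers the guest would use. Fails with -EBUSY if any
 * VCPU is currently running.
 */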
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct vgic_io_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;
	u32 data;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	mmio.data = &data;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, 4, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the
	 * VGIC state, because even if another VCPU is run after this point,
	 * that VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}

	}

	return -ENXIO;
}

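/*
 * Report which device attributes this GICv2 device model supports:
 * address setting, distributor/CPU interface register access, the
 * number of IRQs and the init control.
 */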
static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_v2_create,
	.destroy = vgic_v2_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};