/*
 * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
        struct vgic_lr lr_desc;
        u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];

        lr_desc.irq = val & GICH_LR_VIRTUALID;
        if (lr_desc.irq <= 15)
                lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
        else
                lr_desc.source = 0;
        lr_desc.state = 0;

        if (val & GICH_LR_PENDING_BIT)
                lr_desc.state |= LR_STATE_PENDING;
        if (val & GICH_LR_ACTIVE_BIT)
                lr_desc.state |= LR_STATE_ACTIVE;
        if (val & GICH_LR_EOI)
                lr_desc.state |= LR_EOI_INT;

        return lr_desc;
}

static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
                           struct vgic_lr lr_desc)
{
        u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
                     lr_desc.irq;

        if (lr_desc.state & LR_STATE_PENDING)
                lr_val |= GICH_LR_PENDING_BIT;
        if (lr_desc.state & LR_STATE_ACTIVE)
                lr_val |= GICH_LR_ACTIVE_BIT;
        if (lr_desc.state & LR_EOI_INT)
                lr_val |= GICH_LR_EOI;

        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
}

static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                  struct vgic_lr lr_desc)
{
        if (!(lr_desc.state & LR_STATE_MASK))
                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
        else
                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
}

static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
}

static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
}

static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
}

static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
{
        u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
        u32 ret = 0;

        if (misr & GICH_MISR_EOI)
                ret |= INT_STATUS_EOI;
        if (misr & GICH_MISR_U)
                ret |= INT_STATUS_UNDERFLOW;

        return ret;
}

static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
}

static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
}

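/*
 * Translate between the hardware GICH_VMCR bit layout and the generic
 * struct vgic_vmcr, so the common vgic code never has to know the
 * GICv2-specific field positions.
 */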
static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

        vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
        vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
                        GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
                        GICH_VMCR_BINPOINT_SHIFT;
        vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
                        GICH_VMCR_PRIMASK_SHIFT;
}

static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        u32 vmcr;

        vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
        vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
        vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
                GICH_VMCR_PRIMASK_MASK;

        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}

static void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

        /* Get the show on the road... */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

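/*
 * GICv2 back-end implementation of the generic vgic operations, handed
 * to the common vgic code through *ops in vgic_v2_probe().
 */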
static const struct vgic_ops vgic_v2_ops = {
        .get_lr = vgic_v2_get_lr,
        .set_lr = vgic_v2_set_lr,
        .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
        .get_elrsr = vgic_v2_get_elrsr,
        .get_eisr = vgic_v2_get_eisr,
        .clear_eisr = vgic_v2_clear_eisr,
        .get_interrupt_status = vgic_v2_get_interrupt_status,
        .enable_underflow = vgic_v2_enable_underflow,
        .disable_underflow = vgic_v2_disable_underflow,
        .get_vmcr = vgic_v2_get_vmcr,
        .set_vmcr = vgic_v2_set_vmcr,
        .enable = vgic_v2_enable,
};

static struct vgic_params vgic_v2_params;

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
 * @vgic_node: pointer to the DT node
 * @ops: address of a pointer to the GICv2 operations
 * @params: address of a pointer to HW-specific parameters
 *
 * Returns 0 if a GICv2 has been found, with the low level operations
 * in *ops and the HW parameters in *params.  Returns an error code
 * otherwise.
 */
int vgic_v2_probe(struct device_node *vgic_node,
                  const struct vgic_ops **ops,
                  const struct vgic_params **params)
{
        int ret;
        struct resource vctrl_res;
        struct resource vcpu_res;
        struct vgic_params *vgic = &vgic_v2_params;

        vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
        if (!vgic->maint_irq) {
                kvm_err("error getting vgic maintenance irq from DT\n");
                ret = -ENXIO;
                goto out;
        }

        ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
        if (ret) {
                kvm_err("Cannot obtain GICH resource\n");
                goto out;
        }

        vgic->vctrl_base = of_iomap(vgic_node, 2);
        if (!vgic->vctrl_base) {
                kvm_err("Cannot ioremap GICH\n");
                ret = -ENOMEM;
                goto out;
        }

        vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
        vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;

        ret = create_hyp_io_mappings(vgic->vctrl_base,
                                     vgic->vctrl_base +
                                        resource_size(&vctrl_res),
                                     vctrl_res.start);
        if (ret) {
                kvm_err("Cannot map VCTRL into hyp\n");
                goto out_unmap;
        }

        if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
                kvm_err("Cannot obtain GICV resource\n");
                ret = -ENXIO;
                goto out_unmap;
        }

        if (!PAGE_ALIGNED(vcpu_res.start)) {
                kvm_err("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)vcpu_res.start);
                ret = -ENXIO;
                goto out_unmap;
        }

        if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
                kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
                        (unsigned long long)resource_size(&vcpu_res),
                        PAGE_SIZE);
                ret = -ENXIO;
                goto out_unmap;
        }

        vgic->can_emulate_gicv2 = true;
        kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);

        vgic->vcpu_base = vcpu_res.start;

        kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
                 vctrl_res.start, vgic->maint_irq);

        vgic->type = VGIC_V2;
        vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
        *ops = &vgic_v2_ops;
        *params = vgic;
        goto out;

out_unmap:
        iounmap(vgic->vctrl_base);
out:
        of_node_put(vgic_node);
        return ret;
}

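/*
 * Example usage (illustrative sketch only): a caller is expected to locate
 * the GIC node in the device tree and hand it to vgic_v2_probe(), keeping
 * the returned ops/params for the generic vgic code.  The lookup below is
 * just one way to find such a node; in practice the KVM init code matches
 * the node against a table of compatible strings.
 *
 *	const struct vgic_ops *ops;
 *	const struct vgic_params *params;
 *	struct device_node *node;
 *
 *	node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
 *	if (node && !vgic_v2_probe(node, &ops, &params))
 *		kvm_info("GICv2 probed, %u list registers\n", params->nr_lr);
 */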