virt/kvm/arm/vgic/vgic.h


DEFINITIONS

This source file includes the following definitions.
  1. irq_is_pending
  2. vgic_irq_is_mapped_level
  3. vgic_irq_get_lr_count
  4. vgic_irq_is_multi_sgi
  5. vgic_get_irq_kref
  6. vgic_v3_max_apr_idx
  7. vgic_v3_redist_region_full
  8. vgic_v3_rd_region_size
  9. vgic_dist_overlap

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#include <linux/irqchip/arm-gic-common.h>

#define PRODUCT_ID_KVM          0x4b    /* ASCII code K */
#define IMPLEMENTER_ARM         0x43b

#define VGIC_ADDR_UNDEF         (-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define INTERRUPT_ID_BITS_SPIS  10
#define INTERRUPT_ID_BITS_ITS   16
#define VGIC_PRI_BITS           5

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

#define VGIC_AFFINITY_0_SHIFT 0
#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
#define VGIC_AFFINITY_1_SHIFT 8
#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
#define VGIC_AFFINITY_2_SHIFT 16
#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
#define VGIC_AFFINITY_3_SHIFT 24
#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)

#define VGIC_AFFINITY_LEVEL(reg, level) \
        ((((reg) & VGIC_AFFINITY_## level ##_MASK) \
        >> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

/*
 * Userspace encodes the affinity differently from the MPIDR. The macro
 * below converts the vgic userspace format into the MPIDR register format.
 */
#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
                            VGIC_AFFINITY_LEVEL(val, 1) | \
                            VGIC_AFFINITY_LEVEL(val, 2) | \
                            VGIC_AFFINITY_LEVEL(val, 3))
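
/*
 * Illustrative decode (a sketch assuming the arm64 MPIDR_LEVEL_SHIFT(),
 * which maps Aff3 to bits [39:32]): for the userspace value 0x01020304,
 * Aff3/Aff2/Aff1/Aff0 are 0x01/0x02/0x03/0x04, so
 *
 *      VGIC_TO_MPIDR(0x01020304UL) == 0x0000000100020304UL
 */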

/*
 * The macros below implement the CPUREG attribute encoding described in
 * Documentation/virt/kvm/devices/arm-vgic-v3.txt.
 */
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT  14
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT  11
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT  7
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT  3
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT  0

#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
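
/*
 * Worked example of the layout above: ICC_PMR_EL1 is Op0=3, Op1=0,
 * CRn=4, CRm=6, Op2=0, so its CPUREG attribute id works out to
 *
 *      (3 << 14) | (0 << 11) | (4 << 7) | (6 << 3) | 0 == 0xc230
 */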

/*
 * The macros below implement the ITS table entry encoding described in
 * Documentation/virt/kvm/devices/arm-vgic-its.txt.
 */
#define KVM_ITS_CTE_VALID_SHIFT         63
#define KVM_ITS_CTE_VALID_MASK          BIT_ULL(63)
#define KVM_ITS_CTE_RDBASE_SHIFT        16
#define KVM_ITS_CTE_ICID_MASK           GENMASK_ULL(15, 0)
#define KVM_ITS_ITE_NEXT_SHIFT          48
#define KVM_ITS_ITE_PINTID_SHIFT        16
#define KVM_ITS_ITE_PINTID_MASK         GENMASK_ULL(47, 16)
#define KVM_ITS_ITE_ICID_MASK           GENMASK_ULL(15, 0)
#define KVM_ITS_DTE_VALID_SHIFT         63
#define KVM_ITS_DTE_VALID_MASK          BIT_ULL(63)
#define KVM_ITS_DTE_NEXT_SHIFT          49
#define KVM_ITS_DTE_NEXT_MASK           GENMASK_ULL(62, 49)
#define KVM_ITS_DTE_ITTADDR_SHIFT       5
#define KVM_ITS_DTE_ITTADDR_MASK        GENMASK_ULL(48, 5)
#define KVM_ITS_DTE_SIZE_MASK           GENMASK_ULL(4, 0)
#define KVM_ITS_L1E_VALID_MASK          BIT_ULL(63)
/* we only support 64 kB translation table page size */
#define KVM_ITS_L1E_ADDR_MASK           GENMASK_ULL(51, 16)
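
/*
 * Sketch of how a DTE for a device with a 5-bit event ID range and an
 * ITT at 0x10000 would be assembled on save (illustrative values; per
 * arm-vgic-its.txt the ITTADDR field holds bits [51:8] of the ITT
 * address and the Size field holds num_eventid_bits - 1):
 *
 *      dte = BIT_ULL(KVM_ITS_DTE_VALID_SHIFT) |
 *            ((0x10000ULL >> 8) << KVM_ITS_DTE_ITTADDR_SHIFT) |
 *            (5 - 1);
 */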

#define KVM_VGIC_V3_RDIST_INDEX_MASK    GENMASK_ULL(11, 0)
#define KVM_VGIC_V3_RDIST_FLAGS_MASK    GENMASK_ULL(15, 12)
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT   12
#define KVM_VGIC_V3_RDIST_BASE_MASK     GENMASK_ULL(51, 16)
#define KVM_VGIC_V3_RDIST_COUNT_MASK    GENMASK_ULL(63, 52)
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT   52
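
/*
 * A REDIST_REGION attribute therefore packs (illustrative values) a
 * 16-redistributor region at base 0x100000 with index 1 as
 *
 *      (16ULL << KVM_VGIC_V3_RDIST_COUNT_SHIFT) | 0x100000ULL | 1
 *
 * with the 64K-aligned base sitting directly in bits [51:16].
 */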

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

/* Requires the irq_lock to be held by the caller. */
static inline bool irq_is_pending(struct vgic_irq *irq)
{
        if (irq->config == VGIC_CONFIG_EDGE)
                return irq->pending_latch;
        else
                return irq->pending_latch || irq->line_level;
}
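
/*
 * A typical caller (hypothetical sketch; the real callers live in
 * vgic.c) takes the per-IRQ lock around the query:
 *
 *      raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *      pending = irq_is_pending(irq);
 *      raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 */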

static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
{
        return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}

static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
{
        /*
         * A GICv2 SGI needs one LR per pending source CPU; account for
         * the active state as one more interrupt.
         */
        if (vgic_irq_is_sgi(irq->intid) && irq->source)
                return hweight8(irq->source) + irq->active;

        return irq_is_pending(irq) || irq->active;
}

static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
{
        return vgic_irq_get_lr_count(irq) > 1;
}
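
/*
 * For example, a GICv2 SGI with two pending source CPUs and the active
 * state set has an LR count of 3 (hweight8() of two source bits plus
 * the active state), so vgic_irq_is_multi_sgi() is true and the SGI
 * must be delivered through several list registers in turn.
 */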

/*
 * This struct provides an intermediate representation of the fields
 * contained in the GICH_VMCR and ICH_VMCR registers, such that code
 * exporting the GIC state to userspace can generate either GICv2 or
 * GICv3 CPU interface registers regardless of the hardware-backed GIC
 * in use.
 */
struct vgic_vmcr {
        u32     grpen0;
        u32     grpen1;

        u32     ackctl;
        u32     fiqen;
        u32     cbpr;
        u32     eoim;

        u32     abpr;
        u32     bpr;
        u32     pmr;  /* Priority mask field in the GICC_PMR and
                       * ICC_PMR_EL1 priority field format */
};

struct vgic_reg_attr {
        struct kvm_vcpu *vcpu;
        gpa_t addr;
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr);
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment);

void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val);
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_enable(struct kvm_vcpu *vcpu);
int vgic_v2_probe(const struct gic_kvm_info *info);
int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type);

void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);

void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

/*
 * Only LPIs are refcounted: SGIs, PPIs and SPIs are allocated statically
 * and never disappear, so taking a reference on them is a no-op.
 */
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
        if (irq->intid < VGIC_MIN_LPI)
                return;

        kref_get(&irq->refcount);
}
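
/*
 * A get must eventually be balanced by a vgic_put_irq(). A hypothetical
 * usage sketch, stashing an extra reference to an LPI already held:
 *
 *      vgic_get_irq_kref(irq);
 *      ite->irq = irq;
 *
 * with a matching vgic_put_irq() once the stored pointer is dropped.
 */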

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);

bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         u64 id, u64 *val);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
                                u64 *reg);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                                    u32 intid, u64 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);

void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;

        /*
         * num_pri_bits is initialized from the HW-supported value, so
         * we can safely rely on it even if the VM has not restored
         * ICC_CTLR_EL1 before restoring the APnR registers.
         */
        switch (cpu_if->num_pri_bits) {
        case 7: return 3;
        case 6: return 1;
        default: return 0;
        }
}
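
/*
 * For instance, a host with 5 priority bits gets 0 back, i.e. only
 * ICH_AP0R0_EL2/ICH_AP1R0_EL2 are in use, while 7 priority bits imply
 * four active-priority registers (indices 0..3).
 */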

static inline bool
vgic_v3_redist_region_full(struct vgic_redist_region *region)
{
        if (!region->count)
                return false;

        return (region->free_index >= region->count);
}

struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);

static inline size_t
vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
        if (!rdreg->count)
                return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
        else
                return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
}
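
/*
 * E.g. (illustrative numbers) a region created with count == 4 spans
 * 4 * KVM_VGIC_V3_REDIST_SIZE bytes (four RD_base + SGI_base frame
 * pairs), while a legacy region (count == 0) is sized to fit every
 * online VCPU.
 */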

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index);

bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;

        return (base + size > d->vgic_dist_base) &&
                (base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}
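
/*
 * In other words, [base, base + size) intersects the 64K distributor
 * window. E.g. with the distributor at 0x8000000, a base of 0x800f000
 * with size 0x2000 overlaps, while a base of 0x8010000 does not.
 */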

int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
void vgic_lpi_translation_cache_init(struct kvm *kvm);
void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
void vgic_its_invalidate_cache(struct kvm *kvm);

bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);

#endif
