1
2
3
4
5 #ifndef __KVM_ARM_VGIC_MMIO_H__
6 #define __KVM_ARM_VGIC_MMIO_H__
7
/*
 * Describes one MMIO-accessible GIC register (or a per-IRQ register
 * range) and the handlers used to access it, both from a guest MMIO
 * trap and from userspace (KVM device attribute accesses).
 */
struct vgic_register_region {
	unsigned int reg_offset;	/* byte offset of the register within its region */
	unsigned int len;		/* length of the covered range in bytes */
	unsigned int bits_per_irq;	/* bits per IRQ for ranged regs, 0 otherwise */
	unsigned int access_flags;	/* permitted access widths (VGIC_ACCESS_*) */
	/* Guest read handler: per-VCPU flavour, or ITS flavour for ITS tables. */
	union {
		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
					  gpa_t addr, unsigned int len);
	};
	/* Guest write handler (same per-VCPU vs. ITS split as above). */
	union {
		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
			      unsigned int len, unsigned long val);
		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
				  gpa_t addr, unsigned int len,
				  unsigned long val);
	};
	/*
	 * Optional userspace read override; NOTE(review): presumably the
	 * dispatcher falls back to .read when this is NULL — the fallback
	 * lives in the .c file, confirm there.
	 */
	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
	/* Userspace write handlers; int return lets them fail (unlike .write). */
	union {
		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
				     unsigned int len, unsigned long val);
		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
					 gpa_t addr, unsigned int len,
					 unsigned long val);
	};
};
36
37 extern struct kvm_io_device_ops kvm_io_gic_ops;
38
/* Bitmask values for .access_flags: the MMIO access widths a region accepts. */
#define VGIC_ACCESS_8bit	1
#define VGIC_ACCESS_32bit	2
#define VGIC_ACCESS_64bit	4
42
43
44
45
46
47
/*
 * Generate a mask covering the number of bytes needed to address up to
 * 1024 interrupts, given how many bits each IRQ uses in the register
 * (e.g. bits == 1 -> 1024 bits == 128 bytes -> mask 0x7f).
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
49
50
51
52
53
54
55
56
57
/*
 * Convert a byte offset within a per-IRQ register range into the INTID
 * it addresses: (addr & mask) is the byte offset into the range, which
 * is scaled to a bit offset (* 8) and divided by the bits used per IRQ.
 * ilog2() turns that division into a shift, so 'bits' must be a power
 * of two.
 */
#define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
					8 >> ilog2(bits))
60
61
62
63
64
65
66
/*
 * Initializer for a per-IRQ register range holding 'bpi' bits per IRQ,
 * sized for up to 1024 interrupts (hence len = bpi * 1024 / 8 bytes),
 * with both guest MMIO and userspace accessors.
 *
 * Fix: parenthesize 'bpi' in the len arithmetic — the macro-hygiene
 * rule; an expression argument such as '1 + 1' would otherwise bind to
 * the multiplication incorrectly.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi) * 1024 / 8,				\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
78
/*
 * Initializer for a fixed-length (non per-IRQ) register with guest MMIO
 * accessors only; userspace accesses go through the plain handlers too.
 */
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
	}
88
/*
 * Like REGISTER_DESC_WITH_LENGTH, but with dedicated userspace
 * accessors for registers whose userspace view differs from the
 * guest MMIO view.
 */
#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = urd,					\
		.uaccess_write = uwr,					\
	}
100
/* Register one vgic MMIO region (per-VM, or per-VCPU when offset_private). */
int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
				  struct vgic_register_region *reg_desc,
				  struct vgic_io_device *region,
				  int nr_irqs, bool offset_private);

/* Marshal raw MMIO bus data (up to 'len' bytes) into a host value ... */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

/* ... and back from a host value into an MMIO bus buffer. */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data);

/* Extract 'num' bytes starting at byte 'offset' from a 64-bit value. */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num);

/* Update 'len' bytes at byte 'offset' of a 64-bit register with 'val'. */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val);
116
/* Stub accessors: Read-As-Zero, Read-As-One, Write-Ignore. */
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val);

/* Write-ignore for userspace accesses (always succeeds). */
int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val);

/* GICD_IGROUPR-style interrupt group bit accessors. */
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
				   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val);
134
/*
 * Per-IRQ state accessors. Naming follows the GIC register scheme:
 * 's' prefix = set bits that are written as 1 (ISENABLER/ISPENDR/...),
 * 'c' prefix = clear bits that are written as 1 (ICENABLER/ICPENDR/...).
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

/* Userspace variant of the active-state read. */
unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len);

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

/* Userspace variants of the active-state writes (may fail). */
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);
178
/* IPRIORITYR-style per-IRQ priority accessors. */
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

/* ICFGR-style per-IRQ trigger configuration (level/edge) accessors. */
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val);

/* Userspace access to one 32-bit register of an I/O device. */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val);

/* Read/write line-level state for a 32-IRQ block starting at intid. */
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val);

/* Set up the distributor I/O device; returns its length in bytes. */
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

/* Sanitise guest-written memory-attribute fields of GIC base registers. */
u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64));

/* Find the region (if any) in a table that contains 'offset'. */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset);
215
216 #endif