This source file includes the following definitions:
- vfio_platform_mask
- vfio_platform_mask_handler
- vfio_platform_set_irq_mask
- vfio_platform_unmask
- vfio_platform_unmask_handler
- vfio_platform_set_irq_unmask
- vfio_automasked_irq_handler
- vfio_irq_handler
- vfio_set_trigger
- vfio_platform_set_irq_trigger
- vfio_platform_set_irqs_ioctl
- vfio_platform_irq_init
- vfio_platform_irq_cleanup
#include <linux/eventfd.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/irq.h>

#include "vfio_platform_private.h"

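/*
 * Mask a single IRQ: disable the hardware line if it is not already
 * masked. The per-IRQ spinlock serializes this against the interrupt
 * handlers below; disable_irq_nosync() is used so the call is safe
 * from atomic context.
 */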
static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

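/* virqfd callback: mask the IRQ when the associated eventfd fires. */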
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_mask(irq_ctx);

	return 0;
}

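/*
 * Handle VFIO_IRQ_SET_ACTION_MASK. Only maskable (level-triggered)
 * IRQs may be masked. DATA_EVENTFD attaches (fd >= 0) or detaches
 * (fd < 0) an eventfd that masks the IRQ whenever it is signaled;
 * DATA_NONE masks immediately; DATA_BOOL masks if the flag is set.
 */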
static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
				      unsigned index, unsigned start,
				      unsigned count, uint32_t flags,
				      void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_mask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].mask, fd);

		vfio_virqfd_disable(&vdev->irqs[index].mask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_mask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_platform_mask(&vdev->irqs[index]);
	}

	return 0;
}

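/*
 * Unmask a single IRQ: re-enable the hardware line if it was masked,
 * under the same per-IRQ spinlock as the mask path.
 */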
static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (irq_ctx->masked) {
		enable_irq(irq_ctx->hwirq);
		irq_ctx->masked = false;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

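/* virqfd callback: unmask the IRQ when the associated eventfd fires. */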
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_unmask(irq_ctx);

	return 0;
}

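/*
 * Handle VFIO_IRQ_SET_ACTION_UNMASK, the mirror image of the mask
 * path above: an eventfd may be attached to unmask the IRQ, or the
 * IRQ may be unmasked immediately via DATA_NONE or DATA_BOOL.
 */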
static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
					unsigned index, unsigned start,
					unsigned count, uint32_t flags,
					void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_unmask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].unmask,
						  fd);

		vfio_virqfd_disable(&vdev->irqs[index].unmask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_unmask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_platform_unmask(&vdev->irqs[index]);
	}

	return 0;
}

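/*
 * Handler for level-triggered (AUTOMASKED) IRQs: the line must be
 * disabled at the irqchip before userspace is signaled, otherwise the
 * still-asserted level would re-raise the interrupt immediately.
 * Userspace unmasks the IRQ once it has serviced the device, typically
 * through the unmask eventfd set up above.
 */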
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger, 1);

	return ret;
}

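/* Handler for edge-triggered IRQs: just signal the trigger eventfd. */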
static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	eventfd_signal(irq_ctx->trigger, 1);

	return IRQ_HANDLED;
}

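/*
 * Attach or detach the eventfd that signals userspace for this IRQ.
 * Any existing trigger is torn down first; fd < 0 means disable only.
 * IRQ_NOAUTOEN keeps request_irq() from enabling the line, so an IRQ
 * registered while masked stays masked.
 */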
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	if (irq->trigger) {
		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
			      irq->hwirq, vdev->name);
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	if (!irq->masked)
		enable_irq(irq->hwirq);

	return 0;
}

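/*
 * Handle VFIO_IRQ_SET_ACTION_TRIGGER: attach or detach the trigger
 * eventfd via vfio_set_trigger(), or fire the handler by hand for the
 * DATA_NONE/DATA_BOOL loopback cases.
 */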
static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
					 unsigned index, unsigned start,
					 unsigned count, uint32_t flags,
					 void *data)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	irq_handler_t handler;

	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
		handler = vfio_automasked_irq_handler;
	else
		handler = vfio_irq_handler;

	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1, handler);

	if (start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		return vfio_set_trigger(vdev, index, fd, handler);
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		handler(irq->hwirq, irq);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			handler(irq->hwirq, irq);
	}

	return 0;
}

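/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl: dispatch to the
 * mask, unmask or trigger handler selected by the action flags.
 *
 * A minimal userspace sketch (assuming an already opened VFIO device
 * fd "device_fd" and an eventfd "efd") that attaches efd as the
 * trigger for IRQ index 0 might look like:
 *
 *	int8_t buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *
 *	irq_set->argsz = sizeof(buf);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = 0;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(&irq_set->data, &efd, sizeof(int32_t));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 */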
int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
				 uint32_t flags, unsigned index, unsigned start,
				 unsigned count, void *data)
{
	int (*func)(struct vfio_platform_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
	case VFIO_IRQ_SET_ACTION_MASK:
		func = vfio_platform_set_irq_mask;
		break;
	case VFIO_IRQ_SET_ACTION_UNMASK:
		func = vfio_platform_set_irq_unmask;
		break;
	case VFIO_IRQ_SET_ACTION_TRIGGER:
		func = vfio_platform_set_irq_trigger;
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}

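/*
 * Count the IRQs this platform device exposes and allocate one
 * vfio_platform_irq per line. Level-triggered lines are advertised as
 * MASKABLE and AUTOMASKED so userspace knows it must unmask them after
 * each interrupt.
 */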
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_irq(vdev, cnt) >= 0)
		cnt++;

	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
	if (!vdev->irqs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		int hwirq = vdev->get_irq(vdev, i);

		if (hwirq < 0)
			goto err;

		spin_lock_init(&vdev->irqs[i].lock);

		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;

		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
						| VFIO_IRQ_INFO_AUTOMASKED;

		vdev->irqs[i].count = 1;
		vdev->irqs[i].hwirq = hwirq;
		vdev->irqs[i].masked = false;
	}

	vdev->num_irqs = cnt;

	return 0;
err:
	kfree(vdev->irqs);
	return -EINVAL;
}

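/* Release all triggers and free the per-IRQ state. */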
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_irqs; i++)
		vfio_set_trigger(vdev, i, -1, NULL);

	vdev->num_irqs = 0;
	kfree(vdev->irqs);
}