This source file includes the following definitions:
- vfio_pci_nvgpu_rw
- vfio_pci_nvgpu_release
- vfio_pci_nvgpu_mmap_fault
- vfio_pci_nvgpu_mmap
- vfio_pci_nvgpu_add_capability
- vfio_pci_nvgpu_group_notifier
- vfio_pci_nvdia_v100_nvlink2_init
- vfio_pci_npu2_rw
- vfio_pci_npu2_mmap
- vfio_pci_npu2_release
- vfio_pci_npu2_add_capability
- vfio_pci_ibm_npu2_init
// SPDX-License-Identifier: GPL-2.0+
/*
 * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
 *
 * Copyright (C) 2018 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Register an on-GPU RAM region for cacheable access.
 *
 * Derived from original vfio_pci_igd.c:
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/kvm_ppc.h>
#include "vfio_pci_private.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);

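/*
 * Per-region state for the GPU RAM region: the host physical address and
 * size of the GPU memory aperture, the TGT address, the userspace mapping
 * (if any), and the mm the mapping was registered with for IOMMU purposes.
 */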
struct vfio_pci_nvgpu_data {
	unsigned long gpu_hpa;
	unsigned long gpu_tgt;
	unsigned long useraddr;
	unsigned long size;
	struct mm_struct *mm;
	struct mm_iommu_table_group_mem_t *mem;
	struct pci_dev *gpdev;
	struct notifier_block group_notifier;
};

static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
	size_t sizealigned;
	void __iomem *ptr;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	/*
	 * We map only a bit of GPU RAM for a short time instead of mapping it
	 * for the guest lifetime as:
	 *
	 * 1) we do not know GPU RAM size, only aperture which is 4-6 times
	 *	bigger than the RAM size (16/32GB RAM vs. 128GB aperture);
	 * 2) mapping GPU RAM allows CPU to prefetch and if this happens
	 *	before NVLink bridge is reset (which fences GPU RAM),
	 *	hardware management interrupts (HMI) might happen, this
	 *	will freeze NVLink bridge.
	 *
	 * This is not fast path anyway.
	 */
	sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
	ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
	if (!ptr)
		return -EFAULT;

	if (iswrite) {
		if (copy_from_user(ptr + posoff, buf, count))
			count = -EFAULT;
		else
			*ppos += count;
	} else {
		if (copy_to_user(buf, ptr + posoff, count))
			count = -EFAULT;
		else
			*ppos += count;
	}

	iounmap(ptr);

	return count;
}

static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	long ret;

	/* If there were any mappings at all... */
	if (data->mm) {
		if (data->mem) {
			ret = mm_iommu_put(data->mm, data->mem);
			WARN_ON(ret);
		}

		mmdrop(data->mm);
	}

	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	pnv_npu2_unmap_lpar_dev(data->gpdev);

	kfree(data);
}

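/*
 * Fault handler for the mmapped GPU RAM: converts the faulting address
 * into an offset within the region and inserts the corresponding GPU RAM
 * pfn into the VMA.
 */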
static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_region *region = vma->vm_private_data;
	struct vfio_pci_nvgpu_data *data = region->data;
	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
	unsigned long vm_pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	unsigned long pfn = nv2pg + vm_pgoff + vmf_off;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
			vmf->address, ret);

	return ret;
}

static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
	.fault = vfio_pci_nvgpu_mmap_fault,
};

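/*
 * mmap() handler for the GPU RAM region. Only one mapping per region is
 * allowed; pages are inserted lazily by the fault handler above.
 */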
static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_nvgpu_data *data = region->data;

	if (data->useraddr)
		return -EPERM;

	if (vma->vm_end - vma->vm_start > data->size)
		return -EINVAL;

	vma->vm_private_data = region;
	vma->vm_flags |= VM_PFNMAP;
	vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;

	/*
	 * Register the mapping with the mm IOMMU code so the GPU RAM can be
	 * preregistered and used as a DMA target. The mm reference taken
	 * below is dropped in vfio_pci_nvgpu_release() together with this
	 * registration.
	 */
	data->useraddr = vma->vm_start;
	data->mm = current->mm;

	atomic_inc(&data->mm->mm_count); /* paired with mmdrop() in release */
	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
			vma_pages(vma), data->gpu_hpa, &data->mem);

	trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt cap = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}

static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
	.rw = vfio_pci_nvgpu_rw,
	.release = vfio_pci_nvgpu_release,
	.mmap = vfio_pci_nvgpu_mmap,
	.add_capability = vfio_pci_nvgpu_add_capability,
};

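/*
 * Group notifier: once userspace associates a KVM instance with the VFIO
 * group, map the GPU into the guest's (LPAR) context.
 */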
static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
		unsigned long action, void *opaque)
{
	struct kvm *kvm = opaque;
	struct vfio_pci_nvgpu_data *data = container_of(nb,
			struct vfio_pci_nvgpu_data,
			group_notifier);

	if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
			pnv_npu2_map_lpar_dev(data->gpdev,
				kvm->arch.lpid, MSR_DR | MSR_PR))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

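/*
 * Discover the GPU RAM aperture (from the "memory-region" node) and the
 * TGT address of an NVIDIA V100 attached via NVLink2, then expose the RAM
 * as a vendor-specific VFIO region.
 */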
int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
{
	int ret;
	u64 reg[2];
	u64 tgt = 0;
	struct device_node *npu_node, *mem_node;
	struct pci_dev *npu_dev;
	struct vfio_pci_nvgpu_data *data;
	uint32_t mem_phandle = 0;
	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;

	/*
	 * PCI config space does not tell us about NVLink presence but
	 * the platform does, use this.
	 */
	npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
	if (!npu_dev)
		return -ENODEV;

	npu_node = pci_device_to_OF_node(npu_dev);
	if (!npu_node)
		return -EINVAL;

	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
		return -EINVAL;

	mem_node = of_find_node_by_phandle(mem_phandle);
	if (!mem_node)
		return -EINVAL;

	if (of_property_read_variable_u64_array(mem_node, "reg", reg,
				ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
			ARRAY_SIZE(reg))
		return -EINVAL;

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->gpu_hpa = reg[0];
	data->gpu_tgt = tgt;
	data->size = reg[1];

	dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
			data->gpu_hpa + data->size - 1);

	data->gpdev = vdev->pdev;
	data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;

	ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&events, &data->group_notifier);
	if (ret)
		goto free_exit;

	/*
	 * We have just set KVM, we do not need the listener anymore.
	 * Also, keeping it registered means that if more than one GPU is
	 * assigned, we will get several similar notifiers notifying about
	 * the same device again which does not help with anything.
	 */
	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
			&vfio_pci_nvgpu_regops,
			data->size,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;
free_exit:
	kfree(data);

	return ret;
}

/*
 * IBM NPU2 bridge
 */
struct vfio_pci_npu2_data {
	void *base; /* ATSD register virtual address, for emulated access */
	unsigned long mmio_atsd; /* ATSD physical address */
	unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
	unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
};

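/* Emulated read/write access to the ATSD register, via the memremap()ed base. */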
static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_npu2_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	if (iswrite) {
		if (copy_from_user(data->base + pos, buf, count))
			return -EFAULT;
	} else {
		if (copy_to_user(buf, data->base + pos, count))
			return -EFAULT;
	}
	*ppos += count;

	return count;
}

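/*
 * Let userspace map the ATSD register directly: exactly one page,
 * non-cacheable as this is MMIO.
 */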
static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_npu2_data *data = region->data;
	unsigned long req_len = vma->vm_end - vma->vm_start;

	if (req_len != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags |= VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
			req_len, vma->vm_page_prot);
	trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_npu2_data *data = region->data;

	memunmap(data->base);
	kfree(data);
}

static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_npu2_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};
	struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
		.header.version = 1,
		.link_speed = data->link_speed
	};
	int ret;

	ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
	if (ret)
		return ret;

	return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
}

static const struct vfio_pci_regops vfio_pci_npu2_regops = {
	.rw = vfio_pci_npu2_rw,
	.mmap = vfio_pci_npu2_mmap,
	.release = vfio_pci_npu2_release,
	.add_capability = vfio_pci_npu2_add_capability,
};

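/*
 * Look up this NVLink bridge's ATSD register, the paired GPU's TGT address
 * and the link speed in the device tree, and expose them via a
 * vendor-specific VFIO region and its capabilities.
 */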
int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
{
	int ret;
	struct vfio_pci_npu2_data *data;
	struct device_node *nvlink_dn;
	u32 nvlink_index = 0;
	struct pci_dev *npdev = vdev->pdev;
	struct device_node *npu_node = pci_device_to_OF_node(npdev);
	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
	u64 mmio_atsd = 0;
	u64 tgt = 0;
	u32 link_speed = 0xff;

	/*
	 * PCI config space does not tell us about NVLink presence but
	 * the platform does, use this.
	 */
	if (!pnv_pci_get_gpu_dev(vdev->pdev))
		return -ENODEV;

	/*
	 * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
	 * so we can allocate one register per link, using nvlink index as
	 * a key.
	 * There is always at least one ATSD register so as long as at least
	 * NVLink bridge #0 is passed to the guest, ATSD will be available.
	 */
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
			&nvlink_index)))
		return -ENODEV;

	if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
			&mmio_atsd)) {
		dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
		mmio_atsd = 0;
	}

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
		dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->mmio_atsd = mmio_atsd;
	data->gpu_tgt = tgt;
	data->link_speed = link_speed;
	if (data->mmio_atsd) {
		data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
		if (!data->base) {
			ret = -ENOMEM;
			goto free_exit;
		}
	}

	/*
	 * We want to expose the capability even if this specific NVLink
	 * bridge does not get its own ATSD register because capabilities
	 * belong to VFIO regions and normally there will be an ATSD register
	 * assigned to the NVLink bridge anyway.
	 */
	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_IBM |
			VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
			&vfio_pci_npu2_regops,
			data->mmio_atsd ? PAGE_SIZE : 0,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;

free_exit:
	if (data->base)
		memunmap(data->base);
	kfree(data);

	return ret;
}