This source file includes the following definitions:
- kfd_chardev_init
- kfd_chardev_exit
- kfd_chardev
- kfd_open
- kfd_ioctl_get_version
- set_queue_properties_from_user
- kfd_ioctl_create_queue
- kfd_ioctl_destroy_queue
- kfd_ioctl_update_queue
- kfd_ioctl_set_cu_mask
- kfd_ioctl_get_queue_wave_state
- kfd_ioctl_set_memory_policy
- kfd_ioctl_set_trap_handler
- kfd_ioctl_dbg_register
- kfd_ioctl_dbg_unregister
- kfd_ioctl_dbg_address_watch
- kfd_ioctl_dbg_wave_control
- kfd_ioctl_get_clock_counters
- kfd_ioctl_get_process_apertures
- kfd_ioctl_get_process_apertures_new
- kfd_ioctl_create_event
- kfd_ioctl_destroy_event
- kfd_ioctl_set_event
- kfd_ioctl_reset_event
- kfd_ioctl_wait_events
- kfd_ioctl_set_scratch_backing_va
- kfd_ioctl_get_tile_config
- kfd_ioctl_acquire_vm
- kfd_dev_is_large_bar
- kfd_ioctl_alloc_memory_of_gpu
- kfd_ioctl_free_memory_of_gpu
- kfd_ioctl_map_memory_to_gpu
- kfd_ioctl_unmap_memory_from_gpu
- kfd_ioctl_get_dmabuf_info
- kfd_ioctl_import_dmabuf
- kfd_ioctl
- kfd_mmio_mmap
- kfd_mmap
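
The entry points above are reached from userspace through the /dev/kfd character device, whose ioctl interface is defined in <uapi/linux/kfd_ioctl.h>. As a rough illustration of the calling convention (a minimal userspace sketch, not part of this file, assuming the uapi header is installed as <linux/kfd_ioctl.h>), a program can open the device and query the interface version that kfd_ioctl_get_version() below services:

/* Minimal userspace sketch: query the KFD ioctl version. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	/* kfd_open() rejects minors other than 0 and 32-bit callers */
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open /dev/kfd");
		return 1;
	}
	/* Dispatched by kfd_ioctl() to kfd_ioctl_get_version() */
	if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) < 0) {
		perror("AMDKFD_IOC_GET_VERSION");
		close(fd);
		return 1;
	}
	printf("KFD ioctl version %u.%u\n",
	       args.major_version, args.minor_version);
	close(fd);
	return 0;
}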
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/dma-buf.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "amdgpu_amdkfd.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;

int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}


static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(filep);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_is_locked())
		return -EAGAIN;

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}

static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok((const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok((const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok((const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok((const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %px, %px\n",
			q_properties->read_ptr,
			q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}

static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (!dev) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;
	if (KFD_IS_SOC15(dev->device_info->asic_family))
		/* On SOC15 ASICs, include the doorbell offset within the
		 * process doorbell frame, which is 2 pages.
		 */
		args->doorbell_offset |= q_properties.doorbell_off;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("Read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("Write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for pasid %d\n",
				args->queue_id,
				p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for pasid %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}

static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	const int max_num_cus = 1024;
	struct kfd_ioctl_set_cu_mask_args *args = data;
	struct queue_properties properties;
	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);

	if ((args->num_cu_mask % 32) != 0) {
		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
				args->num_cu_mask);
		return -EINVAL;
	}

	properties.cu_mask_count = args->num_cu_mask;
	if (properties.cu_mask_count == 0) {
		pr_debug("CU mask cannot be 0");
		return -EINVAL;
	}

	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits.  We can then just drop any CU mask bits
	 * that are beyond this limit.
	 */
	if (properties.cu_mask_count > max_num_cus) {
		pr_debug("CU mask cannot be greater than 1024 bits");
		properties.cu_mask_count = max_num_cus;
		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
	}

	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
	if (!properties.cu_mask)
		return -ENOMEM;

	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
	if (retval) {
		pr_debug("Could not copy CU mask from userspace");
		kfree(properties.cu_mask);
		return -EFAULT;
	}

	mutex_lock(&p->mutex);

	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	if (retval)
		kfree(properties.cu_mask);

	return retval;
}

static int kfd_ioctl_get_queue_wave_state(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_queue_wave_state_args *args = data;
	int r;

	mutex_lock(&p->mutex);

	r = pqm_get_wave_state(&p->pqm, args->queue_id,
			       (void __user *)args->ctl_stack_address,
			       &args->ctl_stack_used_size,
			       &args->save_area_used_size);

	mutex_unlock(&p->mutex);

	return r;
}

static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_set_trap_handler(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_trap_handler_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	if (dev->dqm->ops.set_trap_handler(dev->dqm,
					&pdd->qpd,
					args->tba_addr,
					args->tma_addr))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_register_args *args = data;
	struct kfd_dev *dev;
	struct kfd_dbgmgr *dbgmgr_ptr;
	struct kfd_process_device *pdd;
	bool create_ok;
	long status = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	mutex_lock(kfd_get_dbgmgr_mutex());

	/*
	 * make sure that we have pdd, if this is the first queue created for
	 * this process
	 */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		status = PTR_ERR(pdd);
		goto out;
	}

	if (!dev->dbgmgr) {
		/* In case of a legal call, we have no dbgmgr yet */
		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
		if (create_ok) {
			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
			if (status != 0)
				kfd_dbgmgr_destroy(dbgmgr_ptr);
			else
				dev->dbgmgr = dbgmgr_ptr;
		}
	} else {
		pr_debug("debugger already registered\n");
		status = -EINVAL;
	}

out:
	mutex_unlock(kfd_get_dbgmgr_mutex());
	mutex_unlock(&p->mutex);

	return status;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_unregister_args *args = data;
	struct kfd_dev *dev;
	long status;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev || !dev->dbgmgr)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
	if (!status) {
		kfd_dbgmgr_destroy(dev->dbgmgr);
		dev->dbgmgr = NULL;
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}

/*
 * Parse and generate variable size data structure for address watch.
 * Total size of the buffer and # watch points is limited in order
 * to prevent kernel abuse. (no bearing to the much smaller HW limitation
 * which is enforced by dbgdev module)
 * please also note that the watch_points_requested is included in the
 * content pointed by args->content_ptr.
 * so the size of the buffer has to meet the containment expectations of
 * the debugger also.
 */
static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_address_watch_args *args = data;
	struct kfd_dev *dev;
	struct dbg_address_watch_info aw_info;
	unsigned char *args_buff;
	long status;
	void __user *cmd_from_user;
	uint64_t watch_mask_value = 0;
	unsigned int args_idx = 0;

	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_address_watch not supported on CZ\n");
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	/* Validate arguments */

	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
		(cmd_from_user == NULL))
		return -EINVAL;

	/* this is the actual buffer to work with */
	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	aw_info.process = p;

	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(aw_info.num_watch_points);

	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;

	/* set watch address base pointer to point on the array base
	 * within args_buff
	 */
	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];

	/* skip over the addresses buffer */
	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		status = -EINVAL;
		goto out;
	}

	watch_mask_value = (uint64_t) args_buff[args_idx];

	if (watch_mask_value > 0) {
		/*
		 * There is an array of masks.
		 * set watch mask base pointer to point on the array base
		 * within args_buff
		 */
		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];

		/* skip over the masks buffer */
		args_idx += sizeof(aw_info.watch_mask) *
				aw_info.num_watch_points;
	} else {
		/* just the NULL mask, set to NULL and skip over it */
		aw_info.watch_mask = NULL;
		args_idx += sizeof(aw_info.watch_mask);
	}

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		status = -EINVAL;
		goto out;
	}

	/* Currently HSA Event is not supported for DBG */
	aw_info.watch_event = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

out:
	kfree(args_buff);

	return status;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_wave_control_args *args = data;
	struct kfd_dev *dev;
	struct dbg_wave_control_info wac_info;
	unsigned char *args_buff;
	uint32_t computed_buff_size;
	long status;
	void __user *cmd_from_user;
	unsigned int args_idx = 0;

	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));

	/* we use compact form, independent of the packing attribute value */
	computed_buff_size = sizeof(*args) +
				sizeof(wac_info.mode) +
				sizeof(wac_info.operand) +
				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
				sizeof(wac_info.dbgWave_msg.MemoryVA) +
				sizeof(wac_info.trapId);

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	/* input size must match the computed "compact" size */
	if (args->buf_size_in_bytes != computed_buff_size) {
		pr_debug("size mismatch, computed : actual %u : %u\n",
				args->buf_size_in_bytes, computed_buff_size);
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	if (cmd_from_user == NULL)
		return -EINVAL;

	/* copy the entire buffer from user */
	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	/* move ptr to the start of the "pay-load" area */
	wac_info.process = p;

	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.operand);

	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.mode);

	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.trapId);

	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
					*((uint32_t *)(&args_buff[args_idx]));
	wac_info.dbgWave_msg.MemoryVA = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
			wac_info.process, wac_info.operand,
			wac_info.mode, wac_info.trapId,
			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);

	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	pr_debug("Returned status of dbg manager is %ld\n", status);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}

static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev)
		/* Reading GPU clock counter from KGD */
		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
	else
		/* Node without GPU resource */
		args->gpu_clock_counter = 0;

	/* No access to rdtsc. Using raw monotonic time */
	args->cpu_clock_counter = ktime_get_raw_ns();
	args->system_clock_counter = ktime_get_boottime_ns();

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}


static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/* if the process-device list isn't empty */
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;

			pdd = kfd_get_next_process_device_data(p, pdd);
		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}

static int kfd_ioctl_get_process_apertures_new(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_new_args *args = data;
	struct kfd_process_device_apertures *pa;
	struct kfd_process_device *pdd;
	uint32_t nodes = 0;
	int ret;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
		mutex_lock(&p->mutex);

		if (!kfd_has_process_device_data(p))
			goto out_unlock;

		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			args->num_of_nodes++;
			pdd = kfd_get_next_process_device_data(p, pdd);
		} while (pdd);

		goto out_unlock;
	}

	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
			args->num_of_nodes), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	mutex_lock(&p->mutex);

	if (!kfd_has_process_device_data(p)) {
		args->num_of_nodes = 0;
		kfree(pa);
		goto out_unlock;
	}

	/* Run over all pdd of the process */
	pdd = kfd_get_first_process_device_data(p);
	do {
		pa[nodes].gpu_id = pdd->dev->id;
		pa[nodes].lds_base = pdd->lds_base;
		pa[nodes].lds_limit = pdd->lds_limit;
		pa[nodes].gpuvm_base = pdd->gpuvm_base;
		pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
		pa[nodes].scratch_base = pdd->scratch_base;
		pa[nodes].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
		nodes++;

		pdd = kfd_get_next_process_device_data(p, pdd);
	} while (pdd && (nodes < args->num_of_nodes));
	mutex_unlock(&p->mutex);

	args->num_of_nodes = nodes;
	ret = copy_to_user(
			(void __user *)args->kfd_process_device_apertures_ptr,
			pa,
			(nodes * sizeof(struct kfd_process_device_apertures)));
	kfree(pa);
	return ret ? -EFAULT : 0;

out_unlock:
	mutex_unlock(&p->mutex);
	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
	if (args->event_page_offset) {
		struct kfd_dev *kfd;
		struct kfd_process_device *pdd;
		void *mem, *kern_addr;
		uint64_t size;

		if (p->signal_page) {
			pr_err("Event page is already set\n");
			return -EINVAL;
		}

		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
		if (!kfd) {
			pr_err("Getting device by id failed in %s\n", __func__);
			return -EINVAL;
		}

		mutex_lock(&p->mutex);
		pdd = kfd_bind_process_to_device(kfd, p);
		if (IS_ERR(pdd)) {
			err = PTR_ERR(pdd);
			goto out_unlock;
		}

		mem = kfd_process_device_translate_handle(pdd,
				GET_IDR_HANDLE(args->event_page_offset));
		if (!mem) {
			pr_err("Can't find BO, offset is 0x%llx\n",
			       args->event_page_offset);
			err = -EINVAL;
			goto out_unlock;
		}
		mutex_unlock(&p->mutex);

		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
						mem, &kern_addr, &size);
		if (err) {
			pr_err("Failed to map event page to kernel\n");
			return err;
		}

		err = kfd_event_page_set(p, kern_addr, size);
		if (err) {
			pr_err("Failed to set event page\n");
			return err;
		}
	}

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	return err;

out_unlock:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;
	int err;

	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &args->wait_result);

	return err;
}

static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	long err;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
	    pdd->qpd.vmid != 0)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->kgd, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_dev *dev;
	struct tile_config config;
	int err = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	dev->kfd2kgd->get_tile_config(dev->kgd, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			config.tile_config_ptr,
			args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			config.macro_tile_config_ptr,
			args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_acquire_vm_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	struct file *drm_file;
	int ret;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	drm_file = fget(args->drm_fd);
	if (!drm_file)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;
	}

	if (pdd->drm_file) {
		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
		goto err_unlock;
	}

	ret = kfd_process_device_init_vm(pdd, drm_file);
	if (ret)
		goto err_unlock;
	/* On success, the PDD keeps the drm_file reference */
	mutex_unlock(&p->mutex);

	return 0;

err_unlock:
	mutex_unlock(&p->mutex);
	fput(drm_file);
	return ret;
}

bool kfd_dev_is_large_bar(struct kfd_dev *dev)
{
	struct kfd_local_mem_info mem_info;

	if (debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->device_info->needs_iommu_device)
		return false;

	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
	if (mem_info.local_mem_size_private == 0 &&
			mem_info.local_mem_size_public > 0)
		return true;
	return false;
}

static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_dev *dev;
	int idr_handle;
	long err;
	uint64_t offset = args->mmap_offset;
	uint32_t flags = args->flags;

	if (args->size == 0)
		return -EINVAL;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
		!kfd_dev_is_large_bar(dev)) {
		pr_err("Alloc host visible vram on small bar is not allowed\n");
		return -EINVAL;
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		if (args->size != kfd_doorbell_process_slice(dev))
			return -EINVAL;
		offset = kfd_get_process_doorbells(dev, p);
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		if (args->size != PAGE_SIZE)
			return -EINVAL;
		offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
		if (!offset)
			return -ENOMEM;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		dev->kgd, args->va_addr, args->size,
		pdd->vm, (struct kgd_mem **) &mem, &offset,
		flags);

	if (err)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
	args->mmap_offset = offset;

	/* MMIO is mapped through kfd device
	 * Generate a kfd mmap offset
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
		args->mmap_offset <<= PAGE_SHIFT;
	}

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_dev *dev;
	int ret;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
						(struct kgd_mem *)mem);

	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

err_unlock:
	mutex_unlock(&p->mutex);
	return ret;
}

static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_dev *dev, *peer;
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
			GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (!peer) {
			pr_debug("Getting device by id failed for 0x%x\n",
				 devices_arr[i]);
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_bind_process_to_device(peer, p);
		if (IS_ERR(peer_pdd)) {
			err = PTR_ERR(peer_pdd);
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
		if (err) {
			pr_err("Failed to map to gpu %d/%d\n",
			       i, args->n_devices);
			goto map_memory_to_gpu_failed;
		}
		args->n_success = i+1;
	}

	mutex_unlock(&p->mutex);

	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (WARN_ON_ONCE(!peer))
			continue;
		peer_pdd = kfd_get_process_device_data(peer, p);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd);
	}

	kfree(devices_arr);

	return err;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
	kfree(devices_arr);

	return err;
}

static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_dev *dev, *peer;
	long err = 0;
	uint32_t *devices_arr = NULL, i;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		err = -EINVAL;
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
			GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (!peer) {
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_get_process_device_data(peer, p);
		if (!peer_pdd) {
			err = -ENODEV;
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
		if (err) {
			pr_err("Failed to unmap from gpu %d/%d\n",
			       i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		args->n_success = i+1;
	}
	kfree(devices_arr);

	mutex_unlock(&p->mutex);

	return 0;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);
	return err;
}

static int kfd_ioctl_get_dmabuf_info(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_dmabuf_info_args *args = data;
	struct kfd_dev *dev = NULL;
	struct kgd_dev *dma_buf_kgd;
	void *metadata_buffer = NULL;
	uint32_t flags;
	unsigned int i;
	int r;

	/* Find a KFD GPU device that supports the get_dmabuf_info query */
	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
		if (dev)
			break;
	if (!dev)
		return -EINVAL;

	if (args->metadata_ptr) {
		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
		if (!metadata_buffer)
			return -ENOMEM;
	}

	/* Get dmabuf info from KGD */
	r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
					  &dma_buf_kgd, &args->size,
					  metadata_buffer, args->metadata_size,
					  &args->metadata_size, &flags);
	if (r)
		goto exit;

	/* Reverse-lookup gpu_id from kgd pointer */
	dev = kfd_device_by_kgd(dma_buf_kgd);
	if (!dev) {
		r = -EINVAL;
		goto exit;
	}
	args->gpu_id = dev->id;
	args->flags = flags;

	/* Copy metadata buffer to user mode */
	if (metadata_buffer) {
		r = copy_to_user((void __user *)args->metadata_ptr,
				 metadata_buffer, args->metadata_size);
		if (r != 0)
			r = -EFAULT;
	}

exit:
	kfree(metadata_buffer);

	return r;
}

static int kfd_ioctl_import_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_import_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	struct dma_buf *dmabuf;
	struct kfd_dev *dev;
	int idr_handle;
	uint64_t size;
	void *mem;
	int r;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	dmabuf = dma_buf_get(args->dmabuf_fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		r = PTR_ERR(pdd);
		goto err_unlock;
	}

	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
					      args->va_addr, pdd->vm,
					      (struct kgd_mem **)&mem, &size,
					      NULL);
	if (r)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		r = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
	mutex_unlock(&p->mutex);
	return r;
}

#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
			    .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
			kfd_ioctl_dbg_wave_control, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
			kfd_ioctl_set_scratch_backing_va, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
			kfd_ioctl_get_tile_config, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
			kfd_ioctl_set_trap_handler, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
			kfd_ioctl_get_process_apertures_new, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
			kfd_ioctl_acquire_vm, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
			kfd_ioctl_alloc_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
			kfd_ioctl_free_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
			kfd_ioctl_map_memory_to_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
			kfd_ioctl_unmap_memory_from_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
			kfd_ioctl_set_cu_mask, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
			kfd_ioctl_get_queue_wave_state, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
			kfd_ioctl_get_dmabuf_info, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
			kfd_ioctl_import_dmabuf, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}

static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma)
{
	phys_addr_t address;
	int ret;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
			VM_DONTDUMP | VM_PFNMAP;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	pr_debug("Process %d mapping mmio page\n"
		 "     target user address == 0x%08llX\n"
		 "     physical address    == 0x%08llX\n"
		 "     vm_flags            == 0x%04lX\n"
		 "     size                == 0x%04lX\n",
		 process->pasid, (unsigned long long) vma->vm_start,
		 address, vma->vm_flags, PAGE_SIZE);

	ret = io_remap_pfn_range(vma,
				vma->vm_start,
				address >> PAGE_SHIFT,
				PAGE_SIZE,
				vma->vm_page_prot);
	return ret;
}


static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;
	struct kfd_dev *dev = NULL;
	unsigned long vm_pgoff;
	unsigned int gpu_id;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
	gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
	if (gpu_id)
		dev = kfd_device_by_id(gpu_id);

	switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
	case KFD_MMAP_TYPE_DOORBELL:
		if (!dev)
			return -ENODEV;
		return kfd_doorbell_mmap(dev, process, vma);

	case KFD_MMAP_TYPE_EVENTS:
		return kfd_event_mmap(process, vma);

	case KFD_MMAP_TYPE_RESERVED_MEM:
		if (!dev)
			return -ENODEV;
		return kfd_reserved_mem_mmap(dev, process, vma);
	case KFD_MMAP_TYPE_MMIO:
		if (!dev)
			return -ENODEV;
		return kfd_mmio_mmap(dev, process, vma);
	}

	return -EFAULT;
}