This source file includes the following definitions.
- page_slots
- allocate_signal_page
- allocate_event_notification_slot
- lookup_event_by_id
- lookup_signaled_event_by_partial_id
- create_signal_event
- create_other_event
- kfd_event_init_process
- destroy_event
- destroy_events
- shutdown_signal_page
- kfd_event_free_process
- event_can_be_gpu_signaled
- event_can_be_cpu_signaled
- kfd_event_page_set
- kfd_event_create
- kfd_event_destroy
- set_event
- kfd_set_event
- reset_event
- kfd_reset_event
- acknowledge_signal
- set_event_from_interrupt
- kfd_signal_event_interrupt
- alloc_event_waiters
- init_event_waiter_get_status
- init_event_waiter_add_to_waitlist
- test_event_condition
- copy_signaled_event_data
- user_timeout_to_jiffies
- free_waiters
- kfd_wait_on_events
- kfd_event_mmap
- lookup_events_by_type_and_signal
- kfd_signal_iommu_event
- kfd_signal_hw_exception_event
- kfd_signal_vm_fault_event
- kfd_signal_reset_event
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>
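/*
 * One of these per event per waiting thread: ties a wait queue entry to the
 * event being waited on and records whether that event has signaled.
 */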
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event;
	bool activated;
};
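/*
 * A process-wide page of 64-bit signal slots, indexed by event ID. A signaler
 * writes to a slot before raising an interrupt; the kernel mapping and the
 * user mapping refer to the same backing store. need_to_free_pages is set
 * when the backing store was allocated here rather than supplied through
 * kfd_event_page_set().
 */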
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	bool need_to_free_pages;
};

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all slots to the unsignaled value */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
		 page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Until user mode maps the page, limit allocation to 256 slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Only hand out slot IDs that fall inside the range already mapped
	 * to user mode: signal_mapped_size bytes at 8 bytes per slot.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}
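/*
 * Look up an event by its exact ID. All callers in this file hold
 * p->event_mutex while the returned event is in use.
 */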
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}
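/*
 * Look up a signaled event from a partial ID: @bits is the number of valid
 * low-order bits in @id. If the partial ID covers the whole signal-event ID
 * space, this is a single slot check plus one IDR lookup; otherwise every ID
 * matching the partial ID is scanned and the first signaled one is returned.
 * Returns NULL if no matching slot has signaled.
 */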
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/*
	 * Fast path: the ID is complete (all signal-event ID bits are valid),
	 * so only one slot and one IDR entry need to be checked.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/*
	 * General case: iterate over all IDs that match the partial ID and
	 * return the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}

static int create_signal_event(struct file *devkfd,
			       struct kfd_process *p,
			       struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
		 p->signal_event_count, ev->event_id,
		 ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Non-signal events get their IDs from the dedicated non-signal range */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters; clearing their event makes them report failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}
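/*
 * Tear down the process signal page. The backing store is only freed when it
 * was allocated by allocate_signal_page(); pages supplied through
 * kfd_event_page_set() are owned by their caller.
 */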
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		if (page->need_to_free_pages)
			free_pages((unsigned long)page->kernel_address,
				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
	       ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size)
{
	struct kfd_signal_page *page;

	if (p->signal_page)
		return -EBUSY;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Initialize all slots to the unsignaled value */
	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;

	return 0;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/*
	 * An auto-reset event only stays signaled if nobody is waiting on it;
	 * waiters consume the signal immediately. waitqueue_active() is safe
	 * here because p->event_mutex is held both here and wherever the wait
	 * queue is updated.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
				     struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * This may run outside the context of the signaling process, so
	 * kfd_lookup_process_by_pasid() takes a reference on the process;
	 * kfd_unref_process() drops it at the end of this function.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * The partial ID lookup failed or no ID bits were valid:
		 * fall back to an exhaustive search of the signal slots.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
			/* With relatively few events, iterating over the IDR is faster */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/*
			 * With many events, scanning the signal slots and
			 * looking up only the signaled IDs is faster
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
				      sizeof(struct kfd_event_waiter),
				      GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
					struct kfd_event_waiter *waiter,
					uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to wait on this event */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}
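/*
 * Test the wait condition across all waiters. Returns
 * KFD_IOC_WAIT_RESULT_COMPLETE when one event (or, with @all, every event)
 * has activated, KFD_IOC_WAIT_RESULT_TIMEOUT when the condition is not yet
 * met, and KFD_IOC_WAIT_RESULT_FAIL if any waited-on event has been
 * destroyed.
 */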
static uint32_t test_event_condition(bool all, uint32_t num_events,
				     struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
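/*
 * For every activated memory-exception event, copy its exception data into
 * the matching entry of the user-space kfd_event_data array.
 */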
static int copy_signaled_event_data(uint32_t num_events,
				    struct kfd_event_waiter *event_waiters,
				    struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
					 sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}
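/*
 * Convert a user-supplied timeout in milliseconds to jiffies, mapping the
 * special immediate and infinite values to 0 and MAX_SCHEDULE_TIMEOUT.
 */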
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * Clamp to a value msecs_to_jiffies treats as finite and add one
	 * jiffy so the wait never ends earlier than requested.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				   sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
						   event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check the wait condition once before sleeping */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/*
		 * This should not happen: events cannot be destroyed while
		 * we are holding the event_mutex.
		 */
		goto out_unlock;
	}

	/* Add to the wait queues of all events that have not signaled yet */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * A non-fatal signal is pending: give up the wait and
			 * allow the syscall to be restarted.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/*
		 * Set the task state to interruptible sleep before checking
		 * the wake-up condition. A concurrent wake-up puts the task
		 * back into the runnable state, in which case
		 * schedule_timeout() returns without sleeping and the updated
		 * condition is re-checked on the next loop iteration.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * copy_signaled_event_data() may sleep, so it must run after the
	 * task state has been restored to TASK_RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* The requested mapping must not exceed the size of the signal page */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
	    get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* No signal page has been set up yet; reject the mapping */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
		 vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the signal page into the calling process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}
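/*
 * Signal every event of the given type; memory events also receive the
 * supplied exception data, and a memory exception additionally sends SIGSEGV
 * to the process. If no event of the requested type exists, the process is
 * terminated with SIGTERM when the driver's send_sigterm option is set, or
 * the unhandled exception is logged otherwise. Called with p->event_mutex
 * held.
 */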
static void lookup_events_by_type_and_signal(struct kfd_process *p,
					     int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
				"Event found: id %X type %d",
				ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	if (type == KFD_EVENT_TYPE_MEMORY) {
		dev_warn(kfd_device,
			 "Sending SIGSEGV to HSA Process with PID %d ",
			 p->lead_thread->pid);
		send_sig(SIGSEGV, p->lead_thread, 0);
	}

	/* Send a termination signal if no event of this type was found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				 "Sending SIGTERM to HSA Process with PID %d ",
				 p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
			    unsigned long address, bool is_write_requested,
			    bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * This may run outside the context of the faulting process, so
	 * kfd_lookup_process_by_pasid() takes a reference on the process;
	 * kfd_unref_process() drops it below.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return;

	/* Take a reference on the mm_struct so it cannot go away under us */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return;
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Assume the address is not mapped until the VMA lookup proves otherwise */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma && address >= vma->vm_start) {
		memory_exception_data.failure.NotPresent = 0;

		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
			memory_exception_data.failure.ReadOnly = 1;
		else
			memory_exception_data.failure.ReadOnly = 0;

		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
			memory_exception_data.failure.NoExecute = 1;
		else
			memory_exception_data.failure.NoExecute = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
		 memory_exception_data.failure.NotPresent,
		 memory_exception_data.failure.NoExecute,
		 memory_exception_data.failure.ReadOnly);

	/* Workaround: on Raven, do not signal or kill the process for IOMMU faults */
	if (dev->device_info->asic_family != CHIP_RAVEN) {
		mutex_lock(&p->event_mutex);

		/* Signal memory-exception events with the collected fault data */
		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
						 &memory_exception_data);

		mutex_unlock(&p->event_mutex);
	}

	kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * This may run outside the context of the affected process, so
	 * kfd_lookup_process_by_pasid() takes a reference on the process
	 * that is dropped again below.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	mutex_lock(&p->event_mutex);

	/* Signal all HW-exception events registered by this process */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
			       struct kfd_vm_fault_info *info)
{
	struct kfd_event *ev;
	uint32_t id;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct kfd_hsa_memory_exception_data memory_exception_data;

	if (!p)
		return;
	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.failure.imprecise = true;
	/* Fill in precise failure details when fault info is available */
	if (info) {
		memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
		memory_exception_data.failure.NotPresent =
			info->prot_valid ? 1 : 0;
		memory_exception_data.failure.NoExecute =
			info->prot_exec ? 1 : 0;
		memory_exception_data.failure.ReadOnly =
			info->prot_write ? 1 : 0;
		memory_exception_data.failure.imprecise = 0;
	}
	mutex_lock(&p->event_mutex);

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			ev->memory_exception_data = memory_exception_data;
			set_event(ev);
		}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

void kfd_signal_reset_event(struct kfd_dev *dev)
{
	struct kfd_hsa_hw_exception_data hw_exception_data;
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct kfd_process *p;
	struct kfd_event *ev;
	unsigned int temp;
	uint32_t id, idx;
	int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
			  KFD_HW_EXCEPTION_ECC :
			  KFD_HW_EXCEPTION_GPU_HANG;

	/* Whole-GPU reset: device memory is lost for all processes */
	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
	hw_exception_data.gpu_id = dev->id;
	hw_exception_data.memory_lost = 1;
	hw_exception_data.reset_cause = reset_cause;

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.failure.imprecise = true;

	idx = srcu_read_lock(&kfd_processes_srcu);
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->event_mutex);
		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		idr_for_each_entry_continue(&p->event_idr, ev, id) {
			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
				ev->hw_exception_data = hw_exception_data;
				set_event(ev);
			}
			if (ev->type == KFD_EVENT_TYPE_MEMORY &&
			    reset_cause == KFD_HW_EXCEPTION_ECC) {
				ev->memory_exception_data = memory_exception_data;
				set_event(ev);
			}
		}
		mutex_unlock(&p->event_mutex);
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}