This source file includes the following definitions:
- add_object
- del_object
- __i915_gem_userptr_set_active
- userptr_mn_invalidate_range_start
- i915_mmu_notifier_create
- i915_gem_userptr_release__mmu_notifier
- i915_mmu_notifier_find
- i915_gem_userptr_init__mmu_notifier
- i915_mmu_notifier_free
- __i915_gem_userptr_set_active
- i915_gem_userptr_release__mmu_notifier
- i915_gem_userptr_init__mmu_notifier
- i915_mmu_notifier_free
- __i915_mm_struct_find
- i915_gem_userptr_init__mm_struct
- __i915_mm_struct_free__worker
- __i915_mm_struct_free
- i915_gem_userptr_release__mm_struct
- __i915_gem_userptr_alloc_pages
- __i915_gem_userptr_get_pages_worker
- __i915_gem_userptr_get_pages_schedule
- i915_gem_userptr_get_pages
- i915_gem_userptr_put_pages
- i915_gem_userptr_release
- i915_gem_userptr_dmabuf_export
- i915_gem_userptr_ioctl
- i915_gem_init_userptr
- i915_gem_cleanup_userptr
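
Before the listing itself, a quick illustration of how the ioctl implemented here is reached from userspace may help. This is a minimal sketch, not taken from the driver or from any test suite: the helper name create_userptr_bo is invented, error handling is reduced to returning -errno, and it assumes an already-open i915 render-node file descriptor plus a page-aligned buffer whose size is a multiple of the page size (both conditions are enforced by i915_gem_userptr_ioctl below).

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>	/* or <libdrm/i915_drm.h>, depending on how the uapi headers are installed */

/* Hypothetical helper: wrap an existing, page-aligned user allocation in a
 * GEM handle via DRM_IOCTL_I915_GEM_USERPTR, the ioctl served by this file. */
static int create_userptr_bo(int drm_fd, void *ptr, uint64_t size,
			     uint32_t flags, uint32_t *handle)
{
	struct drm_i915_gem_userptr arg;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;	/* must be page aligned */
	arg.user_size = size;		/* must be a multiple of the page size */
	arg.flags = flags;		/* e.g. I915_USERPTR_READ_ONLY, or 0 */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -errno;

	*handle = arg.handle;		/* an ordinary GEM handle from here on */
	return 0;
}

The returned handle can then be used with execbuffer like any other GEM object; the kernel side of that flow is what the rest of this listing implements.
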
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2012-2014 Intel Corporation
5  */
6
7 #include <linux/mmu_context.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/mempolicy.h>
10 #include <linux/swap.h>
11 #include <linux/sched/mm.h>
12
13 #include <drm/i915_drm.h>
14
15 #include "i915_drv.h"
16 #include "i915_gem_ioctls.h"
17 #include "i915_gem_object.h"
18 #include "i915_scatterlist.h"
19
20 struct i915_mm_struct {
21 struct mm_struct *mm;
22 struct drm_i915_private *i915;
23 struct i915_mmu_notifier *mn;
24 struct hlist_node node;
25 struct kref kref;
26 struct work_struct work;
27 };
28
29 #if defined(CONFIG_MMU_NOTIFIER)
30 #include <linux/interval_tree.h>
31
32 struct i915_mmu_notifier {
33 spinlock_t lock;
34 struct hlist_node node;
35 struct mmu_notifier mn;
36 struct rb_root_cached objects;
37 struct i915_mm_struct *mm;
38 };
39
40 struct i915_mmu_object {
41 struct i915_mmu_notifier *mn;
42 struct drm_i915_gem_object *obj;
43 struct interval_tree_node it;
44 };
45
46 static void add_object(struct i915_mmu_object *mo)
47 {
48 GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
49 interval_tree_insert(&mo->it, &mo->mn->objects);
50 }
51
52 static void del_object(struct i915_mmu_object *mo)
53 {
54 if (RB_EMPTY_NODE(&mo->it.rb))
55 return;
56
57 interval_tree_remove(&mo->it, &mo->mn->objects);
58 RB_CLEAR_NODE(&mo->it.rb);
59 }
60
61 static void
62 __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
63 {
64 struct i915_mmu_object *mo = obj->userptr.mmu_object;
65
66 /*
67  * An object is tracked in its notifier's interval tree only while its
68  * backing pages are in use (or being acquired), so that an invalidation
69  * of the user range can find and unbind it. set_active(true) inserts the
70  * object, set_active(false) removes it again; the tree is protected by
71  * the notifier spinlock.
72  */
77 if (!mo)
78 return;
79
80 spin_lock(&mo->mn->lock);
81 if (value)
82 add_object(mo);
83 else
84 del_object(mo);
85 spin_unlock(&mo->mn->lock);
86 }
87
88 static int
89 userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
90 const struct mmu_notifier_range *range)
91 {
92 struct i915_mmu_notifier *mn =
93 container_of(_mn, struct i915_mmu_notifier, mn);
94 struct interval_tree_node *it;
95 struct mutex *unlock = NULL;
96 unsigned long end;
97 int ret = 0;
98
99 if (RB_EMPTY_ROOT(&mn->objects.rb_root))
100 return 0;
101
102 /* interval ranges are inclusive, but invalidate range is exclusive */
103 end = range->end - 1;
104
105 spin_lock(&mn->lock);
106 it = interval_tree_iter_first(&mn->objects, range->start, end);
107 while (it) {
108 struct drm_i915_gem_object *obj;
109
110 if (!mmu_notifier_range_blockable(range)) {
111 ret = -EAGAIN;
112 break;
113 }
114
115 /*
116  * The mmu_object is released late when destroying the GEM object, so
117  * it is possible to find an object here whose final reference is
118  * already being dropped. Only process it if we can still acquire a
119  * reference; otherwise skip to the next node in the tree.
120  */
125 obj = container_of(it, struct i915_mmu_object, it)->obj;
126 if (!kref_get_unless_zero(&obj->base.refcount)) {
127 it = interval_tree_iter_next(it, range->start, end);
128 continue;
129 }
130 spin_unlock(&mn->lock);
131
132 if (!unlock) {
133 unlock = &mn->mm->i915->drm.struct_mutex;
134
135 switch (mutex_trylock_recursive(unlock)) {
136 default:
137 case MUTEX_TRYLOCK_FAILED:
138 if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
139 i915_gem_object_put(obj);
140 return -EINTR;
141 }
142 /* fall through */
143 case MUTEX_TRYLOCK_SUCCESS:
144 break;
145
146 case MUTEX_TRYLOCK_RECURSIVE:
147 unlock = ERR_PTR(-EEXIST);
148 break;
149 }
150 }
151
152 ret = i915_gem_object_unbind(obj,
153 I915_GEM_OBJECT_UNBIND_ACTIVE);
154 if (ret == 0)
155 ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
156 i915_gem_object_put(obj);
157 if (ret)
158 goto unlock;
159
160 spin_lock(&mn->lock);
161
162 /*
163  * As we do not (yet) protect the tree from concurrent insertion over
164  * this range, there is no guarantee that this search will terminate
165  * given a pathological workload.
166  */
167 it = interval_tree_iter_first(&mn->objects, range->start, end);
168 }
169 spin_unlock(&mn->lock);
170
171 unlock:
172 if (!IS_ERR_OR_NULL(unlock))
173 mutex_unlock(unlock);
174
175 return ret;
176
177 }
178
179 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
180 .invalidate_range_start = userptr_mn_invalidate_range_start,
181 };
182
183 static struct i915_mmu_notifier *
184 i915_mmu_notifier_create(struct i915_mm_struct *mm)
185 {
186 struct i915_mmu_notifier *mn;
187
188 mn = kmalloc(sizeof(*mn), GFP_KERNEL);
189 if (mn == NULL)
190 return ERR_PTR(-ENOMEM);
191
192 spin_lock_init(&mn->lock);
193 mn->mn.ops = &i915_gem_userptr_notifier;
194 mn->objects = RB_ROOT_CACHED;
195 mn->mm = mm;
196
197 return mn;
198 }
199
200 static void
201 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
202 {
203 struct i915_mmu_object *mo;
204
205 mo = fetch_and_zero(&obj->userptr.mmu_object);
206 if (!mo)
207 return;
208
209 spin_lock(&mo->mn->lock);
210 del_object(mo);
211 spin_unlock(&mo->mn->lock);
212 kfree(mo);
213 }
214
215 static struct i915_mmu_notifier *
216 i915_mmu_notifier_find(struct i915_mm_struct *mm)
217 {
218 struct i915_mmu_notifier *mn;
219 int err = 0;
220
221 mn = mm->mn;
222 if (mn)
223 return mn;
224
225 mn = i915_mmu_notifier_create(mm);
226 if (IS_ERR(mn))
227 err = PTR_ERR(mn);
228
229 down_write(&mm->mm->mmap_sem);
230 mutex_lock(&mm->i915->mm_lock);
231 if (mm->mn == NULL && !err) {
232 /* Protected by mmap_sem (write-lock) */
233 err = __mmu_notifier_register(&mn->mn, mm->mm);
234 if (!err) {
235 /* Protected by mm_lock */
236 mm->mn = fetch_and_zero(&mn);
237 }
238 } else if (mm->mn) {
239 /*
240  * Someone else raced and successfully installed the mmu
241  * notifier, we can cancel our own errors.
242  */
243 err = 0;
244 }
245 mutex_unlock(&mm->i915->mm_lock);
246 up_write(&mm->mm->mmap_sem);
247
248 if (mn && !IS_ERR(mn))
249 kfree(mn);
250
251 return err ? ERR_PTR(err) : mm->mn;
252 }
253
254 static int
255 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
256 unsigned flags)
257 {
258 struct i915_mmu_notifier *mn;
259 struct i915_mmu_object *mo;
260
261 if (flags & I915_USERPTR_UNSYNCHRONIZED)
262 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
263
264 if (WARN_ON(obj->userptr.mm == NULL))
265 return -EINVAL;
266
267 mn = i915_mmu_notifier_find(obj->userptr.mm);
268 if (IS_ERR(mn))
269 return PTR_ERR(mn);
270
271 mo = kzalloc(sizeof(*mo), GFP_KERNEL);
272 if (!mo)
273 return -ENOMEM;
274
275 mo->mn = mn;
276 mo->obj = obj;
277 mo->it.start = obj->userptr.ptr;
278 mo->it.last = obj->userptr.ptr + obj->base.size - 1;
279 RB_CLEAR_NODE(&mo->it.rb);
280
281 obj->userptr.mmu_object = mo;
282 return 0;
283 }
284
285 static void
286 i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
287 struct mm_struct *mm)
288 {
289 if (mn == NULL)
290 return;
291
292 mmu_notifier_unregister(&mn->mn, mm);
293 kfree(mn);
294 }
295
296 #else
297
298 static void
299 __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
300 {
301 }
302
303 static void
304 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
305 {
306 }
307
308 static int
309 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
310 unsigned flags)
311 {
312 if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
313 return -ENODEV;
314
315 if (!capable(CAP_SYS_ADMIN))
316 return -EPERM;
317
318 return 0;
319 }
320
321 static void
322 i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
323 struct mm_struct *mm)
324 {
325 }
326
327 #endif
328
329 static struct i915_mm_struct *
330 __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
331 {
332 struct i915_mm_struct *mm;
333
334 /* Protected by dev_priv->mm_lock */
335 hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
336 if (mm->mm == real)
337 return mm;
338
339 return NULL;
340 }
341
342 static int
343 i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
344 {
345 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
346 struct i915_mm_struct *mm;
347 int ret = 0;
348
349 /*
350  * Each userptr object keeps a reference to a per-process i915_mm_struct
351  * wrapping the creating task's mm_struct. Under dev_priv->mm_lock, look
352  * up an existing entry in the dev_priv->mm_structs hash, or allocate a
353  * new one, grab a reference on current->mm and insert it. The matching
354  * release happens in i915_gem_userptr_release__mm_struct().
355  */
359 mutex_lock(&dev_priv->mm_lock);
360 mm = __i915_mm_struct_find(dev_priv, current->mm);
361 if (mm == NULL) {
362 mm = kmalloc(sizeof(*mm), GFP_KERNEL);
363 if (mm == NULL) {
364 ret = -ENOMEM;
365 goto out;
366 }
367
368 kref_init(&mm->kref);
369 mm->i915 = to_i915(obj->base.dev);
370
371 mm->mm = current->mm;
372 mmgrab(current->mm);
373
374 mm->mn = NULL;
375
376 /* Protected by dev_priv->mm_lock */
377 hash_add(dev_priv->mm_structs,
378 &mm->node, (unsigned long)mm->mm);
379 } else
380 kref_get(&mm->kref);
381
382 obj->userptr.mm = mm;
383 out:
384 mutex_unlock(&dev_priv->mm_lock);
385 return ret;
386 }
387
388 static void
389 __i915_mm_struct_free__worker(struct work_struct *work)
390 {
391 struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
392 i915_mmu_notifier_free(mm->mn, mm->mm);
393 mmdrop(mm->mm);
394 kfree(mm);
395 }
396
397 static void
398 __i915_mm_struct_free(struct kref *kref)
399 {
400 struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
401
402 /* Protected by dev_priv->mm_lock */
403 hash_del(&mm->node);
404 mutex_unlock(&mm->i915->mm_lock);
405
406 INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
407 queue_work(mm->i915->mm.userptr_wq, &mm->work);
408 }
409
410 static void
411 i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
412 {
413 if (obj->userptr.mm == NULL)
414 return;
415
416 kref_put_mutex(&obj->userptr.mm->kref,
417 __i915_mm_struct_free,
418 &to_i915(obj->base.dev)->mm_lock);
419 obj->userptr.mm = NULL;
420 }
421
422 struct get_pages_work {
423 struct work_struct work;
424 struct drm_i915_gem_object *obj;
425 struct task_struct *task;
426 };
427
428 static struct sg_table *
429 __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
430 struct page **pvec, unsigned long num_pages)
431 {
432 unsigned int max_segment = i915_sg_segment_size();
433 struct sg_table *st;
434 unsigned int sg_page_sizes;
435 int ret;
436
437 st = kmalloc(sizeof(*st), GFP_KERNEL);
438 if (!st)
439 return ERR_PTR(-ENOMEM);
440
441 alloc_table:
442 ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
443 0, num_pages << PAGE_SHIFT,
444 max_segment,
445 GFP_KERNEL);
446 if (ret) {
447 kfree(st);
448 return ERR_PTR(ret);
449 }
450
451 ret = i915_gem_gtt_prepare_pages(obj, st);
452 if (ret) {
453 sg_free_table(st);
454
455 if (max_segment > PAGE_SIZE) {
456 max_segment = PAGE_SIZE;
457 goto alloc_table;
458 }
459
460 kfree(st);
461 return ERR_PTR(ret);
462 }
463
464 sg_page_sizes = i915_sg_page_sizes(st->sgl);
465
466 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
467
468 return st;
469 }
470
471 static void
472 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
473 {
474 struct get_pages_work *work = container_of(_work, typeof(*work), work);
475 struct drm_i915_gem_object *obj = work->obj;
476 const unsigned long npages = obj->base.size >> PAGE_SHIFT;
477 unsigned long pinned;
478 struct page **pvec;
479 int ret;
480
481 ret = -ENOMEM;
482 pinned = 0;
483
484 pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
485 if (pvec != NULL) {
486 struct mm_struct *mm = obj->userptr.mm->mm;
487 unsigned int flags = 0;
488
489 if (!i915_gem_object_is_readonly(obj))
490 flags |= FOLL_WRITE;
491
492 ret = -EFAULT;
493 if (mmget_not_zero(mm)) {
494 down_read(&mm->mmap_sem);
495 while (pinned < npages) {
496 ret = get_user_pages_remote
497 (work->task, mm,
498 obj->userptr.ptr + pinned * PAGE_SIZE,
499 npages - pinned,
500 flags,
501 pvec + pinned, NULL, NULL);
502 if (ret < 0)
503 break;
504
505 pinned += ret;
506 }
507 up_read(&mm->mmap_sem);
508 mmput(mm);
509 }
510 }
511
512 mutex_lock(&obj->mm.lock);
513 if (obj->userptr.work == &work->work) {
514 struct sg_table *pages = ERR_PTR(ret);
515
516 if (pinned == npages) {
517 pages = __i915_gem_userptr_alloc_pages(obj, pvec,
518 npages);
519 if (!IS_ERR(pages)) {
520 pinned = 0;
521 pages = NULL;
522 }
523 }
524
525 obj->userptr.work = ERR_CAST(pages);
526 if (IS_ERR(pages))
527 __i915_gem_userptr_set_active(obj, false);
528 }
529 mutex_unlock(&obj->mm.lock);
530
531 release_pages(pvec, pinned);
532 kvfree(pvec);
533
534 i915_gem_object_put(obj);
535 put_task_struct(work->task);
536 kfree(work);
537 }
538
539 static struct sg_table *
540 __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
541 {
542 struct get_pages_work *work;
543
544 /*
545  * Spawn a worker to acquire the user pages with get_user_pages_remote()
546  * outside of the locks held on this path. The caller sees -EAGAIN and
547  * keeps retrying until either the fast path in
548  * i915_gem_userptr_get_pages() succeeds or the worker has populated the
549  * object. If the work is cancelled or superseded, it still runs, but
550  * its result is discarded (see the obj->userptr.work check in the
551  * worker).
552  */
563 work = kmalloc(sizeof(*work), GFP_KERNEL);
564 if (work == NULL)
565 return ERR_PTR(-ENOMEM);
566
567 obj->userptr.work = &work->work;
568
569 work->obj = i915_gem_object_get(obj);
570
571 work->task = current;
572 get_task_struct(work->task);
573
574 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
575 queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
576
577 return ERR_PTR(-EAGAIN);
578 }
579
580 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
581 {
582 const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
583 struct mm_struct *mm = obj->userptr.mm->mm;
584 struct page **pvec;
585 struct sg_table *pages;
586 bool active;
587 int pinned;
588
589 /*
590  * If a previous attempt to acquire the pages is still outstanding (or
591  * has failed), report that rather than starting another one. Otherwise,
592  * when called from the owning process we try the lockless
593  * __get_user_pages_fast() path first; anything we cannot pin directly
594  * is handed off to the worker and signalled to the caller via -EAGAIN.
595  */
606 if (obj->userptr.work) {
607 /* active flag should still be held for the pending work */
608 if (IS_ERR(obj->userptr.work))
609 return PTR_ERR(obj->userptr.work);
610 else
611 return -EAGAIN;
612 }
613
614 pvec = NULL;
615 pinned = 0;
616
617 if (mm == current->mm) {
618 pvec = kvmalloc_array(num_pages, sizeof(struct page *),
619 GFP_KERNEL |
620 __GFP_NORETRY |
621 __GFP_NOWARN);
622 /*
623  * Using __get_user_pages_fast() with a read-only
624  * access is questionable. A read-only page may be
625  * COW-broken, and then this might end up giving
626  * the wrong side of the COW..
627  *
628  * We may or may not care.
629  */
630 if (pvec)
631 pinned = __get_user_pages_fast(obj->userptr.ptr,
632 num_pages,
633 !i915_gem_object_is_readonly(obj),
634 pvec);
635 }
636
637 active = false;
638 if (pinned < 0) {
639 pages = ERR_PTR(pinned);
640 pinned = 0;
641 } else if (pinned < num_pages) {
642 pages = __i915_gem_userptr_get_pages_schedule(obj);
643 active = pages == ERR_PTR(-EAGAIN);
644 } else {
645 pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
646 active = !IS_ERR(pages);
647 }
648 if (active)
649 __i915_gem_userptr_set_active(obj, true);
650
651 if (IS_ERR(pages))
652 release_pages(pvec, pinned);
653 kvfree(pvec);
654
655 return PTR_ERR_OR_ZERO(pages);
656 }
657
658 static void
659 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
660 struct sg_table *pages)
661 {
662 struct sgt_iter sgt_iter;
663 struct page *page;
664
665 /* Cancel any inflight work and force them to restart their gup */
666 obj->userptr.work = NULL;
667 __i915_gem_userptr_set_active(obj, false);
668 if (!pages)
669 return;
670
671 __i915_gem_object_release_shmem(obj, pages, true);
672 i915_gem_gtt_finish_pages(obj, pages);
673
674 /*
675  * We always mark objects as dirty when they are used by the GPU,
676  * just in case. However, if we set the vma as being read-only we know
677  * that the object will never have been written to.
678  */
679 if (i915_gem_object_is_readonly(obj))
680 obj->mm.dirty = false;
681
682 for_each_sgt_page(page, sgt_iter, pages) {
683 if (obj->mm.dirty && trylock_page(page)) {
684 /*
685  * As this may not be anonymous memory (e.g. shmem) but exist on a
686  * real mapping, we have to lock the page in order to dirty it --
687  * holding the page reference is not sufficient to prevent the inode
688  * from being truncated. Play safe and take the lock.
689  *
690  * The page may also already be locked, for example by an mmu-notifier
691  * invalidation during page migration, which is why only trylock_page()
692  * is used above: skipping the dirty bit in that case is preferable to
693  * deadlocking here.
694  */
702 set_page_dirty(page);
703 unlock_page(page);
704 }
705
706 mark_page_accessed(page);
707 put_page(page);
708 }
709 obj->mm.dirty = false;
710
711 sg_free_table(pages);
712 kfree(pages);
713 }
714
715 static void
716 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
717 {
718 i915_gem_userptr_release__mmu_notifier(obj);
719 i915_gem_userptr_release__mm_struct(obj);
720 }
721
722 static int
723 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
724 {
725 if (obj->userptr.mmu_object)
726 return 0;
727
728 return i915_gem_userptr_init__mmu_notifier(obj, 0);
729 }
730
731 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
732 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
733 I915_GEM_OBJECT_IS_SHRINKABLE |
734 I915_GEM_OBJECT_NO_GGTT |
735 I915_GEM_OBJECT_ASYNC_CANCEL,
736 .get_pages = i915_gem_userptr_get_pages,
737 .put_pages = i915_gem_userptr_put_pages,
738 .dmabuf_export = i915_gem_userptr_dmabuf_export,
739 .release = i915_gem_userptr_release,
740 };
741
742 /*
743  * Creates a new mm object that wraps some normal memory from the process
744  * context - user memory.
745  *
746  * We impose several restrictions upon the memory being mapped
747  * into the GPU.
748  * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
749  * 2. It must be normal system memory, not a pointer into another map of IO
750  *    space (e.g. it must not be a GTT mmapping of another object).
751  * 3. We only allow a bo as large as we could in theory map into the GTT,
752  *    that is we limit the size to the total size of the GTT.
753  * 4. The bo is marked as being snoopable. The CPU cache will be used
754  *    for reads and writes and the bo will be write-back cached.
755  *
756  * Synchronisation between multiple users and the GPU is left to userspace
757  * through the normal set-domain-ioctl. The kernel will enforce that the
758  * GPU relinquishes the VMA before it is returned back to the system
759  * i.e. upon free(), munmap() or process termination. However, the userspace
760  * malloc typically caches the memory, so not all free()s will actually
761  * release the backing storage immediately.
762  *
763  * If you think this is a good interface to use to pass GPU memory between
764  * processes, please use dma-buf instead. In fact, wherever possible use
765  * dma-buf instead.
766  */
777 int
778 i915_gem_userptr_ioctl(struct drm_device *dev,
779 void *data,
780 struct drm_file *file)
781 {
782 struct drm_i915_private *dev_priv = to_i915(dev);
783 struct drm_i915_gem_userptr *args = data;
784 struct drm_i915_gem_object *obj;
785 int ret;
786 u32 handle;
787
788 if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
789 /* We cannot support coherent userptr objects on hw without
790  * LLC and broken snooping.
791  */
792 return -ENODEV;
793 }
794
795 if (args->flags & ~(I915_USERPTR_READ_ONLY |
796 I915_USERPTR_UNSYNCHRONIZED))
797 return -EINVAL;
798
799 if (!args->user_size)
800 return -EINVAL;
801
802 if (offset_in_page(args->user_ptr | args->user_size))
803 return -EINVAL;
804
805 if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
806 return -EFAULT;
807
808 if (args->flags & I915_USERPTR_READ_ONLY) {
809 struct i915_address_space *vm;
810
811 /*
812  * On almost all of the older hw, we cannot tell the GPU that
813  * a page is readonly.
814  */
815 vm = dev_priv->kernel_context->vm;
816 if (!vm || !vm->has_read_only)
817 return -ENODEV;
818 }
819
820 obj = i915_gem_object_alloc();
821 if (obj == NULL)
822 return -ENOMEM;
823
824 drm_gem_private_object_init(dev, &obj->base, args->user_size);
825 i915_gem_object_init(obj, &i915_gem_userptr_ops);
826 obj->read_domains = I915_GEM_DOMAIN_CPU;
827 obj->write_domain = I915_GEM_DOMAIN_CPU;
828 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
829
830 obj->userptr.ptr = args->user_ptr;
831 if (args->flags & I915_USERPTR_READ_ONLY)
832 i915_gem_object_set_readonly(obj);
833
834 /* And keep a pointer to the current->mm for resolving the user pages
835  * at binding. This means that we need to hook into the mmu_notifier
836  * in order to detect if the mmu is destroyed.
837  */
838 ret = i915_gem_userptr_init__mm_struct(obj);
839 if (ret == 0)
840 ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
841 if (ret == 0)
842 ret = drm_gem_handle_create(file, &obj->base, &handle);
843
844 /* drop reference from allocate - handle holds it now */
845 i915_gem_object_put(obj);
846 if (ret)
847 return ret;
848
849 args->handle = handle;
850 return 0;
851 }
852
853 int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
854 {
855 mutex_init(&dev_priv->mm_lock);
856 hash_init(dev_priv->mm_structs);
857
858 dev_priv->mm.userptr_wq =
859 alloc_workqueue("i915-userptr-acquire",
860 WQ_HIGHPRI | WQ_UNBOUND,
861 0);
862 if (!dev_priv->mm.userptr_wq)
863 return -ENOMEM;
864
865 return 0;
866 }
867
868 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
869 {
870 destroy_workqueue(dev_priv->mm.userptr_wq);
871 }