/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

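/*
 * Bookkeeping for a client mm_struct: one i915_mm_struct is shared (via the
 * dev_priv->mm_structs hash and its kref) by every userptr object created
 * from the same process address space, and it owns the optional mmu notifier
 * for that mm. Teardown is deferred to a worker (__i915_mm_struct_free()) to
 * avoid dropping the last mm reference while struct_mutex is held.
 */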
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

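/*
 * Per-mm notifier state: userptr objects are normally tracked in an interval
 * tree keyed by their user address range, so invalidate_range_start can find
 * overlaps quickly. If a client creates overlapping userptr objects, the tree
 * cannot be used and the plain list is walked instead (has_linear).
 */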
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	bool has_linear;
};

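/*
 * One entry per userptr object registered with the notifier: the interval
 * tree node covers the object's user address range, and the work item runs
 * __cancel_userptr__worker() when that range is invalidated.
 */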
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	struct work_struct work;
	bool active;
	bool is_linear;
};

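/*
 * Runs from the ordinary workqueue (not from the notifier callback itself) so
 * that struct_mutex can be taken: unbind every VMA, release the backing pages
 * and drop the reference taken by cancel_userptr() when it scheduled us.
 */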
static void __cancel_userptr__worker(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}

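/*
 * Called under mn->lock from the invalidation callback. Schedules the cancel
 * worker at most once per activation (guarded by mo->active) and returns the
 * end of the object's range so the interval-tree walk can resume behind it.
 */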
static unsigned long cancel_userptr(struct i915_mmu_object *mo)
{
	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;

	/* The mmu_object is released late when destroying the
	 * GEM object so it is entirely possible to gain a
	 * reference on an object in the process of being freed
	 * since our serialisation is via the spinlock and not
	 * the struct_mutex - and consequently use it after it
	 * is freed and then double free it.
	 */
	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
		schedule_work(&mo->work);
		/* only schedule one work packet to avoid the refleak */
		mo->active = false;
	}

	return end;
}

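/*
 * mmu_notifier callback: the CPU page tables for [start, end) are about to
 * change, so cancel every userptr object whose range intersects it before the
 * pages go away.
 */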
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	if (mn->has_linear) {
		list_for_each_entry(mo, &mn->linear, link) {
			if (mo->it.last < start || mo->it.start > end)
				continue;

			cancel_userptr(mo);
		}
	} else {
		struct interval_tree_node *it;

		it = interval_tree_iter_first(&mn->objects, start, end);
		while (it) {
			mo = container_of(it, struct i915_mmu_object, it);
			start = cancel_userptr(mo);
			it = interval_tree_iter_next(it, start, end);
		}
	}
	spin_unlock(&mn->lock);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret = 0;

	/* By this point we have already done a lot of expensive setup that
	 * we do not want to repeat just because the caller (e.g. X) has a
	 * signal pending (and partly because of that expensive setup, X
	 * using an interrupt timer is likely to get stuck in an EINTR loop).
	 */
	mutex_lock(&dev->struct_mutex);

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0)
		list_add(&mo->link, &mn->linear);

	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	list_del(&mo->link);
	if (mo->is_linear)
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	else
		interval_tree_remove(&mo->it, &mn->objects);
	spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;
	INIT_WORK(&mo->work, __cancel_userptr__worker);

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}

333 
334 static void
i915_mmu_notifier_free(struct i915_mmu_notifier * mn,struct mm_struct * mm)335 i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
336 		       struct mm_struct *mm)
337 {
338 	if (mn == NULL)
339 		return;
340 
341 	mmu_notifier_unregister(&mn->mn, mm);
342 	kfree(mn);
343 }
344 
345 #else
346 
347 static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object * obj)348 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
349 {
350 }
351 
352 static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object * obj,unsigned flags)353 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
354 				    unsigned flags)
355 {
356 	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
357 		return -ENODEV;
358 
359 	if (!capable(CAP_SYS_ADMIN))
360 		return -EPERM;
361 
362 	return 0;
363 }
364 
365 static void
i915_mmu_notifier_free(struct i915_mmu_notifier * mn,struct mm_struct * mm)366 i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
367 		       struct mm_struct *mm)
368 {
369 }
370 
371 #endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

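/*
 * Deferred get_user_pages request; work->task is the process on whose behalf
 * the pages are pinned.
 */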
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

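/*
 * Build the sg_table for the pinned pages. With swiotlb active we keep one
 * page per sg entry rather than letting sg_alloc_table_from_pages() coalesce
 * contiguous pages, presumably so that no segment exceeds what the bounce
 * buffers can handle.
 */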
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held.  To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value || !work_pending(&obj->userptr.mmu_object->work))
		obj->userptr.mmu_object->active = value;
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

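/*
 * Slow path: pin the pages with get_user_pages() from a worker, where we may
 * take mmap_sem without deadlocking against struct_mutex. The result is only
 * installed if obj->userptr.work still points at this work item, i.e. we have
 * not been cancelled or superseded in the meantime.
 */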
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(npages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		down_read(&mm->mmap_sem);
		while (pinned < npages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     npages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}

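/*
 * get_pages backend for userptr objects: try __get_user_pages_fast() directly
 * when called from the creating process, otherwise (or on partial success)
 * fall back to the worker and return -EAGAIN so the caller retries.
 */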
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
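/*
 * A minimal userspace sketch of driving this ioctl (illustrative only: the
 * fd, size and use_bo() are assumptions; the struct, flags and ioctl number
 * come from i915_drm.h):
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);	// start must be page aligned
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;			// page-aligned, fits in the GTT
 *	arg.flags = 0;				// READ_ONLY / UNSYNCHRONIZED also exist
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_bo(arg.handle);		// handle to a snooped, CPU-coherent bo
 */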
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}