This source file includes the following definitions:
- i915_vma_alloc
- i915_vma_free
- vma_print_allocator
- vma_print_allocator
- active_to_vma
- __i915_vma_active
- __i915_vma_retire
- vma_create
- vma_lookup
- i915_vma_instance
- i915_vma_bind
- i915_vma_pin_iomap
- i915_vma_flush_writes
- i915_vma_unpin_iomap
- i915_vma_unpin_and_release
- i915_vma_misplaced
- __i915_vma_set_map_and_fenceable
- color_differs
- i915_gem_valid_gtt_space
- assert_bind_count
- i915_vma_insert
- i915_vma_remove
- __i915_vma_do_pin
- i915_vma_close
- __i915_vma_remove_closed
- i915_vma_reopen
- __i915_vma_destroy
- i915_vma_destroy
- i915_vma_parked
- __i915_vma_iounmap
- i915_vma_revoke_mmap
- i915_vma_move_to_active
- i915_vma_unbind
- i915_vma_make_unshrinkable
- i915_vma_make_shrinkable
- i915_vma_make_purgeable
- i915_global_vma_shrink
- i915_global_vma_exit
- i915_global_vma_init
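
Before the listing itself, here is a minimal sketch of how these entry points are typically combined by callers elsewhere in the driver. The helper name example_map_into_ggtt and the simplified error handling are hypothetical and not part of this file; i915_vma_pin() is the inline wrapper from i915_vma.h around __i915_vma_do_pin() defined below, and in this kernel generation most of these paths assert that the struct_mutex is held.

/* Hypothetical caller sketch: map an object into the GGTT, use the
 * mapping, then release it again.
 */
static int example_map_into_ggtt(struct drm_i915_gem_object *obj,
				 struct i915_address_space *ggtt_vm)
{
	struct i915_vma *vma;
	int err;

	/* Look up (or create) the unique vma for this (obj, vm, view) tuple */
	vma = i915_vma_instance(obj, ggtt_vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Reserve GTT space and write the PTEs (i915_vma_insert + i915_vma_bind) */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		return err;

	/* ... use vma->node.start, i915_vma_pin_iomap(), etc. ... */

	i915_vma_unpin(vma);
	return i915_vma_unbind(vma); /* tear down the PTEs and free the node */
}
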
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27
28 #include "display/intel_frontbuffer.h"
29
30 #include "gt/intel_engine.h"
31 #include "gt/intel_gt.h"
32
33 #include "i915_drv.h"
34 #include "i915_globals.h"
35 #include "i915_trace.h"
36 #include "i915_vma.h"
37
38 static struct i915_global_vma {
39 struct i915_global base;
40 struct kmem_cache *slab_vmas;
41 } global;
42
43 struct i915_vma *i915_vma_alloc(void)
44 {
45 return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
46 }
47
48 void i915_vma_free(struct i915_vma *vma)
49 {
50 return kmem_cache_free(global.slab_vmas, vma);
51 }
52
53 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
54
55 #include <linux/stackdepot.h>
56
57 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
58 {
59 unsigned long *entries;
60 unsigned int nr_entries;
61 char buf[512];
62
63 if (!vma->node.stack) {
64 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
65 vma->node.start, vma->node.size, reason);
66 return;
67 }
68
69 nr_entries = stack_depot_fetch(vma->node.stack, &entries);
70 stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
71 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
72 vma->node.start, vma->node.size, reason, buf);
73 }
74
75 #else
76
77 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
78 {
79 }
80
81 #endif
82
83 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
84 {
85 return container_of(ref, typeof(struct i915_vma), active);
86 }
87
88 static int __i915_vma_active(struct i915_active *ref)
89 {
90 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
91 }
92
93 static void __i915_vma_retire(struct i915_active *ref)
94 {
95 i915_vma_put(active_to_vma(ref));
96 }
97
98 static struct i915_vma *
99 vma_create(struct drm_i915_gem_object *obj,
100 struct i915_address_space *vm,
101 const struct i915_ggtt_view *view)
102 {
103 struct i915_vma *vma;
104 struct rb_node *rb, **p;
105
106 /* The aliasing_ppgtt should never be used directly! */
107 GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
108
109 vma = i915_vma_alloc();
110 if (vma == NULL)
111 return ERR_PTR(-ENOMEM);
112
113 vma->vm = vm;
114 vma->ops = &vm->vma_ops;
115 vma->obj = obj;
116 vma->resv = obj->base.resv;
117 vma->size = obj->base.size;
118 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
119
120 i915_active_init(vm->i915, &vma->active,
121 __i915_vma_active, __i915_vma_retire);
122
123 /* Declare ourselves safe for use inside shrinkers */
124 if (IS_ENABLED(CONFIG_LOCKDEP)) {
125 fs_reclaim_acquire(GFP_KERNEL);
126 might_lock(&vma->active.mutex);
127 fs_reclaim_release(GFP_KERNEL);
128 }
129
130 INIT_LIST_HEAD(&vma->closed_link);
131
132 if (view && view->type != I915_GGTT_VIEW_NORMAL) {
133 vma->ggtt_view = *view;
134 if (view->type == I915_GGTT_VIEW_PARTIAL) {
135 GEM_BUG_ON(range_overflows_t(u64,
136 view->partial.offset,
137 view->partial.size,
138 obj->base.size >> PAGE_SHIFT));
139 vma->size = view->partial.size;
140 vma->size <<= PAGE_SHIFT;
141 GEM_BUG_ON(vma->size > obj->base.size);
142 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
143 vma->size = intel_rotation_info_size(&view->rotated);
144 vma->size <<= PAGE_SHIFT;
145 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
146 vma->size = intel_remapped_info_size(&view->remapped);
147 vma->size <<= PAGE_SHIFT;
148 }
149 }
150
151 if (unlikely(vma->size > vm->total))
152 goto err_vma;
153
154 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
155
156 if (i915_is_ggtt(vm)) {
157 if (unlikely(overflows_type(vma->size, u32)))
158 goto err_vma;
159
160 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
161 i915_gem_object_get_tiling(obj),
162 i915_gem_object_get_stride(obj));
163 if (unlikely(vma->fence_size < vma->size ||
164 vma->fence_size > vm->total))
165 goto err_vma;
166
167 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
168
169 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
170 i915_gem_object_get_tiling(obj),
171 i915_gem_object_get_stride(obj));
172 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
173
174 vma->flags |= I915_VMA_GGTT;
175 }
176
177 spin_lock(&obj->vma.lock);
178
179 rb = NULL;
180 p = &obj->vma.tree.rb_node;
181 while (*p) {
182 struct i915_vma *pos;
183 long cmp;
184
185 rb = *p;
186 pos = rb_entry(rb, struct i915_vma, obj_node);
187
188 /*
189 * If an equivalent vma (same vm and view) already exists in
190 * the tree, another thread raced with us and created it first;
191 * free our copy and return the existing instance instead.
192 */
193 cmp = i915_vma_compare(pos, vm, view);
194 if (cmp == 0) {
195 spin_unlock(&obj->vma.lock);
196 i915_vma_free(vma);
197 return pos;
198 }
199
200 if (cmp < 0)
201 p = &rb->rb_right;
202 else
203 p = &rb->rb_left;
204 }
205 rb_link_node(&vma->obj_node, rb, p);
206 rb_insert_color(&vma->obj_node, &obj->vma.tree);
207
208 if (i915_vma_is_ggtt(vma))
209 /*
210 * GGTT vmas are placed at the head of the object's vma list,
211 * ppGTT vmas at the tail, so that walkers which only care
212 * about the (usually single) GGTT binding encounter it first
213 * and can stop early.
214 */
215 list_add(&vma->obj_link, &obj->vma.list);
216 else
217 list_add_tail(&vma->obj_link, &obj->vma.list);
218
219 spin_unlock(&obj->vma.lock);
220
221 mutex_lock(&vm->mutex);
222 list_add(&vma->vm_link, &vm->unbound_list);
223 mutex_unlock(&vm->mutex);
224
225 return vma;
226
227 err_vma:
228 i915_vma_free(vma);
229 return ERR_PTR(-E2BIG);
230 }
231
232 static struct i915_vma *
233 vma_lookup(struct drm_i915_gem_object *obj,
234 struct i915_address_space *vm,
235 const struct i915_ggtt_view *view)
236 {
237 struct rb_node *rb;
238
239 rb = obj->vma.tree.rb_node;
240 while (rb) {
241 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
242 long cmp;
243
244 cmp = i915_vma_compare(vma, vm, view);
245 if (cmp == 0)
246 return vma;
247
248 if (cmp < 0)
249 rb = rb->rb_right;
250 else
251 rb = rb->rb_left;
252 }
253
254 return NULL;
255 }
256
257 /**
258 * i915_vma_instance - return the singleton instance of the VMA
259 * @obj: parent &struct drm_i915_gem_object to be mapped
260 * @vm: address space in which the mapping is located
261 * @view: additional mapping requirements (GGTT only)
262 *
263 * i915_vma_instance() looks up an existing VMA of @obj in @vm with the
264 * same @view characteristics. If a match is not found, one is created.
265 * Once created, the VMA is kept until either the object is freed or the
266 * address space is closed; only one VMA exists per (obj, vm, view)
267 * tuple. The lookup is serialised by obj->vma.lock, and races between
268 * concurrent creators are resolved inside vma_create().
269 *
270 * Returns the vma, or an error pointer.
271 */
272 struct i915_vma *
273 i915_vma_instance(struct drm_i915_gem_object *obj,
274 struct i915_address_space *vm,
275 const struct i915_ggtt_view *view)
276 {
277 struct i915_vma *vma;
278
279 GEM_BUG_ON(view && !i915_is_ggtt(vm));
280 GEM_BUG_ON(vm->closed);
281
282 spin_lock(&obj->vma.lock);
283 vma = vma_lookup(obj, vm, view);
284 spin_unlock(&obj->vma.lock);
285
286 /* vma_create() will resolve the race if another creates the vma */
287 if (unlikely(!vma))
288 vma = vma_create(obj, vm, view);
289
290 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
291 return vma;
292 }
293
294 /**
295 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
296 * @vma: VMA to map
297 * @cache_level: mapping cache level
298 * @flags: flags like global or local mapping
299 *
300 * DMA addresses are taken from the scatter-gather table of this object.
301 * The requested binding (I915_VMA_GLOBAL_BIND and/or I915_VMA_LOCAL_BIND)
302 * is built from @flags and handed to the vm's bind_vma() implementation.
303 */
304 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
305 u32 flags)
306 {
307 u32 bind_flags;
308 u32 vma_flags;
309 int ret;
310
311 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
312 GEM_BUG_ON(vma->size > vma->node.size);
313
314 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
315 vma->node.size,
316 vma->vm->total)))
317 return -ENODEV;
318
319 if (GEM_DEBUG_WARN_ON(!flags))
320 return -EINVAL;
321
322 bind_flags = 0;
323 if (flags & PIN_GLOBAL)
324 bind_flags |= I915_VMA_GLOBAL_BIND;
325 if (flags & PIN_USER)
326 bind_flags |= I915_VMA_LOCAL_BIND;
327
328 vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
329 if (flags & PIN_UPDATE)
330 bind_flags |= vma_flags;
331 else
332 bind_flags &= ~vma_flags;
333 if (bind_flags == 0)
334 return 0;
335
336 GEM_BUG_ON(!vma->pages);
337
338 trace_i915_vma_bind(vma, bind_flags);
339 ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
340 if (ret)
341 return ret;
342
343 vma->flags |= bind_flags;
344 return 0;
345 }
346
347 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
348 {
349 void __iomem *ptr;
350 int err;
351
352 /* Access through the GTT requires the device to be awake. */
353 assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
354
355 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
356 if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
357 err = -ENODEV;
358 goto err;
359 }
360
361 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
362 GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
363
364 ptr = vma->iomap;
365 if (ptr == NULL) {
366 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
367 vma->node.start,
368 vma->node.size);
369 if (ptr == NULL) {
370 err = -ENOMEM;
371 goto err;
372 }
373
374 vma->iomap = ptr;
375 }
376
377 __i915_vma_pin(vma);
378
379 err = i915_vma_pin_fence(vma);
380 if (err)
381 goto err_unpin;
382
383 i915_vma_set_ggtt_write(vma);
384 return ptr;
385
386 err_unpin:
387 __i915_vma_unpin(vma);
388 err:
389 return IO_ERR_PTR(err);
390 }
391
392 void i915_vma_flush_writes(struct i915_vma *vma)
393 {
394 if (!i915_vma_has_ggtt_write(vma))
395 return;
396
397 intel_gt_flush_ggtt_writes(vma->vm->gt);
398
399 i915_vma_unset_ggtt_write(vma);
400 }
401
402 void i915_vma_unpin_iomap(struct i915_vma *vma)
403 {
404 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
405
406 GEM_BUG_ON(vma->iomap == NULL);
407
408 i915_vma_flush_writes(vma);
409
410 i915_vma_unpin_fence(vma);
411 i915_vma_unpin(vma);
412 }
413
414 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
415 {
416 struct i915_vma *vma;
417 struct drm_i915_gem_object *obj;
418
419 vma = fetch_and_zero(p_vma);
420 if (!vma)
421 return;
422
423 obj = vma->obj;
424 GEM_BUG_ON(!obj);
425
426 i915_vma_unpin(vma);
427 i915_vma_close(vma);
428
429 if (flags & I915_VMA_RELEASE_MAP)
430 i915_gem_object_unpin_map(obj);
431
432 i915_gem_object_put(obj);
433 }
434
435 bool i915_vma_misplaced(const struct i915_vma *vma,
436 u64 size, u64 alignment, u64 flags)
437 {
438 if (!drm_mm_node_allocated(&vma->node))
439 return false;
440
441 if (vma->node.size < size)
442 return true;
443
444 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
445 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
446 return true;
447
448 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
449 return true;
450
451 if (flags & PIN_OFFSET_BIAS &&
452 vma->node.start < (flags & PIN_OFFSET_MASK))
453 return true;
454
455 if (flags & PIN_OFFSET_FIXED &&
456 vma->node.start != (flags & PIN_OFFSET_MASK))
457 return true;
458
459 return false;
460 }
461
462 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
463 {
464 bool mappable, fenceable;
465
466 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
467 GEM_BUG_ON(!vma->fence_size);
468
469 fenceable = (vma->node.size >= vma->fence_size &&
470 IS_ALIGNED(vma->node.start, vma->fence_alignment));
471
472 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
473
474 if (mappable && fenceable)
475 vma->flags |= I915_VMA_CAN_FENCE;
476 else
477 vma->flags &= ~I915_VMA_CAN_FENCE;
478 }
479
480 static bool color_differs(struct drm_mm_node *node, unsigned long color)
481 {
482 return node->allocated && node->color != color;
483 }
484
485 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
486 {
487 struct drm_mm_node *node = &vma->node;
488 struct drm_mm_node *other;
489
490 /*
491 * On some machines we have to be careful when putting differing types
492 * of snoopable memory together to avoid the prefetcher crossing memory
493 * domains and dying. During vm initialisation, we decide whether or not
494 * these constraints apply and set the drm_mm.color_adjust
495 * appropriately. When that callback is unset, any placement is valid.
496 */
497 if (vma->vm->mm.color_adjust == NULL)
498 return true;
499
500 /* Only valid to be called on an already inserted vma */
501 GEM_BUG_ON(!drm_mm_node_allocated(node));
502 GEM_BUG_ON(list_empty(&node->node_list));
503
504 other = list_prev_entry(node, node_list);
505 if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
506 return false;
507
508 other = list_next_entry(node, node_list);
509 if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
510 return false;
511
512 return true;
513 }
514
515 static void assert_bind_count(const struct drm_i915_gem_object *obj)
516 {
517 /*
518 * Combine the assertion that the object is bound with the assertion
519 * that we have pinned its pages: an object should never be bound more
520 * times than its pages are pinned. (For complete accuracy we assume
521 * that no one else is pinning the pages, but as a rough assertion
522 * that we will not run into problems later, this will do!)
523 */
524 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
525 }
526
527 /**
528 * i915_vma_insert - finds a slot for the vma in its address space
529 * @vma: the vma
530 * @size: requested size in bytes (can be larger than the VMA)
531 * @alignment: required alignment
532 * @flags: mask of PIN_* flags to use
533 *
534 * First we try to allocate some free space that meets the requirements for
535 * the VMA. Failing that, if the flags permit, the allocator will evict an
536 * old VMA in an attempt to make room for this one. The object's backing
537 * pages are pinned for as long as the resulting node stays allocated.
538 *
539 * Returns 0 on success, negative error code otherwise.
540 */
541 static int
542 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
543 {
544 struct drm_i915_private *dev_priv = vma->vm->i915;
545 unsigned int cache_level;
546 u64 start, end;
547 int ret;
548
549 GEM_BUG_ON(i915_vma_is_closed(vma));
550 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
551 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
552
553 size = max(size, vma->size);
554 alignment = max(alignment, vma->display_alignment);
555 if (flags & PIN_MAPPABLE) {
556 size = max_t(typeof(size), size, vma->fence_size);
557 alignment = max_t(typeof(alignment),
558 alignment, vma->fence_alignment);
559 }
560
561 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
562 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
563 GEM_BUG_ON(!is_power_of_2(alignment));
564
565 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
566 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
567
568 end = vma->vm->total;
569 if (flags & PIN_MAPPABLE)
570 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
571 if (flags & PIN_ZONE_4G)
572 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
573 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
574
575 /* If binding the object/GGTT view requires more space than the entire
576 * aperture has, reject it early before evicting everything in a vain
577 * attempt to find space.
578 */
579 if (size > end) {
580 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
581 size, flags & PIN_MAPPABLE ? "mappable" : "total",
582 end);
583 return -ENOSPC;
584 }
585
586 if (vma->obj) {
587 ret = i915_gem_object_pin_pages(vma->obj);
588 if (ret)
589 return ret;
590
591 cache_level = vma->obj->cache_level;
592 } else {
593 cache_level = 0;
594 }
595
596 GEM_BUG_ON(vma->pages);
597
598 ret = vma->ops->set_pages(vma);
599 if (ret)
600 goto err_unpin;
601
602 if (flags & PIN_OFFSET_FIXED) {
603 u64 offset = flags & PIN_OFFSET_MASK;
604 if (!IS_ALIGNED(offset, alignment) ||
605 range_overflows(offset, size, end)) {
606 ret = -EINVAL;
607 goto err_clear;
608 }
609
610 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
611 size, offset, cache_level,
612 flags);
613 if (ret)
614 goto err_clear;
615 } else {
616 /*
617 * We only support huge gtt pages through the 48b PPGTT,
618 * however we also don't want to force any alignment for
619 * objects which need to be tightly packed into the low 32bits.
620 *
621 * Note that we assume that GGTT are limited to 4GiB for the
622 * foreseeable future. See also i915_ggtt_offset().
623 */
624 if (upper_32_bits(end - 1) &&
625 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
626 /*
627 * Align the node to the largest page size present in the
628 * object's backing store (and to at least 2M), so that the
629 * GTT can actually be populated with huge page entries; a
630 * misaligned start would force everything down to 4K PTEs.
631 */
632 u64 page_alignment =
633 rounddown_pow_of_two(vma->page_sizes.sg |
634 I915_GTT_PAGE_SIZE_2M);
635
636 /*
637 * Check we don't expand for the limited Global GTT
638 * (mappable aperture is even more precious!). This
639 * also checks that we exclude the aliasing-ppgtt.
640 */
641 GEM_BUG_ON(i915_vma_is_ggtt(vma));
642
643 alignment = max(alignment, page_alignment);
644
645 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
646 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
647 }
648
649 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
650 size, alignment, cache_level,
651 start, end, flags);
652 if (ret)
653 goto err_clear;
654
655 GEM_BUG_ON(vma->node.start < start);
656 GEM_BUG_ON(vma->node.start + vma->node.size > end);
657 }
658 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
659 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
660
661 mutex_lock(&vma->vm->mutex);
662 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
663 mutex_unlock(&vma->vm->mutex);
664
665 if (vma->obj) {
666 atomic_inc(&vma->obj->bind_count);
667 assert_bind_count(vma->obj);
668 }
669
670 return 0;
671
672 err_clear:
673 vma->ops->clear_pages(vma);
674 err_unpin:
675 if (vma->obj)
676 i915_gem_object_unpin_pages(vma->obj);
677 return ret;
678 }
679
680 static void
681 i915_vma_remove(struct i915_vma *vma)
682 {
683 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
684 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
685
686 vma->ops->clear_pages(vma);
687
688 mutex_lock(&vma->vm->mutex);
689 drm_mm_remove_node(&vma->node);
690 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
691 mutex_unlock(&vma->vm->mutex);
692
693 /*
694 * The node has been released above, so the vma no longer counts
695 * as bound; update the owning object's accounting to match.
696 */
697 if (vma->obj) {
698 struct drm_i915_gem_object *obj = vma->obj;
699
700 atomic_dec(&obj->bind_count);
701
702 /*
703 * And finally now the object is completely decoupled from this
704 * vma, we can drop its hold on the backing storage and allow
705 * it to be reaped by the shrinker.
706 */
707 i915_gem_object_unpin_pages(obj);
708 assert_bind_count(obj);
709 }
710 }
711
712 int __i915_vma_do_pin(struct i915_vma *vma,
713 u64 size, u64 alignment, u64 flags)
714 {
715 const unsigned int bound = vma->flags;
716 int ret;
717
718 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
719 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
720 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
721
722 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
723 ret = -EBUSY;
724 goto err_unpin;
725 }
726
727 if ((bound & I915_VMA_BIND_MASK) == 0) {
728 ret = i915_vma_insert(vma, size, alignment, flags);
729 if (ret)
730 goto err_unpin;
731 }
732 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
733
734 ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
735 if (ret)
736 goto err_remove;
737
738 GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
739
740 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
741 __i915_vma_set_map_and_fenceable(vma);
742
743 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
744 return 0;
745
746 err_remove:
747 if ((bound & I915_VMA_BIND_MASK) == 0) {
748 i915_vma_remove(vma);
749 GEM_BUG_ON(vma->pages);
750 GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
751 }
752 err_unpin:
753 __i915_vma_unpin(vma);
754 return ret;
755 }
756
757 void i915_vma_close(struct i915_vma *vma)
758 {
759 struct drm_i915_private *i915 = vma->vm->i915;
760 unsigned long flags;
761
762 GEM_BUG_ON(i915_vma_is_closed(vma));
763
764 /*
765 * The vma is not unbound or destroyed here. Instead it is moved onto
766 * the GT's closed_vma list and reaped at the next idle point, in
767 * i915_vma_parked(), or earlier if the object itself is freed. By
768 * postponing the unbind we allow the vma to be resurrected by a
769 * later lookup (see i915_vma_reopen()), avoiding the cost of
770 * rebinding it. This matters for DRI clients that repeatedly close
771 * and reopen handles to the same objects frame after frame; tearing
772 * down and rebuilding those bindings every frame would be wasted
773 * work. The trade-off is that the GTT space remains consumed until
774 * the GPU next idles.
775 */
776 spin_lock_irqsave(&i915->gt.closed_lock, flags);
777 list_add(&vma->closed_link, &i915->gt.closed_vma);
778 spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
779 }
780
781 static void __i915_vma_remove_closed(struct i915_vma *vma)
782 {
783 struct drm_i915_private *i915 = vma->vm->i915;
784
785 if (!i915_vma_is_closed(vma))
786 return;
787
788 spin_lock_irq(&i915->gt.closed_lock);
789 list_del_init(&vma->closed_link);
790 spin_unlock_irq(&i915->gt.closed_lock);
791 }
792
793 void i915_vma_reopen(struct i915_vma *vma)
794 {
795 __i915_vma_remove_closed(vma);
796 }
797
798 static void __i915_vma_destroy(struct i915_vma *vma)
799 {
800 GEM_BUG_ON(vma->node.allocated);
801 GEM_BUG_ON(vma->fence);
802
803 mutex_lock(&vma->vm->mutex);
804 list_del(&vma->vm_link);
805 mutex_unlock(&vma->vm->mutex);
806
807 if (vma->obj) {
808 struct drm_i915_gem_object *obj = vma->obj;
809
810 spin_lock(&obj->vma.lock);
811 list_del(&vma->obj_link);
812 rb_erase(&vma->obj_node, &vma->obj->vma.tree);
813 spin_unlock(&obj->vma.lock);
814 }
815
816 i915_active_fini(&vma->active);
817
818 i915_vma_free(vma);
819 }
820
821 void i915_vma_destroy(struct i915_vma *vma)
822 {
823 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
824
825 GEM_BUG_ON(i915_vma_is_pinned(vma));
826
827 __i915_vma_remove_closed(vma);
828
829 WARN_ON(i915_vma_unbind(vma));
830 GEM_BUG_ON(i915_vma_is_active(vma));
831
832 __i915_vma_destroy(vma);
833 }
834
835 void i915_vma_parked(struct drm_i915_private *i915)
836 {
837 struct i915_vma *vma, *next;
838
839 spin_lock_irq(&i915->gt.closed_lock);
840 list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
841 list_del_init(&vma->closed_link);
842 spin_unlock_irq(&i915->gt.closed_lock);
843
844 i915_vma_destroy(vma);
845
846 spin_lock_irq(&i915->gt.closed_lock);
847 }
848 spin_unlock_irq(&i915->gt.closed_lock);
849 }
850
851 static void __i915_vma_iounmap(struct i915_vma *vma)
852 {
853 GEM_BUG_ON(i915_vma_is_pinned(vma));
854
855 if (vma->iomap == NULL)
856 return;
857
858 io_mapping_unmap(vma->iomap);
859 vma->iomap = NULL;
860 }
861
862 void i915_vma_revoke_mmap(struct i915_vma *vma)
863 {
864 struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
865 u64 vma_offset;
866
867 lockdep_assert_held(&vma->vm->mutex);
868
869 if (!i915_vma_has_userfault(vma))
870 return;
871
872 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
873 GEM_BUG_ON(!vma->obj->userfault_count);
874
875 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
876 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
877 drm_vma_node_offset_addr(node) + vma_offset,
878 vma->size,
879 1);
880
881 i915_vma_unset_userfault(vma);
882 if (!--vma->obj->userfault_count)
883 list_del(&vma->obj->userfault_link);
884 }
885
886 int i915_vma_move_to_active(struct i915_vma *vma,
887 struct i915_request *rq,
888 unsigned int flags)
889 {
890 struct drm_i915_gem_object *obj = vma->obj;
891 int err;
892
893 assert_vma_held(vma);
894 assert_object_held(obj);
895 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
896
897 /*
898 * Track this request on the vma's active tracker. Acquiring the
899 * active reference also takes a reference on the vma itself (see
900 * __i915_vma_active()), which is not released until the request
901 * is retired (__i915_vma_retire()), keeping the vma alive for as
902 * long as the GPU may still access it through this binding. The
903 * frontbuffer and reservation object are updated alongside, below.
904 */
905 err = i915_active_ref(&vma->active, rq->timeline, rq);
906 if (unlikely(err))
907 return err;
908
909 if (flags & EXEC_OBJECT_WRITE) {
910 if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
911 i915_active_ref(&obj->frontbuffer->write,
912 rq->timeline,
913 rq);
914
915 dma_resv_add_excl_fence(vma->resv, &rq->fence);
916 obj->write_domain = I915_GEM_DOMAIN_RENDER;
917 obj->read_domains = 0;
918 } else {
919 err = dma_resv_reserve_shared(vma->resv, 1);
920 if (unlikely(err))
921 return err;
922
923 dma_resv_add_shared_fence(vma->resv, &rq->fence);
924 obj->write_domain = 0;
925 }
926 obj->read_domains |= I915_GEM_GPU_DOMAINS;
927 obj->mm.dirty = true;
928
929 GEM_BUG_ON(!i915_vma_is_active(vma));
930 return 0;
931 }
932
933 int i915_vma_unbind(struct i915_vma *vma)
934 {
935 int ret;
936
937 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
938
939 /*
940 * First wait upon any activity as retiring the request may
941 * have side-effects such as unpinning or even unbinding this vma.
942 */
943 might_sleep();
944 if (i915_vma_is_active(vma)) {
945 /*
946 * When a closed VMA is retired, it is unbound - eek.
947 * In order to prevent it from being recursively closed,
948 * take a pin on the vma so that the second unbind is
949 * aborted.
950 *
951 * Even more scary is that the retire callback may free
952 * the object (last active vma). To prevent the explosion
953 * we defer the actual object free to a worker that can
954 * only proceed once it acquires the struct_mutex (which
955 * we currently hold, therefore it cannot free this object
956 * before we are finished).
957 */
958 __i915_vma_pin(vma);
959 ret = i915_active_wait(&vma->active);
960 __i915_vma_unpin(vma);
961 if (ret)
962 return ret;
963 }
964 GEM_BUG_ON(i915_vma_is_active(vma));
965
966 if (i915_vma_is_pinned(vma)) {
967 vma_print_allocator(vma, "is pinned");
968 return -EBUSY;
969 }
970
971 if (!drm_mm_node_allocated(&vma->node))
972 return 0;
973
974 if (i915_vma_is_map_and_fenceable(vma)) {
975 /*
976 * Check that we have flushed all writes through the GGTT
977 * before the unbind; because of the non-strict nature of
978 * those indirect writes they may otherwise end up
979 * referencing the GGTT PTE after the unbind.
980 */
981 i915_vma_flush_writes(vma);
982 GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
983
984 /* release the fence reg _after_ flushing */
985 mutex_lock(&vma->vm->mutex);
986 ret = i915_vma_revoke_fence(vma);
987 mutex_unlock(&vma->vm->mutex);
988 if (ret)
989 return ret;
990
991 /* Force a pagefault for domain tracking on next user access */
992 mutex_lock(&vma->vm->mutex);
993 i915_vma_revoke_mmap(vma);
994 mutex_unlock(&vma->vm->mutex);
995
996 __i915_vma_iounmap(vma);
997 vma->flags &= ~I915_VMA_CAN_FENCE;
998 }
999 GEM_BUG_ON(vma->fence);
1000 GEM_BUG_ON(i915_vma_has_userfault(vma));
1001
1002 if (likely(!vma->vm->closed)) {
1003 trace_i915_vma_unbind(vma);
1004 vma->ops->unbind_vma(vma);
1005 }
1006 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
1007
1008 i915_vma_remove(vma);
1009
1010 return 0;
1011 }
1012
1013 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1014 {
1015 i915_gem_object_make_unshrinkable(vma->obj);
1016 return vma;
1017 }
1018
1019 void i915_vma_make_shrinkable(struct i915_vma *vma)
1020 {
1021 i915_gem_object_make_shrinkable(vma->obj);
1022 }
1023
1024 void i915_vma_make_purgeable(struct i915_vma *vma)
1025 {
1026 i915_gem_object_make_purgeable(vma->obj);
1027 }
1028
1029 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1030 #include "selftests/i915_vma.c"
1031 #endif
1032
1033 static void i915_global_vma_shrink(void)
1034 {
1035 kmem_cache_shrink(global.slab_vmas);
1036 }
1037
1038 static void i915_global_vma_exit(void)
1039 {
1040 kmem_cache_destroy(global.slab_vmas);
1041 }
1042
1043 static struct i915_global_vma global = { {
1044 .shrink = i915_global_vma_shrink,
1045 .exit = i915_global_vma_exit,
1046 } };
1047
1048 int __init i915_global_vma_init(void)
1049 {
1050 global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1051 if (!global.slab_vmas)
1052 return -ENOMEM;
1053
1054 i915_global_register(&global.base);
1055 return 0;
1056 }