This source file includes the following definitions:
- shrinker_lock
- shrinker_unlock
- swap_available
- can_release_pages
- unsafe_drop_pages
- try_to_writeback
- i915_gem_shrink
- i915_gem_shrink_all
- i915_gem_shrinker_count
- i915_gem_shrinker_scan
- i915_gem_shrinker_oom
- i915_gem_shrinker_vmap
- i915_gem_driver_register__shrinker
- i915_gem_driver_unregister__shrinker
- i915_gem_shrinker_taints_mutex
- i915_gem_object_make_unshrinkable
- __i915_gem_object_make_shrinkable
- i915_gem_object_make_shrinkable
- i915_gem_object_make_purgeable

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

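/*
 * The shrinker may be invoked while we already hold dev->struct_mutex
 * (direct reclaim from one of our own allocations) or from an unrelated
 * context. shrinker_lock() sorts out which case we are in: reuse the lock
 * if it is already held by us, try to take it otherwise, and only block
 * for it when asked to shrink active objects.
 */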
static bool shrinker_lock(struct drm_i915_private *i915,
                          unsigned int flags,
                          bool *unlock)
{
        struct mutex *m = &i915->drm.struct_mutex;

        switch (mutex_trylock_recursive(m)) {
        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;

        case MUTEX_TRYLOCK_FAILED:
                *unlock = false;
                if (flags & I915_SHRINK_ACTIVE &&
                    mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
                        *unlock = true;
                return *unlock;

        case MUTEX_TRYLOCK_SUCCESS:
                *unlock = true;
                return true;
        }

        BUG();
}

static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
        if (!unlock)
                return;

        mutex_unlock(&i915->drm.struct_mutex);
}

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
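        /* Consider only shrinkable objects. */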
        if (!i915_gem_object_is_shrinkable(obj))
                return false;

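        /*
         * Only report true if freeing the object's pages would actually
         * release memory back to the system. If the pages are pinned for
         * any reason other than being bound to the GPU (i.e. the pin count
         * exceeds the number of GPU bindings), unbinding alone will not
         * drop the last pin on the backing store, so shrinking this object
         * cannot make forward progress.
         */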
        if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
                return false;

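        /*
         * Objects holding a global pin (e.g. scanout buffers) must keep
         * their pages resident, so skip them.
         */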
        if (READ_ONCE(obj->pin_global))
                return false;

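        /*
         * We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */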
        return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
                              unsigned long shrink)
{
        unsigned long flags;

        flags = 0;
        if (shrink & I915_SHRINK_ACTIVE)
                flags = I915_GEM_OBJECT_UNBIND_ACTIVE;

        if (i915_gem_object_unbind(obj, flags) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

        return !i915_gem_object_has_pages(obj);
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                i915_gem_object_truncate(obj);
                /* fall through - truncation marks the object as purged */
        case __I915_MADV_PURGED:
                return;
        }

        if (flags & I915_SHRINK_WRITEBACK)
                i915_gem_object_writeback(obj);
}

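/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for the number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink, e.g. so that
 * purgeable objects are reclaimed preferentially.
 *
 * Note that the amount reported as released is not guaranteed to correspond
 * exactly to how much underlying system memory was reclaimed.
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */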
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
                unsigned long target,
                unsigned long *nr_scanned,
                unsigned int shrink)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &i915->mm.purge_list, ~0u },
                {
                        &i915->mm.shrink_list,
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
                },
                { NULL, 0 },
        }, *phase;
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
        bool unlock;

        if (!shrinker_lock(i915, shrink, &unlock))
                return 0;

        trace_i915_gem_shrink(i915, target, shrink);

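        /*
         * Unbinding objects from the GGTT requires the device to be awake,
         * but it is not worth waking the device just to reclaim a little
         * memory. Only shrink bound objects if we already hold a runtime-pm
         * wakeref; otherwise restrict ourselves to unbound objects.
         */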
        if (shrink & I915_SHRINK_BOUND) {
                wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
                if (!wakeref)
                        shrink &= ~I915_SHRINK_BOUND;
        }

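        /*
         * Walk the purgeable list first, then the full shrink list. As we
         * may drop the object spinlock (and even struct_mutex) while
         * releasing pages, the lists can be rewritten underneath us, so we
         * process strictly one object at a time: move it onto a private
         * still_in_list, take a reference, and revalidate everything after
         * reacquiring the lock.
         */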
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
                struct drm_i915_gem_object *obj;
                unsigned long flags;

                if ((shrink & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);

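                /*
                 * Hold the object spinlock only while manipulating the
                 * lists; it is dropped around the actual page release so
                 * that we may sleep. The reference taken below keeps the
                 * object alive (it may otherwise be freed via RCU) while we
                 * operate on it outside the lock.
                 */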
                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
                                                       mm.link))) {
                        list_move_tail(&obj->mm.link, &still_in_list);

                        if (shrink & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mm.mapping))
                                continue;

                        if (!(shrink & I915_SHRINK_ACTIVE) &&
                            i915_gem_object_is_framebuffer(obj))
                                continue;

                        if (!(shrink & I915_SHRINK_BOUND) &&
                            atomic_read(&obj->bind_count))
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        if (!kref_get_unless_zero(&obj->base.refcount))
                                continue;

                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

                        if (unsafe_drop_pages(obj, shrink)) {
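                                /* May arrive from get_pages on another bo */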
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
                                if (!i915_gem_object_has_pages(obj)) {
                                        try_to_writeback(obj, shrink);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
                                mutex_unlock(&obj->mm.lock);
                        }

                        scanned += obj->base.size >> PAGE_SHIFT;
                        i915_gem_object_put(obj);

                        spin_lock_irqsave(&i915->mm.obj_lock, flags);
                }
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        shrinker_unlock(i915, unlock);

        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
}

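/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink
 * all caches, including actively used and bound objects, waking the device
 * if necessary. It should only be used as a last-ditch effort when memory
 * appears to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */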
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;
        unsigned long freed = 0;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                freed = i915_gem_shrink(i915, -1UL, NULL,
                                        I915_SHRINK_BOUND |
                                        I915_SHRINK_UNBOUND |
                                        I915_SHRINK_ACTIVE);
        }

        return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        unsigned long num_objects;
        unsigned long count;

        count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
        num_objects = READ_ONCE(i915->mm.shrink_count);

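        /*
         * Update our preferred vmscan batch size for the next pass.
         * Our rough guess for an effective batch size is roughly two
         * available GEM objects worth of pages. That is, we don't want
         * the shrinker to fire until it is worth the cost of freeing an
         * entire GEM object.
         */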
        if (num_objects) {
                unsigned long avg = 2 * count / num_objects;

                i915->mm.shrinker.batch =
                        max((i915->mm.shrinker.batch + avg) >> 1,
                            128ul /* default SHRINK_BATCH */);
        }

        return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        unsigned long freed;
        bool unlock;

        sc->nr_scanned = 0;

        if (!shrinker_lock(i915, 0, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_shrink(i915,
                                sc->nr_to_scan,
                                &sc->nr_scanned,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_WRITEBACK);
        if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
                intel_wakeref_t wakeref;

                with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                        freed += i915_gem_shrink(i915,
                                                 sc->nr_to_scan - sc->nr_scanned,
                                                 &sc->nr_scanned,
                                                 I915_SHRINK_ACTIVE |
                                                 I915_SHRINK_BOUND |
                                                 I915_SHRINK_UNBOUND |
                                                 I915_SHRINK_WRITEBACK);
                }
        }

        shrinker_unlock(i915, unlock);

        return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, available, freed_pages;
        intel_wakeref_t wakeref;
        unsigned long flags;

        freed_pages = 0;
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_WRITEBACK);

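        /*
         * We cannot assert that there are no objects with pinned pages left,
         * as we may be allocating inside our own driver. Instead, report how
         * much is still pinned versus how much could be made available.
         */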
        available = unevictable = 0;
        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        available += obj->base.size >> PAGE_SHIFT;
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

        if (freed_pages || available)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned, %lu pages left available.\n",
                        freed_pages, unevictable, available);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        intel_wakeref_t wakeref;
        bool unlock;

        if (!shrinker_lock(i915, 0, &unlock))
                return NOTIFY_DONE;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_VMAPS);

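        /* Also release any idle iomaps held on the GGTT. */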
        mutex_lock(&i915->ggtt.vm.mutex);
        list_for_each_entry_safe(vma, next,
                                 &i915->ggtt.vm.bound_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;

                if (!vma->iomap || i915_vma_is_active(vma))
                        continue;

                mutex_unlock(&i915->ggtt.vm.mutex);
                if (i915_vma_unbind(vma) == 0)
                        freed_pages += count;
                mutex_lock(&i915->ggtt.vm.mutex);
        }
        mutex_unlock(&i915->ggtt.vm.mutex);

        shrinker_unlock(i915, unlock);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
        i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
        i915->mm.shrinker.seeks = DEFAULT_SEEKS;
        i915->mm.shrinker.batch = 4096;
        WARN_ON(register_shrinker(&i915->mm.shrinker));

        i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

        i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
        WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
        unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
                                    struct mutex *mutex)
{
        bool unlock = false;

        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;

        if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
                mutex_acquire(&i915->drm.struct_mutex.dep_map,
                              I915_MM_NORMAL, 0, _RET_IP_);
                unlock = true;
        }

        fs_reclaim_acquire(GFP_KERNEL);

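        /*
         * Record the lock ordering the shrinker will use: under fs_reclaim
         * we may take struct_mutex (at the I915_MM_SHRINKER subclass) and
         * then @mutex. With this dependency in lockdep's graph, any code
         * that holds @mutex across an allocation which can recurse into
         * reclaim (or that nests these locks the other way around) will be
         * flagged as a potential deadlock.
         */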
        mutex_acquire(&i915->drm.struct_mutex.dep_map,
                      I915_MM_SHRINKER, 0, _RET_IP_);

        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, 0, _RET_IP_);

        mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);

        fs_reclaim_release(GFP_KERNEL);

        if (unlock)
                mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
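        /*
         * We can only be called while the pages are pinned or when the
         * pages have just been released. If pinned, we should only be
         * called from a single caller under controlled conditions; and if
         * released, we only call this from the object free handler, so it
         * is extra serialised.
         */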
        if (!list_empty(&obj->mm.link)) {
                struct drm_i915_private *i915 = obj_to_i915(obj);
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                GEM_BUG_ON(list_empty(&obj->mm.link));

                list_del_init(&obj->mm.link);
                i915->mm.shrink_count--;
                i915->mm.shrink_memory -= obj->base.size;

                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
                                              struct list_head *head)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!list_empty(&obj->mm.link));

        if (i915_gem_object_is_shrinkable(obj)) {
                struct drm_i915_private *i915 = obj_to_i915(obj);
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                GEM_BUG_ON(!kref_read(&obj->base.refcount));

                list_add_tail(&obj->mm.link, head);
                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_make_shrinkable(obj,
                                          &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_make_shrinkable(obj,
                                          &obj_to_i915(obj)->mm.purge_list);
}