This source file includes the following definitions:
- __vma_matches
- i915_gem_mmap_ioctl
- tile_row_pages
- i915_gem_mmap_gtt_version
- compute_partial_view
- i915_gem_fault
- __i915_gem_object_release_mmap
- i915_gem_object_release_mmap
- create_mmap_offset
- i915_gem_mmap_gtt
- i915_gem_mmap_gtt_ioctl
#include <linux/mman.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_trace.h"
#include "i915_vma.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
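
/*
 * i915_gem_mmap_ioctl - map the shmem backing store of an object into the
 * caller's address space via vm_mmap(). If I915_MMAP_WC is requested (and
 * the CPU supports PAT), the new VMA is switched to write-combining page
 * protection under mmap_sem.
 */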
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

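	/*
	 * Objects without a shmem filp (e.g. imported dma-buf objects) have
	 * no backing pages that can be mapped here.
	 */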
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
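
/*
 * i915_gem_mmap_gtt_version - report the feature level of the GTT mmap
 * fault handling so userspace can discover which mapping restrictions apply
 * (for example, whether objects larger than the mappable aperture can be
 * faulted in through partial views).
 */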
int i915_gem_mmap_gtt_version(void)
{
	return 3;
}
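
/*
 * compute_partial_view - build a GGTT view for the chunk of the object
 * around the faulting page; tiled objects have the chunk rounded up to a
 * whole tile row.
 */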
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
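
	/* If the chunk covers the entire object, just use a normal view. */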
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
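
/*
 * i915_gem_fault - fault handler for GTT mmaps. Pins the object into the
 * mappable aperture (falling back to a partial view when the whole object
 * does not fit), pins the vma's fencing state, and remaps the faulting
 * range of the VMA onto the aperture. Errors are translated into the
 * appropriate VM_FAULT_* codes.
 */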
vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

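	/* Writes into a read-only object are refused outright. */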
	if (i915_gem_object_is_readonly(obj) && write)
		return VM_FAULT_SIGBUS;

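	/*
	 * vmf->pgoff includes the fake GTT mmap offset, so derive the page
	 * offset within the object from the faulting address instead.
	 */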
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	wakeref = intel_runtime_pm_get(rpm);

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_rpm;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_reset;

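	/*
	 * Access to snoopable pages through the GTT is incoherent on
	 * non-LLC platforms, so refuse to set up a GTT mapping for them.
	 */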
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unlock;
	}

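	/*
	 * Now pin the object into the GGTT, first trying a full mappable
	 * binding without evicting anything else.
	 */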
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONBLOCK |
				       PIN_NOEVICT);
	if (IS_ERR(vma)) {
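		/*
		 * The whole object does not fit (or would require eviction):
		 * fall back to a view covering just the faulting chunk.
		 */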
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK;

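		/*
		 * Retry with the (possibly partial) view; if that also fails,
		 * drop PIN_NOSEARCH/PIN_NONBLOCK and force a partial view so
		 * that eviction can make room in the mappable aperture.
		 */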
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		if (IS_ERR(vma)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

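	/*
	 * Finally, remap the faulting range of the userspace VMA onto the
	 * object's location in the mappable aperture.
	 */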
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

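	/*
	 * Track the object as mapped into userspace so that the mapping can
	 * be revoked later via i915_gem_object_release_mmap().
	 */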
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

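	/*
	 * Even on success we unwind through the labels below: the fence and
	 * vma pins taken for the fault are dropped, while the PTEs installed
	 * by remap_io_mapping() remain live in the user's address space.
	 */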
err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
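		/*
		 * -EIO is eaten when the GPU is terminally wedged so that
		 * userspace is not killed over a dead GPU; any other -EIO
		 * (e.g. a swap-in failure) is reported as SIGBUS.
		 */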
		if (!intel_gt_is_wedged(ggtt->vm.gt))
			return VM_FAULT_SIGBUS;
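		/* else: fall through */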
	case -EAGAIN:
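		/*
		 * -EAGAIN indicates the GPU is hung; the reset handler will
		 * recover it and the access will simply fault again.
		 */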
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
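		/*
		 * 0 means the PTEs were installed above; the interrupted and
		 * -EBUSY cases will simply re-fault. Either way the core VM
		 * has nothing further to do for this fault.
		 */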
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -ENOSPC:
	case -EFAULT:
	case -ENODEV:
		return VM_FAULT_SIGBUS;
	default:
		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
		return VM_FAULT_SIGBUS;
	}
}

void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	for_each_ggtt_vma(vma, obj)
		i915_vma_unset_userfault(vma);
}
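
/*
 * i915_gem_object_release_mmap - revoke all existing userspace PTEs for the
 * object's GTT mmap. The fake mmap offset is preserved; the next user access
 * faults again and is fixed up by i915_gem_fault().
 */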
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

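	/*
	 * Revocation of the PTEs is serialised against new faults by the
	 * GGTT vm.mutex, and since the revocation may touch the GGTT we also
	 * hold a runtime-pm wakeref for its duration.
	 */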
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap(obj);

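	/*
	 * Ensure the PTE revocation above is visible to the hardware before
	 * we return and allow further GGTT or register access.
	 */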
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (likely(!err))
		return 0;

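	/*
	 * The mmap offset space may be exhausted by dead objects that have
	 * not been reaped yet: wait for outstanding work, drain the freed
	 * object list and retry.
	 */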
	do {
		err = i915_gem_wait_for_idle(i915,
					     I915_WAIT_INTERRUPTIBLE,
					     MAX_SCHEDULE_TIMEOUT);
		if (err)
			break;

		i915_gem_drain_freed_objects(i915);
		err = drm_gem_create_mmap_offset(&obj->base);
		if (!err)
			break;

	} while (flush_delayed_work(&i915->gem.retire_work));

	return err;
}
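
/*
 * i915_gem_mmap_gtt - allocate (if necessary) the fake mmap offset for an
 * object and return it to the caller. Objects that can never be bound into
 * the GGTT are rejected with -ENODEV.
 */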
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  u32 handle,
		  u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_bind_ggtt(obj)) {
		ret = -ENODEV;
		goto out;
	}

	ret = create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	i915_gem_object_put(obj);
	return ret;
}
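
/*
 * i915_gem_mmap_gtt_ioctl - return the fake mmap offset for an object so
 * userspace can mmap it on the DRM fd; the GGTT binding and PTE setup then
 * happen lazily in i915_gem_fault().
 */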
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif