/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

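/**
 * radeon_gem_object_free - free the radeon BO backing a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down any PRIME attachment and MMU notifier registration, then
 * drops the final reference on the backing radeon_bo.
 */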
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

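/**
 * radeon_gem_object_create - create a GEM object backed by a radeon BO
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: resulting GEM object
 *
 * Allocations larger than the unpinned GTT size are rejected, since the
 * GTT is needed for VRAM to system pool migrations. VRAM allocations
 * fall back to GTT when VRAM is exhausted.
 */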
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

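/**
 * radeon_gem_set_domain - make a BO usable in the requested domain
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domain
 * @wdomain: requested write domain
 *
 * For now this only waits for the BO to become idle when CPU access is
 * requested; other domains are a no-op here.
 */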
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

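/**
 * radeon_gem_init - initialize GEM state
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the device's list of GEM objects. Always returns 0.
 */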
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

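/**
 * radeon_gem_fini - tear down GEM state
 *
 * @rdev: radeon_device pointer
 *
 * Forces deletion of any BOs still on the GEM object list.
 */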
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

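/**
 * radeon_gem_object_close - drop a per-file VM mapping of a GEM object
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belongs to
 *
 * Drops the VM reference taken in radeon_gem_object_open and removes the
 * bo_va once the last reference is gone. Only relevant on CHIP_CAYMAN
 * and newer, where per-file VMs are used.
 */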
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

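/**
 * radeon_gem_handle_lockup - map a GPU lockup to an ioctl return code
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the failed operation
 *
 * -EDEADLK signals a GPU lockup; attempt a reset and, if it succeeds,
 * ask userspace to retry by returning -EAGAIN.
 */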
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
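/**
 * radeon_gem_info_ioctl - report VRAM and GTT sizes to userspace
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_info ioctl arguments
 * @filp: DRM file private
 *
 * Fills in the total VRAM size, the CPU-visible VRAM remaining after
 * subtracting pinned buffers, and the GTT size remaining after
 * subtracting pinned buffers.
 */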
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

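/**
 * radeon_gem_create_ioctl - create a GEM object and return a handle
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_create ioctl arguments
 * @filp: DRM file private
 *
 * Rounds the requested size up to a whole number of pages, creates the
 * BO and installs a handle for it in the calling file.
 */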
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

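/**
 * radeon_gem_userptr_ioctl - create a GEM object from user memory
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_userptr ioctl arguments
 * @filp: DRM file private
 *
 * Wraps a page-aligned range of user memory in a CPU-domain BO. Writable
 * mappings must be anonymous memory with an MMU notifier registered, and
 * the pages can optionally be validated into GTT right away.
 */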
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

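/**
 * radeon_gem_set_domain_ioctl - transition a BO to a new domain
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_set_domain ioctl arguments
 * @filp: DRM file private
 *
 * For now this only waits for the BO to become idle when the CPU domain
 * is requested; see radeon_gem_set_domain().
 */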
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

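/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle of the BO
 * @offset_p: returned mmap offset
 *
 * Userptr BOs cannot be mapped through this interface and yield -EPERM.
 */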
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

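/**
 * radeon_gem_busy_ioctl - check whether a BO is still in use by the GPU
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_busy ioctl arguments
 * @filp: DRM file private
 *
 * Does a non-blocking wait on the BO and reports its current placement
 * domain back to userspace.
 */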
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

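/**
 * radeon_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_wait_idle ioctl arguments
 * @filp: DRM file private
 *
 * Waits up to 30 seconds for all fences on the BO to signal, then
 * flushes the HDP cache via MMIO if the BO lives in VRAM.
 */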
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

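/**
 * radeon_gem_set_tiling_ioctl - set the tiling flags and pitch of a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_set_tiling ioctl arguments
 * @filp: DRM file private
 */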
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

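/**
 * radeon_gem_get_tiling_ioctl - query the tiling flags and pitch of a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_get_tiling ioctl arguments
 * @filp: DRM file private
 */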
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

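/**
 * radeon_gem_va_ioctl - map or unmap a BO in the per-file virtual
 * address space
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_va ioctl arguments
 * @filp: DRM file private
 *
 * Validates the requested offset and flags, then sets or clears the
 * virtual address of the bo_va and updates the page tables. The vm_id
 * field is not supported yet and must be zero.
 */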
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; moving
	 * forward we can then use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

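/**
 * radeon_gem_op_ioctl - query or change the initial domain of a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_op ioctl arguments
 * @filp: DRM file private
 *
 * Userptr BOs are rejected with -EPERM.
 */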
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

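/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: drm_mode_create_dumb arguments
 *
 * Computes pitch and size from the requested width, height and bpp, then
 * allocates the BO in VRAM and returns a handle for it.
 */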
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}