This source file includes the following definitions:
- radeon_gem_object_free
- radeon_gem_object_create
- radeon_gem_set_domain
- radeon_gem_init
- radeon_gem_fini
- radeon_gem_object_open
- radeon_gem_object_close
- radeon_gem_handle_lockup
- radeon_gem_info_ioctl
- radeon_gem_pread_ioctl
- radeon_gem_pwrite_ioctl
- radeon_gem_create_ioctl
- radeon_gem_userptr_ioctl
- radeon_gem_set_domain_ioctl
- radeon_mode_dumb_mmap
- radeon_gem_mmap_ioctl
- radeon_gem_busy_ioctl
- radeon_gem_wait_idle_ioctl
- radeon_gem_set_tiling_ioctl
- radeon_gem_get_tiling_ioctl
- radeon_gem_va_update_vm
- radeon_gem_va_ioctl
- radeon_gem_op_ioctl
- radeon_mode_dumb_create
- radeon_debugfs_gem_info
- radeon_gem_debugfs_init
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

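/* Free callback for a radeon GEM object: drop the MMU notifier
 * registration (if any) and the final reference on the backing BO.
 */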
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

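/* Allocate a BO of @size bytes in @initial_domain and wrap it in a GEM
 * object. A failed VRAM allocation is retried with GTT as a fallback
 * domain; the new object is tracked on rdev->gem.objects for debugfs.
 */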
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;

	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* The maximum BO size is the unpinned GTT size, since the GTT is
	 * used to handle VRAM-to-system-pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

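/* Validate a userspace set_domain request. Waiting for the BO to become
 * idle stands in for a real CPU-domain transition, and moving a
 * dma-buf-shared BO into VRAM is rejected.
 */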
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Nothing to do */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access: wait for the object to go idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO shared as a dma-buf cannot be moved to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from the DRM core whenever a GEM handle to this object is
 * opened in a client's file descriptor: take a per-file reference on
 * the BO's mapping in that client's VM.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

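/* Counterpart of radeon_gem_object_open(): drop the per-file reference
 * and remove the VM mapping once the last handle goes away.
 */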
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

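/* -EDEADLK signals a GPU lockup: attempt a reset and, if it succeeds,
 * return -EAGAIN so the caller retries the operation.
 */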
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

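/* Turn a range of anonymous user memory into a GEM object. The flags
 * select read-only access, anonymous-memory-only mappings, MMU-notifier
 * registration and an immediate validation into the GTT domain.
 */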
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use rdev, not robj->rdev: the gobj reference was just dropped,
	 * so the BO must not be dereferenced anymore */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

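/* Resolve a handle to the fake mmap offset userspace passes to mmap()
 * on the DRM fd. Userptr BOs are refused: their pages already belong
 * to userspace.
 */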
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

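/* Non-blocking busy check: report -EBUSY while fences on the BO are
 * still unsignaled, plus the BO's current memory domain.
 */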
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);

		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

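/* Manage a BO's GPU virtual address in the calling client's VM:
 * RADEON_VA_MAP installs a mapping at args->offset, RADEON_VA_UNMAP
 * removes it again.
 */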
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way the field can be used later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* RADEON_VM_PAGE_VALID and RADEON_VM_PAGE_SYSTEM are managed by
	 * the kernel and must not be requested from userspace.
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if (args->flags & invalid_flags) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

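/* Back the generic DRM dumb-buffer interface with a VRAM BO: compute an
 * aligned pitch and a page-aligned size, then create the object and its
 * handle.
 */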
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}