This source file includes the following definitions:
- vmw_buffer_object
- vmw_user_buffer_object
- vmw_bo_pin_in_placement
- vmw_bo_pin_in_vram_or_gmr
- vmw_bo_pin_in_vram
- vmw_bo_pin_in_start_of_vram
- vmw_bo_unpin
- vmw_bo_get_guest_ptr
- vmw_bo_pin_reserved
- vmw_bo_map_and_cache
- vmw_bo_unmap
- vmw_bo_acc_size
- vmw_bo_bo_free
- vmw_user_bo_destroy
- vmw_bo_init
- vmw_user_bo_release
- vmw_user_bo_ref_obj_release
- vmw_user_bo_alloc
- vmw_user_bo_verify_access
- vmw_user_bo_synccpu_grab
- vmw_user_bo_synccpu_release
- vmw_user_bo_synccpu_ioctl
- vmw_bo_alloc_ioctl
- vmw_bo_unref_ioctl
- vmw_user_bo_lookup
- vmw_user_bo_noref_lookup
- vmw_user_bo_reference
- vmw_bo_fence_single
- vmw_dumb_create
- vmw_dumb_map_offset
- vmw_dumb_destroy
- vmw_bo_swap_notify
- vmw_bo_move_notify
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"
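
/**
 * struct vmw_user_buffer_object - User-space visible buffer object
 *
 * @prime: The TTM prime object providing user-space visibility.
 * @vbo: The embedded struct vmw_buffer_object.
 */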
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};
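
/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
 * object.
 */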
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}
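
/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */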
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}
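
/**
 * vmw_bo_pin_in_placement - Validate a buffer object to the given placement
 * and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @placement: The placement to pin it in.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure.
 */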
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
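
/**
 * vmw_bo_pin_in_vram_or_gmr - Validate a buffer object to VRAM or GMR and
 * pin it there, trying the combined VRAM/GMR placement first and falling
 * back to VRAM only.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */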
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
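
/**
 * vmw_bo_pin_in_vram - Move a buffer object to VRAM and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure.
 */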
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
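
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer object to the start of VRAM
 * and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure.
 */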
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * If the buffer is already in VRAM but not at offset zero, and it
	 * overlaps the target region, move it to system memory first so it
	 * can be revalidated at the start of VRAM below.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* The buffer should end up at the start of VRAM. */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
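
/**
 * vmw_bo_unpin - Unpin the given buffer object without moving it.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure.
 */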
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
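
/**
 * vmw_bo_get_guest_ptr - Fill out an SVGAGuestPtr describing the current
 * placement of a buffer object.
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @ptr: SVGAGuestPtr returning the result. For a buffer in VRAM this is the
 * framebuffer GMR plus the VRAM offset; otherwise it is the GMR id with a
 * zero offset.
 */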
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
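
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */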
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
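
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map.
 * Return: A kernel virtual address, or NULL if mapping failed.
 *
 * Maps the buffer object into the kernel address space, or returns the
 * virtual address of an already existing map. The cached map is torn down
 * on buffer object move, swapout and destruction (see vmw_bo_move_notify(),
 * vmw_bo_swap_notify() and the destructors in this file).
 */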
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
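
/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map is torn down.
 *
 * Tears down a map set up by vmw_bo_map_and_cache(). A no-op if the buffer
 * object is not currently mapped.
 */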
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
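
/**
 * vmw_bo_acc_size - Accounting size for a struct vmw_buffer_object.
 *
 * @dev_priv: Driver private.
 * @size: The requested buffer size.
 * @user: Whether the object will be a user-space visible buffer object.
 * Return: The accounting size, including the TTM backend and page arrays.
 */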
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
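
/**
 * vmw_bo_bo_free - Destructor for plain vmw buffer objects.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */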
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}
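
/**
 * vmw_user_bo_destroy - Destructor for user-space visible vmw buffer objects.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */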
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

	vmw_bo_unmap(&vmw_user_bo->vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}
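
/**
 * vmw_bo_init - Initialize a vmw buffer object.
 *
 * @dev_priv: Pointer to the device private struct.
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on failure.
 *
 * Note that on error ttm_bo_init() invokes @bo_free, so the caller must not
 * free the object again.
 */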
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
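
/**
 * vmw_user_bo_release - TTM base object release callback for vmw user
 * buffer objects.
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the base object pointer and drops the reference it held on the
 * underlying struct vmw_buffer_object.
 */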
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}
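
/**
 * vmw_user_bo_ref_obj_release - TTM reference object release callback for
 * vmw user buffer objects.
 *
 * @base: Pointer to the TTM base object.
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, releasing the TTM synccpu write grab.
 */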
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}
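
/**
 * vmw_user_bo_alloc - Allocate a user buffer object.
 *
 * @dev_priv: Pointer to the device private struct.
 * @tfile: The TTM object file on which to register the user object.
 * @size: Size of the buffer object.
 * @shareable: Whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value is assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * is assigned.
 * @p_base: If non-NULL, receives an additional reference to the base object.
 * Return: Zero on success, negative error code on failure.
 */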
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
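
/**
 * vmw_user_bo_verify_access - Verify access permissions on this buffer
 * object.
 *
 * @bo: Pointer to the buffer object being accessed.
 * @tfile: Identifying the caller.
 * Return: Zero if access is granted, -EPERM otherwise.
 */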
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}
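
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on failure. In particular,
 * -EBUSY is returned if a dontblock operation is requested and the buffer
 * object is busy.
 */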
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->vbo.base);

	return ret;
}
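
/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */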
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
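
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Checks the ioctl arguments for validity and calls the relevant synccpu
 * grab or release function.
 */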
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
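
/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Allocates a user-space visible buffer object and returns its handle and
 * fake mmap offset to the caller.
 */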
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}
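
/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Closes a handle to a TTM base object, optionally freeing the object.
 */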
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
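
/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the buffer object is placed.
 * @p_base: Pointer to where a pointer to the TTM base object is placed, or
 * NULL if no such pointer is required.
 * Return: Zero on success, negative error code on failure.
 *
 * Both the returned base object pointer (if requested) and the vmw buffer
 * object pointer are refcounted.
 */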
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
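
/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * taking a reference.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * Return: A pointer to the buffer object, or an ERR_PTR on failure.
 *
 * No reference is taken; the returned pointer is only valid until
 * ttm_base_object_noref_release() is called.
 */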
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
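
/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The vmw buffer object.
 * @handle: Pointer to where the new handle is placed.
 * Return: Zero on success, negative error code on failure.
 */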
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}
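
/**
 * vmw_bo_fence_single - Fence a single TTM buffer object without
 * unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, a new fence is inserted into the
 * command stream and used instead.
 */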
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
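
/**
 * vmw_dumb_create - Create a dumb buffer object.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 * Return: Zero on success, negative error code on failure.
 *
 * This is the driver callback for the core drm create_dumb functionality.
 */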
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
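
/**
 * vmw_dumb_map_offset - Return the fake mmap offset for a dumb buffer.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is the driver callback for the core drm dumb_map_offset functionality.
 */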
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}
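
/**
 * vmw_dumb_destroy - Destroy a dumb buffer.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is the driver callback for the core drm dumb_destroy functionality.
 */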
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
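
/**
 * vmw_bo_swap_notify - TTM swapout notify callback.
 *
 * @bo: The buffer object about to be swapped out.
 */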
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Only act on buffer objects owned by this driver. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout. */
	vmw_bo_unmap(vmw_buffer_object(bo));
}
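
/**
 * vmw_bo_move_notify - TTM move_notify callback.
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory region the move is
 * taking place.
 *
 * Tears down cached kernel maps and resource bindings that do not survive
 * the move.
 */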
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Only act on buffer objects owned by this driver. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/* Kill any cached kernel maps before a move to or from VRAM. */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If the buffer is moving out of MOB memory, unbind all resources
	 * currently bound to it.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}