This source file includes the following definitions.
- drm_prime_add_buf_handle
- drm_prime_lookup_buf_by_handle
- drm_prime_lookup_buf_handle
- drm_prime_remove_buf_handle_locked
- drm_prime_init_file_private
- drm_prime_destroy_file_private
- drm_gem_dmabuf_export
- drm_gem_dmabuf_release
- drm_gem_prime_fd_to_handle
- drm_prime_fd_to_handle_ioctl
- export_and_register_object
- drm_gem_prime_handle_to_fd
- drm_prime_handle_to_fd_ioctl
- drm_gem_map_attach
- drm_gem_map_detach
- drm_gem_map_dma_buf
- drm_gem_unmap_dma_buf
- drm_gem_dmabuf_vmap
- drm_gem_dmabuf_vunmap
- drm_gem_prime_mmap
- drm_gem_dmabuf_mmap
- drm_prime_pages_to_sg
- drm_gem_prime_export
- drm_gem_prime_import_dev
- drm_gem_prime_import
- drm_prime_sg_to_page_addr_arrays
- drm_prime_gem_destroy
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"
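
/*
 * Per-file PRIME bookkeeping: each dma_buf/handle pair known to a drm_file is
 * tracked in two rbtrees so it can be looked up by either key.
 */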
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now the per-file lookup trees should already have been emptied */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
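
/*
 * drm_gem_dmabuf_export - wraps dma_buf_export() and, on success, takes a
 * reference on the exporting &drm_device and on the GEM object so the
 * dma-buf keeps both alive for its lifetime.
 */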
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
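
/*
 * drm_gem_dmabuf_release - dma_buf release callback for GEM-backed buffers.
 * Drops the references taken in drm_gem_dmabuf_export(): first the GEM
 * object, then the owning &drm_device.
 */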
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
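
/*
 * drm_gem_prime_fd_to_handle - PRIME import helper: turns a dma-buf fd into a
 * GEM handle for @file_priv. Re-uses the existing handle when the buffer was
 * imported before, otherwise asks the driver to import the dma-buf and
 * registers the new handle in the per-file lookup trees.
 */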
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this dma-buf before, need to import it */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unconditionally unlocks dev->object_name_lock */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with a concurrent gem_close */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		return dmabuf;
	}

	/* cache the export on the object; the cache holds its own reference */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}
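
/*
 * drm_gem_prime_handle_to_fd - PRIME export helper: turns a GEM handle into a
 * dma-buf fd. Re-exports a cached dma-buf when one already exists for the
 * object (or for its import attachment), otherwise creates a new export and
 * records it in the per-file lookup trees.
 */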
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the dma-buf the object was originally imported from */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * Record the handle -> dma-buf mapping while still holding
	 * dev->object_name_lock, so the insertion cannot race with a
	 * concurrent GEM close removing the handle.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * On failure the export stays cached on the object; closing the GEM
	 * handle cleans it up, so nothing is leaked here.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check that the flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}
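
/*
 * drm_gem_map_attach - dma_buf attach callback; pins the underlying GEM
 * object into memory for the lifetime of the attachment.
 */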
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
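
/*
 * drm_gem_map_detach - dma_buf detach callback; unpins the GEM object pinned
 * by drm_gem_map_attach().
 */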
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
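
/*
 * drm_gem_map_dma_buf - dma_buf map callback; builds the object's sg_table
 * (via obj->funcs->get_sg_table or the driver's gem_prime_get_sg_table hook)
 * and maps it for DMA to the importing device.
 */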
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (obj->funcs)
		sgt = obj->funcs->get_sg_table(obj);
	else
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(-ENOMEM);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);
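
/*
 * drm_gem_unmap_dma_buf - dma_buf unmap callback; undoes drm_gem_map_dma_buf()
 * by unmapping and freeing the sg_table.
 */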
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
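
/*
 * drm_gem_dmabuf_vmap - dma_buf vmap callback; returns a kernel virtual
 * mapping of the buffer, or NULL when the object cannot be vmapped.
 */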
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	void *vaddr;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr))
		vaddr = NULL;

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
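
/*
 * drm_gem_dmabuf_vunmap - dma_buf vunmap callback; releases a mapping set up
 * by drm_gem_dmabuf_vmap().
 */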
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
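
/*
 * drm_gem_prime_mmap - helper for mapping a GEM object through its dma-buf.
 * Sets up a temporary fake drm_file/file pair so the driver's regular
 * fops->mmap() path can be reused, with the vma offset rewritten to the
 * object's mmap offset.
 */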
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* point the fake file at the right minor so the driver's mmap path
	 * can look up the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	/* add the fake mmap offset of the GEM object */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
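
/*
 * drm_gem_dmabuf_mmap - dma_buf mmap callback; forwards to the driver's
 * gem_prime_mmap hook, returning -ENOSYS if the driver does not provide one.
 */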
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
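
/*
 * drm_prime_pages_to_sg - builds an sg_table from an array of pages; useful
 * for drivers implementing their gem_prime_get_sg_table hook. Returns an
 * ERR_PTR on allocation failure.
 */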
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
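
/*
 * drm_gem_prime_export - default PRIME export implementation: fills in a
 * &dma_buf_export_info using the standard GEM dma-buf ops and the object's
 * reservation object, then exports it via drm_gem_dmabuf_export().
 */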
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME,
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
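
/*
 * drm_gem_prime_import_dev - core PRIME import helper. Self-imports (a
 * dma-buf exported by the same device) just take a reference on the existing
 * GEM object; otherwise the dma-buf is attached to @attach_dev, mapped, and
 * handed to the driver's gem_prime_import_sg_table hook.
 */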
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM: take
			 * another reference on the GEM object instead of on
			 * the dma-buf file.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
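
/*
 * drm_gem_prime_import - default gem_prime_import implementation; imports the
 * dma-buf for the device's main struct device.
 */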
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);
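
/*
 * drm_prime_sg_to_page_addr_arrays - expands an sg_table into flat arrays of
 * pages and/or DMA addresses, one entry per PAGE_SIZE chunk. Returns -1 if
 * the table would overflow @max_entries.
 */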
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
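
/*
 * drm_prime_gem_destroy - helper for drivers to tear down an imported GEM
 * object: unmaps the sg_table (if any), detaches from the dma-buf, and drops
 * the dma-buf reference taken at import time.
 */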
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* drop the reference taken when the dma-buf was imported */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);