This source file includes the following definitions:
- dmabuffs_dname
- dma_buf_fs_init_context
- dma_buf_release
- dma_buf_mmap_internal
- dma_buf_llseek
- dma_buf_poll_cb
- dma_buf_poll
- dma_buf_set_name
- dma_buf_ioctl
- dma_buf_show_fdinfo
- is_dma_buf_file
- dma_buf_getfile
- dma_buf_export
- dma_buf_fd
- dma_buf_get
- dma_buf_put
- dma_buf_attach
- dma_buf_detach
- dma_buf_map_attachment
- dma_buf_unmap_attachment
- __dma_buf_begin_cpu_access
- dma_buf_begin_cpu_access
- dma_buf_end_cpu_access
- dma_buf_kmap
- dma_buf_kunmap
- dma_buf_mmap
- dma_buf_vmap
- dma_buf_vunmap
- dma_buf_debug_show
- dma_buf_init_debugfs
- dma_buf_uninit_debugfs
- dma_buf_init_debugfs
- dma_buf_uninit_debugfs
- dma_buf_init
- dma_buf_deinit
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	mutex_unlock(&dmabuf->lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);
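
	/*
	 * If either poll callback is still active here, the last reference
	 * to this dma-buf was dropped while an operation on the buffer was
	 * still pending: any fences a dma-buf poll waits on must have
	 * signalled before the buffer is released.
	 */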
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	if (!dmabuf->ops->mmap)
		return -EINVAL;

	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;
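
	/*
	 * Only SEEK_SET(0) and SEEK_END(0) are supported: this lets
	 * userspace discover the buffer size via lseek(fd, 0, SEEK_END)
	 * without supporting real seeking.
	 */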
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
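
/*
 * Fence polling: poll() on a dma-buf file reflects the state of the fences
 * in the buffer's reservation object. EPOLLIN becomes ready once the
 * exclusive fence has signalled; EPOLLOUT becomes ready once all attached
 * fences, shared and exclusive, have signalled.
 */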
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
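				/*
				 * No callback was queued, so wake up any
				 * additional waiters.
				 */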
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
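				/*
				 * The fence refcount dropped to zero, which
				 * means fobj has been freed under us: wake up
				 * any waiters and force the caller to recheck.
				 */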
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
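
/**
 * dma_buf_set_name - name the dma-buf so its usage can be tracked
 * @dmabuf:	[in]	buffer to rename
 * @buf:	[in]	userspace pointer to the new name
 *
 * The name can only be changed while the buffer is not attached to any
 * device; otherwise -EBUSY is returned.
 */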
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
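	/* Don't count the temporary reference taken inside procfs seq_show */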
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	mutex_unlock(&dmabuf->lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
	.show_fdinfo	= dma_buf_show_fdinfo,
};
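
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */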
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}
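
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps
 * the supplied private data and operations for struct dma_buf_ops. On a
 * missing ops pointer or an allocation failure, returns a negative error
 * wrapped into a pointer.
 */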
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
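		/* prevent &dma_buf[1] == dma_buf->resv */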
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
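
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	buffer for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */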
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
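
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */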
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
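
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, which frees the memory allocated for the dmabuf when exported.
 */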
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
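
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 */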
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);

	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
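
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */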
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt)
		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
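
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */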
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (attach->sgt) {
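		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */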
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
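
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */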
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (attach->sgt == sg_table)
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
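
/*
 * CPU access to a dma-buf must be bracketed with dma_buf_begin_cpu_access()
 * and dma_buf_end_cpu_access() so the exporter can flush or invalidate
 * caches and wait for outstanding device access. In between, kernel code
 * can access the buffer through dma_buf_kmap() or dma_buf_vmap(); userspace
 * accesses it by mmap()ing the dma-buf fd and issues the same bracketing
 * through the DMA_BUF_IOCTL_SYNC ioctl.
 */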
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
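
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls
 * is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */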
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
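
	/*
	 * Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */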
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
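
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */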
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
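
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap() and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * Returns NULL if the exporter does not implement the map callback.
 */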
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	if (!dmabuf->ops->map)
		return NULL;
	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
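
/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 */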
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
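
/**
 * dma_buf_mmap - Sets up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of
 * the dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */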
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	if (!dmabuf->ops->mmap)
		return -EINVAL;

	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
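
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */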
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
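
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */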
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);
		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n", attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);