This source file includes the following definitions:
- drm_gem_init
- drm_gem_destroy
- drm_gem_object_init
- drm_gem_private_object_init
- drm_gem_remove_prime_handles
- drm_gem_object_handle_free
- drm_gem_object_exported_dma_buf_free
- drm_gem_object_handle_put_unlocked
- drm_gem_object_release_handle
- drm_gem_handle_delete
- drm_gem_dumb_map_offset
- drm_gem_dumb_destroy
- drm_gem_handle_create_tail
- drm_gem_handle_create
- drm_gem_free_mmap_offset
- drm_gem_create_mmap_offset_size
- drm_gem_create_mmap_offset
- drm_gem_check_release_pagevec
- drm_gem_get_pages
- drm_gem_put_pages
- objects_lookup
- drm_gem_objects_lookup
- drm_gem_object_lookup
- drm_gem_dma_resv_wait
- drm_gem_close_ioctl
- drm_gem_flink_ioctl
- drm_gem_open_ioctl
- drm_gem_open
- drm_gem_release
- drm_gem_object_release
- drm_gem_object_free
- drm_gem_object_put_unlocked
- drm_gem_object_put
- drm_gem_vm_open
- drm_gem_vm_close
- drm_gem_mmap_obj
- drm_gem_mmap
- drm_gem_print_info
- drm_gem_pin
- drm_gem_unpin
- drm_gem_vmap
- drm_gem_vunmap
- drm_gem_lock_reservations
- drm_gem_unlock_reservations
- drm_gem_fence_array_add
- drm_gem_fence_array_add_implicit
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/*
 * drm_gem_init - initialize the per-device GEM state: the object name idr
 * used for flink names and the offset manager used for fake mmap offsets.
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/*
 * drm_gem_object_init - initialize an allocated GEM object of the given size
 * with shmem backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
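
/*
 * Illustrative sketch (not part of this file): a driver embedding
 * struct drm_gem_object typically wraps drm_gem_object_init() in an
 * allocator like the one below. "struct my_gem_object", "my_gem_funcs" and
 * "my_gem_create" are hypothetical names made up for this example.
 *
 *	struct my_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_gem_object *my_gem_create(struct drm_device *dev,
 *						   size_t size)
 *	{
 *		struct my_gem_object *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &my_gem_funcs;
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */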

/*
 * drm_gem_private_object_init - initialize an allocated GEM object without
 * shmem backing store; the caller is responsible for backing the object
 * (e.g. with imported or driver-managed memory).
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/*
 * Called after the last handle to the object has been released; drops the
 * flink name, if any. Caller must hold dev->object_name_lock.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */
	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's handle references
 * on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/*
 * drm_gem_handle_delete - removes the given file-private handle, added with
 * drm_gem_handle_create(). If this was the last handle, also cleans up
 * linked resources such as the GEM name.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/*
 * drm_gem_dumb_map_offset - return the fake mmap offset for a GEM object,
 * implementing the &drm_driver.dumb_map_offset callback for drivers that
 * use GEM to manage their backing storage.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/*
 * drm_gem_dumb_destroy - implements the &drm_driver.dumb_destroy callback
 * for drivers that use GEM to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/*
 * drm_gem_handle_create_tail - internal function to create a handle. Expects
 * dev->object_name_lock to be held already and drops it before returning;
 * used to avoid races when establishing new handles while importing an
 * object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/*
 * drm_gem_handle_create - create a GEM handle for an object. This adds a
 * handle reference to the object, which includes a regular reference count;
 * callers will likely want to drop their own reference afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
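
/*
 * Illustrative sketch (not part of this file): a minimal
 * &drm_driver.dumb_create implementation publishes the object to userspace
 * with drm_gem_handle_create() and then drops its local reference, so the
 * handle holds the only remaining reference. "my_dumb_create" and
 * "my_gem_create" are hypothetical names from the earlier example.
 *
 *	static int my_dumb_create(struct drm_file *file_priv,
 *				  struct drm_device *dev,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		struct my_gem_object *bo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = my_gem_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *					    &args->handle);
 *		drm_gem_object_put_unlocked(&bo->base);
 *		return ret;
 *	}
 */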

/*
 * drm_gem_free_mmap_offset - release a fake mmap offset previously allocated
 * by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/*
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object.
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * that it can use in a subsequent mmap(2) call; the DRM core then looks up
 * the object based on that offset and sets up the mapping.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/*
 * drm_gem_create_mmap_offset - create a fake mmap offset sized to the
 * object's backing storage; see drm_gem_create_mmap_offset_size().
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to the appropriate lru and release the pagevec, decrementing
 * the ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/*
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object from
 * shmem. Reads in all of the object's backing pages and keeps them pinned
 * until released with drm_gem_put_pages().
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/*
	 * We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong.
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/*
		 * Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/*
 * drm_gem_put_pages - helper to free backing pages obtained with
 * drm_gem_get_pages(), optionally marking them dirty and/or accessed.
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/*
	 * We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong.
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
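
/*
 * Illustrative sketch (not part of this file): drm_gem_get_pages() and
 * drm_gem_put_pages() are used as a pair around the time the pages are
 * actually needed. "struct my_gem_object" and its "pages" field are
 * hypothetical names for this example.
 *
 *	static int my_pin_pages(struct my_gem_object *bo)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&bo->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void my_unpin_pages(struct my_gem_object *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */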

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/*
 * drm_gem_objects_lookup - look up GEM objects from a userspace array of
 * handles, returning a newly allocated array of referenced objects in
 * @objs_out. Drop the references and kvfree() the array when done.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
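
/*
 * Illustrative sketch (not part of this file): an execbuffer-style ioctl
 * handler resolving the handles a job refers to. "struct my_job" and the
 * args layout are assumptions made up for this example.
 *
 *	static int my_lookup_job_bos(struct drm_file *file,
 *				     struct my_submit_args *args,
 *				     struct my_job *job)
 *	{
 *		job->bo_count = args->bo_handle_count;
 *		return drm_gem_objects_lookup(file,
 *					      u64_to_user_ptr(args->bo_handles),
 *					      job->bo_count, &job->bos);
 *	}
 */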

/*
 * drm_gem_object_lookup - look up a GEM object from its handle. Returns a
 * reference to the object named by the handle if it exists on @filp, or
 * NULL otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/*
 * drm_gem_dma_resv_wait - wait on the reservation fences of the GEM object
 * named by @handle. If @wait_all is true, waits on all fences, otherwise
 * only on the exclusive fence. Returns 0 on success, -ETIME if the wait
 * timed out, or another negative errno on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
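
/*
 * Illustrative sketch (not part of this file): a driver wait-BO ioctl built
 * on drm_gem_dma_resv_wait(). "struct my_wait_args" is a hypothetical ioctl
 * payload; drm_timeout_abs_to_jiffies() is the helper drivers commonly use
 * to convert an absolute nanosecond timeout to jiffies.
 *
 *	static int my_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				    struct drm_file *file_priv)
 *	{
 *		struct my_wait_args *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */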

/*
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl; releases the
 * handle to a GEM object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/*
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl; creates a
 * global name for an object and returns it. The name can be opened by any
 * process with drm_gem_open_ioctl().
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/*
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl; opens an object
 * by its global name and returns a handle and the size of the object.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/*
 * drm_gem_open - initializes the GEM file-private structures at devnode open
 * time.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/*
 * drm_gem_release - called at close time when the filp is going away;
 * releases any remaining handle references on objects.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/*
 * drm_gem_object_release - releases the structures and resources used by
 * @obj; the inverse of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/*
 * drm_gem_object_free - called after the last reference to the object has
 * been released. For drivers still using the deprecated
 * &drm_driver.gem_free_object callback this must be called with
 * &drm_device.struct_mutex held.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/*
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference; may be
 * called without holding any locks. The deprecated
 * &drm_driver.gem_free_object path takes &drm_device.struct_mutex itself.
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				   &dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/*
 * drm_gem_object_put - release a GEM buffer object reference; must be called
 * with &drm_device.struct_mutex held. Only use this for drivers that still
 * rely on the deprecated &drm_driver.gem_free_object callback.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/*
 * drm_gem_vm_open - vma->ops->open implementation for GEM; grabs an
 * additional reference for the object backing the vma.
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/*
 * drm_gem_vm_close - vma->ops->close implementation for GEM; drops the
 * reference taken by drm_gem_vm_open() or drm_gem_mmap_obj().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/*
 * drm_gem_mmap_obj - memory map a GEM object. Sets up the VMA to map
 * @obj_size bytes of @obj using the GEM object's vm_ops. Drivers can either
 * provide a fault handler in their vm_ops, or map the buffer memory
 * synchronously after this call returns.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (obj->funcs && obj->funcs->vm_ops)
		vma->vm_ops = obj->funcs->vm_ops;
	else if (dev->driver->gem_vm_ops)
		vma->vm_ops = dev->driver->gem_vm_ops;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/*
	 * Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/*
 * drm_gem_mmap - memory map routine for GEM objects. If a driver supports
 * GEM object mapping, mmap calls on the DRM file descriptor end up here;
 * this looks up the GEM object based on the fake offset passed in and maps
 * it with drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
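
/*
 * Illustrative sketch (not part of this file): drivers typically wire
 * drm_gem_mmap() into their &file_operations with the DEFINE_DRM_GEM_FOPS()
 * helper from <drm/drm_gem.h>. "my_driver" is a hypothetical name for this
 * example.
 *
 *	DEFINE_DRM_GEM_FOPS(my_driver_fops);
 *
 *	static struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.fops = &my_driver_fops,
 *	};
 */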

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/*
 * drm_gem_lock_reservations - sets up the ww context and acquires the
 * reservation lock on an array of GEM objects, retrying via the slow path
 * on contention. Once the reservations are locked, set up space for the
 * shared fences (if applicable), submit the job, then call
 * drm_gem_unlock_reservations().
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
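
/*
 * Illustrative sketch (not part of this file): a typical submit path locks
 * all BOs of a job, publishes the job's completion fence, then unlocks.
 * "struct my_job" and its fields are assumptions made up for this example.
 *
 *	static int my_attach_fences(struct my_job *job)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int i, ret;
 *
 *		ret = drm_gem_lock_reservations(job->bos, job->bo_count,
 *						&ctx);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < job->bo_count; i++)
 *			dma_resv_add_excl_fence(job->bos[i]->resv,
 *						job->done_fence);
 *
 *		drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 *		return 0;
 *	}
 */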

/*
 * drm_gem_fence_array_add - adds @fence to an xarray of fences to be waited
 * on for a job, deduplicating fences from the same context. Returns 0 on
 * success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/*
	 * Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the fence array scale with the number of
	 * engines involved, instead of the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/*
 * drm_gem_fence_array_add_implicit - adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use
 * in scheduling a rendering job. Call this after drm_gem_lock_reservations()
 * on the job's GEM objects, but before updating the reservations with the
 * job's own fences. If @write is false, only the exclusive fence is added.
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
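
/*
 * Illustrative sketch (not part of this file): collecting the implicit
 * dependencies of all BOs in a job into an xarray before scheduling it.
 * "struct my_job" and its fields are hypothetical; the xarray must be
 * initialized with XA_FLAGS_ALLOC, since drm_gem_fence_array_add() uses
 * xa_alloc().
 *
 *	static int my_collect_deps(struct my_job *job, struct xarray *deps)
 *	{
 *		int i, ret;
 *
 *		xa_init_flags(deps, XA_FLAGS_ALLOC);
 *
 *		for (i = 0; i < job->bo_count; i++) {
 *			ret = drm_gem_fence_array_add_implicit(deps,
 *							       job->bos[i],
 *							       job->bo_writes[i]);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */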