This source file includes the following definitions:
- __drm_gem_cma_create
- drm_gem_cma_create
- drm_gem_cma_create_with_handle
- drm_gem_cma_free_object
- drm_gem_cma_dumb_create_internal
- drm_gem_cma_dumb_create
- drm_gem_cma_mmap_obj
- drm_gem_cma_mmap
- drm_gem_cma_get_unmapped_area
- drm_gem_cma_print_info
- drm_gem_cma_prime_get_sg_table
- drm_gem_cma_prime_import_sg_table
- drm_gem_cma_prime_mmap
- drm_gem_cma_prime_vmap
- drm_gem_cma_prime_vunmap
- drm_cma_gem_create_object_default_funcs
- drm_gem_cma_prime_import_sg_table_vmap
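
Taken together, these helpers cover most of the GEM, dumb-buffer and PRIME callbacks that a driver using physically contiguous (CMA) buffers needs. The sketch below shows one plausible wiring into a struct drm_driver for a kernel of this vintage; the driver name "my_cma_driver" and the exact callback selection are illustrative assumptions, not something this file mandates.

static struct drm_driver my_cma_driver = {
        .driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        /* dumb-buffer allocation for KMS userspace */
        .dumb_create               = drm_gem_cma_dumb_create,
        /* object lifetime and CPU mapping */
        .gem_free_object_unlocked  = drm_gem_cma_free_object,
        .gem_vm_ops                = &drm_gem_cma_vm_ops,
        /* PRIME export/import */
        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
        .gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
        .gem_prime_vmap            = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap            = drm_gem_cma_prime_mmap,
        /* .fops, .name, etc. omitted for brevity */
};
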
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */

/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        if (drm->driver->gem_create_object)
                gem_obj = drm->driver->gem_create_object(drm, size);
        else
                gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!gem_obj)
                return ERR_PTR(-ENOMEM);
        cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

        ret = drm_gem_object_init(drm, gem_obj, size);
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return cma_obj;

error:
        kfree(cma_obj);
        return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        cma_obj = __drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
                                      GFP_KERNEL | __GFP_NOWARN);
        if (!cma_obj->vaddr) {
                dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return cma_obj;

error:
        drm_gem_object_put_unlocked(&cma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
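
/*
 * Example (illustrative, not part of this file): allocating a physically
 * contiguous scanout buffer from a driver. "priv->drm" and "fb_size" are
 * hypothetical driver-private names.
 *
 *      struct drm_gem_cma_object *cma_obj;
 *
 *      cma_obj = drm_gem_cma_create(priv->drm, fb_size);
 *      if (IS_ERR(cma_obj))
 *              return PTR_ERR(cma_obj);
 *
 *      // cma_obj->paddr can be programmed into the scanout engine;
 *      // cma_obj->vaddr is a writecombine CPU mapping of the buffer.
 */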

/*
 * drm_gem_cma_create_with_handle - allocate an object with the given size
 *     and return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        cma_obj = drm_gem_cma_create(drm, size);
        if (IS_ERR(cma_obj))
                return cma_obj;

        gem_obj = &cma_obj->base;

        /*
         * Allocate an id in the idr table where the object is registered;
         * the handle that userspace sees is this id.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        /* Drop the reference from allocation - the handle holds it now. */
        drm_gem_object_put_unlocked(gem_obj);
        if (ret)
                return ERR_PTR(ret);

        return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 * Drivers using the CMA helpers should set this as their
 * &drm_driver.gem_free_object_unlocked callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = to_drm_gem_cma_obj(gem_obj);

        if (gem_obj->import_attach) {
                if (cma_obj->vaddr)
                        dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        } else if (cma_obj->vaddr) {
                dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
                            cma_obj->vaddr, cma_obj->paddr);
        }

        drm_gem_object_release(gem_obj);

        kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_cma_object *cma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
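
/*
 * Example (illustrative): hardware that requires the scanline pitch to be
 * aligned, say to 64 bytes, can wrap the internal helper in its own
 * &drm_driver.dumb_create hook. "my_dumb_create" is a hypothetical name.
 *
 *      static int my_dumb_create(struct drm_file *file_priv,
 *                                struct drm_device *drm,
 *                                struct drm_mode_create_dumb *args)
 *      {
 *              args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *              return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *      }
 */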

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_cma_object *cma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
                                struct vm_area_struct *vma)
{
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
                          cma_obj->paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: in addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers that employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_CMA_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        gem_obj = vma->vm_private_data;
        cma_obj = to_drm_gem_cma_obj(gem_obj);

        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
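
/*
 * Illustrative note: rather than referencing drm_gem_cma_mmap() directly,
 * drivers of this vintage normally declare their file_operations with the
 * DEFINE_DRM_GEM_CMA_FOPS() macro from <drm/drm_gem_cma_helper.h>, which
 * fills in .mmap and the other standard DRM fops. "my_fops" and "my_driver"
 * are hypothetical names.
 *
 *      DEFINE_DRM_GEM_CMA_FOPS(my_fops);
 *
 *      static struct drm_driver my_driver = {
 *              ...
 *              .fops = &my_fops,
 *      };
 */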

#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used in noMMU platforms to propose an address mapping
 * for a given buffer. It's intended to be used as a direct handler for the
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
                                            unsigned long addr,
                                            unsigned long len,
                                            unsigned long pgoff,
                                            unsigned long flags)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *obj = NULL;
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_offset_node *node;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  pgoff,
                                                  len >> PAGE_SHIFT);
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of
                 * being destroyed and will be freed as soon as we release the
                 * lock - so we have to check for the 0-refcnted object and
                 * treat it as invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }

        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        cma_obj = to_drm_gem_cma_obj(obj);

        drm_gem_object_put_unlocked(obj);

        return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
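
/*
 * Illustrative sketch for !CONFIG_MMU configurations: the helper above is
 * meant to be hooked into the driver's file_operations so that mmap() can
 * find a suitable address without an MMU. "my_nommu_fops" is a hypothetical
 * name.
 *
 *      static const struct file_operations my_nommu_fops = {
 *              ...
 *              .get_unmapped_area = drm_gem_cma_get_unmapped_area,
 *      };
 */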

/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function can be used as the &drm_driver->gem_print_info callback.
 * It prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
                            const struct drm_gem_object *obj)
{
        const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
        drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their &drm_driver.gem_prime_get_sg_table callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-
 * encoded negative error code on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
                              cma_obj->paddr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must describe a single contiguous range).
 * Drivers that use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;

        if (sgt->nents != 1) {
                /* check if the entries in the sg_table are contiguous */
                dma_addr_t next_addr = sg_dma_address(sgt->sgl);
                struct scatterlist *s;
                unsigned int i;

                for_each_sg(sgt->sgl, s, sgt->nents, i) {
                        /*
                         * sg_dma_address(s) is only valid for entries
                         * that have sg_dma_len(s) != 0
                         */
                        if (!sg_dma_len(s))
                                continue;

                        if (sg_dma_address(s) != next_addr)
                                return ERR_PTR(-EINVAL);

                        next_addr = sg_dma_address(s) + sg_dma_len(s);
                }
        }

        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
        if (IS_ERR(cma_obj))
                return ERR_CAST(cma_obj);

        cma_obj->paddr = sg_dma_address(sgt->sgl);
        cma_obj->sgt = sgt;

        DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

        return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer exported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their &drm_driver.gem_prime_mmap callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        struct drm_gem_cma_object *cma_obj;
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        cma_obj = to_drm_gem_cma_obj(obj);
        return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their
 * &drm_driver.gem_prime_vmap callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

        return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their &drm_driver.gem_prime_vunmap callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);

static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
        .free = drm_gem_cma_free_object,
        .print_info = drm_gem_cma_print_info,
        .get_sg_table = drm_gem_cma_prime_get_sg_table,
        .vmap = drm_gem_cma_prime_vmap,
        .vm_ops = &drm_gem_cma_vm_ops,
};

/**
 * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
 *     default function table
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This sets the GEM object functions with the default CMA helper functions.
 * This function can be used as the &drm_driver.gem_create_object callback.
 *
 * Returns:
 * A pointer to an allocated GEM object or NULL on failure.
 */
struct drm_gem_object *
drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!cma_obj)
                return NULL;

        cma_obj->base.funcs = &drm_cma_gem_default_funcs;

        return &cma_obj->base;
}
EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);

/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the
 * kernel virtual address. This ensures that a CMA GEM object always has its
 * virtual address set. The address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to
 * set the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
                                       struct dma_buf_attachment *attach,
                                       struct sg_table *sgt)
{
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *obj;
        void *vaddr;

        vaddr = dma_buf_vmap(attach->dmabuf);
        if (!vaddr) {
                DRM_ERROR("Failed to vmap PRIME buffer\n");
                return ERR_PTR(-ENOMEM);
        }

        obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                dma_buf_vunmap(attach->dmabuf, vaddr);
                return obj;
        }

        cma_obj = to_drm_gem_cma_obj(obj);
        cma_obj->vaddr = vaddr;

        return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
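
/*
 * Illustrative note: together, drm_cma_gem_create_object_default_funcs() and
 * drm_gem_cma_prime_import_sg_table_vmap() back the
 * DRM_GEM_CMA_VMAP_DRIVER_OPS convenience macro in
 * <drm/drm_gem_cma_helper.h> (assuming a kernel of this vintage), so a
 * driver can request permanently vmap'ed CMA buffers in one line.
 * "my_vmap_driver" is a hypothetical name.
 *
 *      static struct drm_driver my_vmap_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 *              DRM_GEM_CMA_VMAP_DRIVER_OPS,
 *              ...
 *      };
 */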