This source file includes following definitions.
- drm_file_alloc
- drm_events_release
- drm_file_free
- drm_close_helper
- drm_cpu_valid
- drm_open_helper
- drm_open
- drm_lastclose
- drm_release
- drm_read
- drm_poll
- drm_event_reserve_init_locked
- drm_event_reserve_init
- drm_event_cancel_free
- drm_send_event_locked
- drm_send_event
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34 #include <linux/dma-fence.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/poll.h>
38 #include <linux/slab.h>
39
40 #include <drm/drm_client.h>
41 #include <drm/drm_drv.h>
42 #include <drm/drm_file.h>
43 #include <drm/drm_print.h>
44
45 #include "drm_crtc_internal.h"
46 #include "drm_internal.h"
47 #include "drm_legacy.h"
48
49
/* Serializes first-open setup and last-close teardown across all devices. */
DEFINE_MUTEX(drm_global_mutex);
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/**
 * drm_file_alloc - allocate and initialize a new file private for @minor
 * @minor: minor the new file will be opened on
 *
 * Allocates the per-open &struct drm_file, initializes its lists, locks and
 * event bookkeeping, opens per-feature state (GEM, syncobj, PRIME) for
 * drivers that advertise the corresponding feature flag, and finally calls
 * the driver's ->open() hook.
 *
 * Returns the new file on success or an ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* CAP_SYS_ADMIN opens are treated as already authenticated. */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* per-file quota for queued events, in bytes */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

	/* Unwind in reverse order of the setup above. */
out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
171
/*
 * drm_events_release - drop all events still owned by @file_priv
 * @file_priv: file being torn down
 *
 * Under dev->event_lock:
 *  - unlinks events that are still pending (not yet sent) and clears their
 *    file_priv back-pointer, so a later drm_send_event_locked() frees them
 *    instead of queueing them on a dead file;
 *  - frees events that were delivered to event_list but never read.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events, orphaning them from this file. */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Free events userspace never consumed. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/**
 * drm_file_free - release a file private created by drm_file_alloc()
 * @file: file to free, may be NULL (no-op)
 *
 * Tears down everything drm_file_alloc() set up plus per-open state that
 * accumulated while the file was alive (events, framebuffers, blobs, master
 * status), calling driver and legacy hooks in the established order.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  dev->open_count);

	/* Legacy drivers get a ->preclose() callback before any teardown. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	/* Orphan pending events and free unread ones. */
	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() above must have emptied the event list. */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
261
262 static void drm_close_helper(struct file *filp)
263 {
264 struct drm_file *file_priv = filp->private_data;
265 struct drm_device *dev = file_priv->minor->dev;
266
267 mutex_lock(&dev->filelist_mutex);
268 list_del(&file_priv->lhead);
269 mutex_unlock(&dev->filelist_mutex);
270
271 drm_file_free(file_priv);
272 }
273
274
275
276
277
278
/*
 * Check whether DRM can run on this CPU at all. Only pre-v9 sparc is
 * excluded; every other architecture is supported.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* no usable atomics on sparc32 */
#else
	return 1;
#endif
}
286
287
288
289
290
291
292
293
294
295
296
/*
 * drm_open_helper - common open path below drm_open()
 * @filp: VFS file being opened
 * @minor: minor already acquired by the caller
 *
 * Rejects exclusive opens and opens while the device is switched off
 * (vga_switcheroo), allocates the drm_file, opens a master for primary
 * clients, and links the new file into dev->filelist.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Device must be powered on (or dynamically managed) to be opened. */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose from the first VGA device, falling back to the
	 * first PCI root bus.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
354
355
356
357
358
359
360
361
362
363
364
365
366
367
/**
 * drm_open - open method for DRM char-device nodes
 * @inode: device inode
 * @filp: file pointer
 *
 * Looks up the minor from the inode, bumps the device open count (the
 * first opener also runs legacy device setup) and delegates the rest to
 * drm_open_helper(). On failure the open count and minor reference are
 * rolled back.
 *
 * Returns 0 on success or a negative errno.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;	/* first opener performs legacy setup */

	/* All device nodes of one device share a single address_space. */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
404
/*
 * drm_lastclose - runs after the final open handle on @dev is released
 * @dev: device being closed
 *
 * Gives the driver a ->lastclose() callback, reinitializes legacy device
 * state, and hands the display back to in-kernel clients (e.g. fbdev
 * emulation) via drm_client_dev_restore().
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433 int drm_release(struct inode *inode, struct file *filp)
434 {
435 struct drm_file *file_priv = filp->private_data;
436 struct drm_minor *minor = file_priv->minor;
437 struct drm_device *dev = minor->dev;
438
439 mutex_lock(&drm_global_mutex);
440
441 DRM_DEBUG("open_count = %d\n", dev->open_count);
442
443 drm_close_helper(filp);
444
445 if (!--dev->open_count)
446 drm_lastclose(dev);
447
448 mutex_unlock(&drm_global_mutex);
449
450 drm_minor_release(minor);
451
452 return 0;
453 }
454 EXPORT_SYMBOL(drm_release);
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination buffer
 * @count: size of @buffer in bytes
 * @offset: file offset (unused)
 *
 * Copies completed events off the file's event_list into @buffer. Events
 * are consumed whole: if the next event does not fit in the remaining
 * space it is pushed back and whatever was already copied is returned.
 * Blocks until an event arrives unless O_NONBLOCK is set.
 *
 * Returns the number of bytes copied or a negative errno.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(buffer, count))
		return -EFAULT;

	/* Serialize readers so events come out in queue order. */
	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Dequeue the oldest event and refund its space quota. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* Already copied something? Return it rather than block. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping so other readers
			 * and close paths are not blocked behind us. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						!list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
			/* Event doesn't fit (or the copy faulted below):
			 * re-reserve its space, requeue it at the head and
			 * stop, waking any other waiting reader. */
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569 __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
570 {
571 struct drm_file *file_priv = filp->private_data;
572 __poll_t mask = 0;
573
574 poll_wait(filp, &file_priv->event_wait, wait);
575
576 if (!list_empty(&file_priv->event_list))
577 mask |= EPOLLIN | EPOLLRDNORM;
578
579 return mask;
580 }
581 EXPORT_SYMBOL(drm_poll);
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607 int drm_event_reserve_init_locked(struct drm_device *dev,
608 struct drm_file *file_priv,
609 struct drm_pending_event *p,
610 struct drm_event *e)
611 {
612 if (file_priv->event_space < e->length)
613 return -ENOMEM;
614
615 file_priv->event_space -= e->length;
616
617 p->event = e;
618 list_add(&p->pending_link, &file_priv->pending_event_list);
619 p->file_priv = file_priv;
620
621 return 0;
622 }
623 EXPORT_SYMBOL(drm_event_reserve_init_locked);
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649 int drm_event_reserve_init(struct drm_device *dev,
650 struct drm_file *file_priv,
651 struct drm_pending_event *p,
652 struct drm_event *e)
653 {
654 unsigned long flags;
655 int ret;
656
657 spin_lock_irqsave(&dev->event_lock, flags);
658 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
659 spin_unlock_irqrestore(&dev->event_lock, flags);
660
661 return ret;
662 }
663 EXPORT_SYMBOL(drm_event_reserve_init);
664
665
666
667
668
669
670
671
672
673
674 void drm_event_cancel_free(struct drm_device *dev,
675 struct drm_pending_event *p)
676 {
677 unsigned long flags;
678 spin_lock_irqsave(&dev->event_lock, flags);
679 if (p->file_priv) {
680 p->file_priv->event_space += p->event->length;
681 list_del(&p->pending_link);
682 }
683 spin_unlock_irqrestore(&dev->event_lock, flags);
684
685 if (p->fence)
686 dma_fence_put(p->fence);
687
688 kfree(p);
689 }
690 EXPORT_SYMBOL(drm_event_cancel_free);
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
/**
 * drm_send_event_locked - deliver an event to the file it was reserved for
 * @dev: DRM device
 * @e: event to send
 *
 * Caller must hold dev->event_lock. Signals the optional completion and
 * fence attached to the event, then either queues the event on the target
 * file's event_list (waking readers), or — if the file was already closed
 * and drm_events_release() cleared e->file_priv — frees it.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		/* Target file is gone; nobody can ever read this event. */
		kfree(e);
		return;
	}

	/* Move from the pending list to the readable event list. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
749 {
750 unsigned long irqflags;
751
752 spin_lock_irqsave(&dev->event_lock, irqflags);
753 drm_send_event_locked(dev, e);
754 spin_unlock_irqrestore(&dev->event_lock, irqflags);
755 }
756 EXPORT_SYMBOL(drm_send_event);