This source file includes the following definitions.
- virtio_get_driver_name
- virtio_get_timeline_name
- virtio_fence_signaled
- virtio_fence_value_str
- virtio_timeline_value_str
- virtio_gpu_fence_alloc
- virtio_gpu_fence_emit
- virtio_gpu_fence_event_process
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26 #include <trace/events/dma_fence.h>
27
28 #include "virtgpu_drv.h"
29
/* dma_fence_ops callback: name of the driver that owns this fence. */
static const char *virtio_get_driver_name(struct dma_fence *f)
{
	static const char name[] = "virtio_gpu";

	return name;
}
34
/* dma_fence_ops callback: all fences live on the single control-queue
 * timeline. */
static const char *virtio_get_timeline_name(struct dma_fence *f)
{
	static const char timeline[] = "controlq";

	return timeline;
}
39
40 bool virtio_fence_signaled(struct dma_fence *f)
41 {
42 struct virtio_gpu_fence *fence = to_virtio_fence(f);
43
44 if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
45 return true;
46 return false;
47 }
48
49 static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
50 {
51 snprintf(str, size, "%llu", f->seqno);
52 }
53
54 static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
55 {
56 struct virtio_gpu_fence *fence = to_virtio_fence(f);
57
58 snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
59 }
60
/*
 * dma_fence_ops backing every fence created by this driver.  No custom
 * enable_signaling/wait callbacks are provided: signaling is driven by
 * virtio_gpu_fence_event_process() when the host acknowledges commands.
 */
static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name = virtio_get_driver_name,
	.get_timeline_name = virtio_get_timeline_name,
	.signaled = virtio_fence_signaled,
	.fence_value_str = virtio_fence_value_str,
	.timeline_value_str = virtio_timeline_value_str,
};
68
69 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
70 {
71 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
72 struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
73 GFP_KERNEL);
74 if (!fence)
75 return fence;
76
77 fence->drv = drv;
78
79
80
81
82
83 dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
84
85 return fence;
86 }
87
88 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
89 struct virtio_gpu_ctrl_hdr *cmd_hdr,
90 struct virtio_gpu_fence *fence)
91 {
92 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
93 unsigned long irq_flags;
94
95 spin_lock_irqsave(&drv->lock, irq_flags);
96 fence->f.seqno = ++drv->sync_seq;
97 dma_fence_get(&fence->f);
98 list_add_tail(&fence->node, &drv->fences);
99 spin_unlock_irqrestore(&drv->lock, irq_flags);
100
101 trace_dma_fence_emit(&fence->f);
102
103 cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
104 cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
105 }
106
107 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
108 u64 last_seq)
109 {
110 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
111 struct virtio_gpu_fence *fence, *tmp;
112 unsigned long irq_flags;
113
114 spin_lock_irqsave(&drv->lock, irq_flags);
115 atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
116 list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
117 if (last_seq < fence->f.seqno)
118 continue;
119 dma_fence_signal_locked(&fence->f);
120 list_del(&fence->node);
121 dma_fence_put(&fence->f);
122 }
123 spin_unlock_irqrestore(&drv->lock, irq_flags);
124 }