This source file includes the following definitions:
- radeon_sync_create
- radeon_sync_fence
- radeon_sync_resv
- radeon_sync_rings
- radeon_sync_free
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 #include "radeon.h"
32 #include "radeon_trace.h"
33
34
35
36
37
38
39
40
41 void radeon_sync_create(struct radeon_sync *sync)
42 {
43 unsigned i;
44
45 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
46 sync->semaphores[i] = NULL;
47
48 for (i = 0; i < RADEON_NUM_RINGS; ++i)
49 sync->sync_to[i] = NULL;
50
51 sync->last_vm_update = NULL;
52 }
53
54
55
56
57
58
59
60
61
62 void radeon_sync_fence(struct radeon_sync *sync,
63 struct radeon_fence *fence)
64 {
65 struct radeon_fence *other;
66
67 if (!fence)
68 return;
69
70 other = sync->sync_to[fence->ring];
71 sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
72
73 if (fence->is_vm_update) {
74 other = sync->last_vm_update;
75 sync->last_vm_update = radeon_fence_later(fence, other);
76 }
77 }
78
79
80
81
82
83
84
85
86
87
88 int radeon_sync_resv(struct radeon_device *rdev,
89 struct radeon_sync *sync,
90 struct dma_resv *resv,
91 bool shared)
92 {
93 struct dma_resv_list *flist;
94 struct dma_fence *f;
95 struct radeon_fence *fence;
96 unsigned i;
97 int r = 0;
98
99
100 f = dma_resv_get_excl(resv);
101 fence = f ? to_radeon_fence(f) : NULL;
102 if (fence && fence->rdev == rdev)
103 radeon_sync_fence(sync, fence);
104 else if (f)
105 r = dma_fence_wait(f, true);
106
107 flist = dma_resv_get_list(resv);
108 if (shared || !flist || r)
109 return r;
110
111 for (i = 0; i < flist->shared_count; ++i) {
112 f = rcu_dereference_protected(flist->shared[i],
113 dma_resv_held(resv));
114 fence = to_radeon_fence(f);
115 if (fence && fence->rdev == rdev)
116 radeon_sync_fence(sync, fence);
117 else
118 r = dma_fence_wait(f, true);
119
120 if (r)
121 break;
122 }
123 return r;
124 }
125
126
127
128
129
130
131
132
133
134
135
/**
 * radeon_sync_rings - sync @ring to all fences collected in @sync
 *
 * @rdev: radeon device pointer
 * @sync: sync object holding the fences to sync to (see radeon_sync_fence)
 * @ring: ring index that needs to wait for the fences
 *
 * Emits semaphore signal/wait pairs so that @ring waits for the other
 * rings' fences on the GPU; falls back to CPU waits when semaphores run
 * out or emission fails. NOTE(review): appears to assume the caller has
 * already allocated space on @ring for the wait commands — confirm
 * against callers.
 *
 * Returns 0 on success, -EINVAL when syncing to a disabled ring, or a
 * negative error code from fence waiting / semaphore or ring allocation.
 */
int radeon_sync_rings(struct radeon_device *rdev,
		      struct radeon_sync *sync,
		      int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = sync->sync_to[i];
		struct radeon_semaphore *semaphore;

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks: a dead ring would never signal */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (count >= RADEON_NUM_SYNCS) {
			/* not enough semaphore slots left, wait on the CPU */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}
		r = radeon_semaphore_create(rdev, &semaphore);
		if (r)
			return r;

		/* remember the semaphore so radeon_sync_free() releases it */
		sync->semaphores[count++] = semaphore;

		/* allocate enough space for the sync command on the signaling ring */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r)
			return r;

		/* emit the signal semaphore on ring i */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful, roll back and wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* emit the matching wait on the target ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful, roll back and wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i], false);
		/* record the sync so we can skip it next time */
		radeon_fence_note_sync(fence, ring);
	}

	return 0;
}
201
202
203
204
205
206
207
208
209
210
211 void radeon_sync_free(struct radeon_device *rdev,
212 struct radeon_sync *sync,
213 struct radeon_fence *fence)
214 {
215 unsigned i;
216
217 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
218 radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
219 }