This source file includes following definitions.
- dma_fence_chain_get_prev
- dma_fence_chain_walk
- dma_fence_chain_find_seqno
- dma_fence_chain_get_driver_name
- dma_fence_chain_get_timeline_name
- dma_fence_chain_irq_work
- dma_fence_chain_cb
- dma_fence_chain_enable_signaling
- dma_fence_chain_signaled
- dma_fence_chain_release
- dma_fence_chain_init
// SPDX-License-Identifier: GPL-2.0
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */
10 #include <linux/dma-fence-chain.h>
11
12 static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
13
14
15
16
17
18
19
20
/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
 * chain node.  The _rcu_safe variant re-reads @chain->prev if it was replaced
 * concurrently, so the returned fence is guaranteed to have a valid reference.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->prev);
	rcu_read_unlock();
	return prev;
}
30
31
32
33
34
35
36
37
38
/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node, dropping the reference on @fence.  Returns
 * the next fence (with a reference held) or NULL if we are at the end of the
 * chain.  Garbage collects chain nodes which are already signaled by splicing
 * them out of the chain with cmpxchg on the prev pointer.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		/* Not a chain node: the walk terminates here. */
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			/* Stop at the first chain node whose own fence is
			 * still unsignaled; otherwise try to skip over it.
			 */
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			/* Plain fence at the end of the chain. */
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		/* Atomically splice out the signaled node; on a lost race
		 * somebody else already replaced prev, so drop our
		 * replacement reference instead.
		 */
		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
77
78
79
80
81
82
83
84
85
86
87
88
/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number.  If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.  On success *@pfence is updated (the old
 * reference is consumed by the walk, a new one is held on the result).
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*pfence, &chain->base) {
		/* Stop once we leave the timeline or step past the node that
		 * covers @seqno (prev_seqno < seqno <= node seqno).
		 */
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
110
/* dma_fence_ops::get_driver_name - chain nodes are driver independent. */
static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}
115
/* dma_fence_ops::get_timeline_name - chains are not bound to one timeline. */
static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
120
/*
 * Deferred work run after a dependency fence signaled: try to re-arm the
 * callback on the next unsignaled fence in the chain, or signal the chain
 * node if everything is done.  Runs in irq_work context because the fence
 * callback itself may be called from IRQ context.
 */
static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	/* Drop the reference taken when the callback was armed. */
	dma_fence_put(&chain->base);
}
133
/*
 * Fence callback: a dependency signaled.  Defer the (potentially sleeping
 * unsafe) re-arm/signal work to irq_work and drop the reference held on the
 * dependency fence @f.
 */
static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	irq_work_queue(&chain->work);
	dma_fence_put(f);
}
142
/*
 * dma_fence_ops::enable_signaling for chain nodes.
 *
 * Walk the chain and install a callback on the first fence which is not
 * signaled yet.  Returns true if a callback was installed (the reference on
 * head->base is then kept until dma_fence_chain_cb/irq_work release it),
 * false if every fence in the chain is already signaled.
 */
static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	/* Hold the head alive while the callback is armed. */
	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		/* For chain nodes wait on the contained fence, otherwise on
		 * the node itself.
		 */
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			/* Callback armed; the reference on f is dropped by
			 * dma_fence_chain_cb when it fires.
			 */
			dma_fence_put(fence);
			return true;
		}
		/* f already signaled, keep walking. */
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}
162
/*
 * dma_fence_ops::signaled for chain nodes: the chain node is signaled only
 * when every fence reachable through it is signaled.  The walk itself garbage
 * collects already signaled nodes.
 */
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		if (!dma_fence_is_signaled(f)) {
			/* Drop the walk's reference before bailing out. */
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}
177
/*
 * dma_fence_ops::release for chain nodes.
 */
static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		/* Somebody else still holds prev; releasing it here would not
		 * free it, so recursion cannot happen — stop unlinking.
		 */
		if (kref_read(&prev->refcount) > 1)
			break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	/* Drop the node the loop stopped at (or NULL, which is a no-op). */
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}
208
/* Fence operations implemented by chain nodes; 64bit seqnos allow direct
 * comparison without wrap-around handling.
 */
const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
218
219
220
221
222
223
224
225
226
227
/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.  Takes ownership of the
 * references on @prev and @fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;
	init_irq_work(&chain->work, dma_fence_chain_irq_work);

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);
}
EXPORT_SYMBOL(dma_fence_chain_init);