This source file includes the following definitions (a brief usage sketch of the exported entry points follows the list):
- do_async_gen_syndrome
- do_sync_gen_syndrome
- async_gen_syndrome
- pq_val_chan
- async_syndrome_val
- async_pq_init
- async_pq_exit
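A rough usage sketch (not part of the original file): the two exported entry points are normally driven by a RAID-6 style caller through an async_submit_ctl descriptor. The helper name, stripe layout, and buffers below are illustrative assumptions only, shown here ahead of the file body purely for orientation.

/* hypothetical caller: generate P/Q for a stripe, then chain a validation pass */
static struct dma_async_tx_descriptor *
example_pq_update_and_check(struct page **blocks, int disks, size_t len,
			    struct page *spare, addr_conv_t *scribble,
			    enum sum_check_flags *pqres)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* blocks[0..disks-3] hold data; blocks[disks-2] is P, blocks[disks-1] is Q */
	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, disks, len, &submit);

	/* run the validation after the generation completes and ack the chain */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, scribble);
	return async_syndrome_val(blocks, 0, disks, len, pqres, spare, &submit);
}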
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/*
 * pq_scribble_page - space to hold a throwaway P or Q buffer for the
 * synchronous gen_syndrome path
 */
static struct page *pq_scribble_page;

/*
 * The struct page *blocks[] parameter passed to async_gen_syndrome() and
 * async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1].
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

#define MAX_DISKS 255

/*
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * fence the intermediate operations, and clear the callback
		 * parameters
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* the driver may be temporarily out of descriptors; quiesce
		 * any dependencies, flush pending operations, and retry
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		/* subsequent descriptors continue the in-progress P/Q calculation */
		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/*
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;
			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL they are replaced with the raid6_empty_zero_page in the
 * synchronous path and omitted in the hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q into the destinations is only implemented in the sync path */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * 'offset', and 'disks' parameters of this routine.  The synchronous
 * path requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");