Lines matching refs: submit

33 		  size_t len, struct async_submit_ctl *submit)  in async_sum_product()  argument
35 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_sum_product()
52 if (submit->flags & ASYNC_TX_FENCE) in async_sum_product()
68 async_tx_submit(chan, tx, submit); in async_sum_product()
80 async_tx_quiesce(&submit->depend_tx); in async_sum_product()
98 struct async_submit_ctl *submit) in async_mult() argument
100 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_mult()
116 if (submit->flags & ASYNC_TX_FENCE) in async_mult()
135 async_tx_submit(chan, tx, submit); in async_mult()
148 async_tx_quiesce(&submit->depend_tx); in async_mult()
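
Both helper ops above share one offload-or-fallback shape: probe for a DMA_PQ-capable channel, fold ASYNC_TX_FENCE from the submit flags into the hardware descriptor, submit on success, and otherwise quiesce the dependency chain and compute on the CPU. A condensed sketch of that shape follows; it is not the kernel's exact bodies, and do_sync_op() is a hypothetical stand-in for the synchronous Galois-field math.

	#include <linux/async_tx.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical CPU fallback standing in for the GF math in the real ops. */
	static void do_sync_op(struct page *dest, struct page **srcs,
			       unsigned char *coef, size_t len);

	static struct dma_async_tx_descriptor *
	async_op_sketch(struct page *dest, struct page **srcs, unsigned char *coef,
			size_t len, struct async_submit_ctl *submit)
	{
		struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
							      &dest, 1, srcs, 2, len);
		struct dma_device *dma = chan ? chan->device : NULL;

		if (dma) {
			dma_addr_t dma_dest[2];
			dma_addr_t dma_src[2];
			struct dma_async_tx_descriptor *tx;
			enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

			/* Keep this descriptor ordered behind its dependency. */
			if (submit->flags & ASYNC_TX_FENCE)
				dma_flags |= DMA_PREP_FENCE;

			/* P is disabled, so only the Q destination slot is used. */
			dma_dest[1] = dma_map_page(dma->dev, dest, 0, len,
						   DMA_BIDIRECTIONAL);
			dma_src[0] = dma_map_page(dma->dev, srcs[0], 0, len,
						  DMA_TO_DEVICE);
			dma_src[1] = dma_map_page(dma->dev, srcs[1], 0, len,
						  DMA_TO_DEVICE);
			tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2,
						     coef, len, dma_flags);
			if (tx) {
				async_tx_submit(chan, tx, submit);
				return tx;
			}
			/* (unmap handling on prep failure elided for brevity) */
		}

		/* No channel or no descriptor: run synchronously. */
		async_tx_quiesce(&submit->depend_tx);
		do_sync_op(dest, srcs, coef, len);
		async_tx_sync_epilog(submit);
		return NULL;
	}
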
161 struct page **blocks, struct async_submit_ctl *submit) in __2data_recov_4() argument
167 enum async_tx_flags flags = submit->flags; in __2data_recov_4()
168 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_4()
169 void *cb_param = submit->cb_param; in __2data_recov_4()
170 void *scribble = submit->scribble; in __2data_recov_4()
184 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_4()
185 tx = async_sum_product(b, srcs, coef, bytes, submit); in __2data_recov_4()
190 init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, in __2data_recov_4()
192 tx = async_xor(a, srcs, 0, 2, bytes, submit); in __2data_recov_4()
200 struct page **blocks, struct async_submit_ctl *submit) in __2data_recov_5() argument
206 enum async_tx_flags flags = submit->flags; in __2data_recov_5()
207 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_5()
208 void *cb_param = submit->cb_param; in __2data_recov_5()
209 void *scribble = submit->scribble; in __2data_recov_5()
235 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
236 tx = async_memcpy(dp, g, 0, 0, bytes, submit); in __2data_recov_5()
237 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
238 tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); in __2data_recov_5()
243 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_5()
245 tx = async_xor(dp, srcs, 0, 2, bytes, submit); in __2data_recov_5()
250 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_5()
252 tx = async_xor(dq, srcs, 0, 2, bytes, submit); in __2data_recov_5()
259 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
260 tx = async_sum_product(dq, srcs, coef, bytes, submit); in __2data_recov_5()
265 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in __2data_recov_5()
267 tx = async_xor(dp, srcs, 0, 2, bytes, submit); in __2data_recov_5()
274 struct page **blocks, struct async_submit_ctl *submit) in __2data_recov_n() argument
280 enum async_tx_flags flags = submit->flags; in __2data_recov_n()
281 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_n()
282 void *cb_param = submit->cb_param; in __2data_recov_n()
283 void *scribble = submit->scribble; in __2data_recov_n()
299 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_n()
300 tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); in __2data_recov_n()
311 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_n()
313 tx = async_xor(dp, srcs, 0, 2, bytes, submit); in __2data_recov_n()
318 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_n()
320 tx = async_xor(dq, srcs, 0, 2, bytes, submit); in __2data_recov_n()
327 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_n()
328 tx = async_sum_product(dq, srcs, coef, bytes, submit); in __2data_recov_n()
333 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in __2data_recov_n()
335 tx = async_xor(dp, srcs, 0, 2, bytes, submit); in __2data_recov_n()
351 struct page **blocks, struct async_submit_ctl *submit) in async_raid6_2data_recov() argument
353 void *scribble = submit->scribble; in async_raid6_2data_recov()
370 async_tx_quiesce(&submit->depend_tx); in async_raid6_2data_recov()
379 async_tx_sync_epilog(submit); in async_raid6_2data_recov()
400 return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); in async_raid6_2data_recov()
407 return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); in async_raid6_2data_recov()
409 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); in async_raid6_2data_recov()
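
The quiesce/epilog pair at lines 370 and 379 is the synchronous escape hatch: when recovery cannot be offloaded, the dependency chain is drained, the in-place CPU recovery from lib/raid6 runs, and the caller's completion callback fires. A minimal sketch, assuming the caller has already mapped the disk pages into ptrs[]:

	#include <linux/async_tx.h>
	#include <linux/raid/pq.h>

	/* Minimal sketch of the synchronous fallback; ptrs[] is assumed to
	 * already hold kernel mappings of the disk pages. */
	static void sync_2data_recov_sketch(int disks, size_t bytes, int faila,
					    int failb, void **ptrs,
					    struct async_submit_ctl *submit)
	{
		/* Wait for any dependent async operations to complete. */
		async_tx_quiesce(&submit->depend_tx);

		/* Recover both data blocks in place on the CPU. */
		raid6_2data_recov(disks, bytes, faila, failb, ptrs);

		/* Invoke the caller's completion callback, if any. */
		async_tx_sync_epilog(submit);
	}
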
424 struct page **blocks, struct async_submit_ctl *submit) in async_raid6_datap_recov() argument
429 enum async_tx_flags flags = submit->flags; in async_raid6_datap_recov()
430 dma_async_tx_callback cb_fn = submit->cb_fn; in async_raid6_datap_recov()
431 void *cb_param = submit->cb_param; in async_raid6_datap_recov()
432 void *scribble = submit->scribble; in async_raid6_datap_recov()
446 async_tx_quiesce(&submit->depend_tx); in async_raid6_datap_recov()
455 async_tx_sync_epilog(submit); in async_raid6_datap_recov()
490 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
492 tx = async_memcpy(p, g, 0, 0, bytes, submit); in async_raid6_datap_recov()
494 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
496 tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); in async_raid6_datap_recov()
498 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
500 tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); in async_raid6_datap_recov()
512 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in async_raid6_datap_recov()
514 tx = async_xor(dq, srcs, 0, 2, bytes, submit); in async_raid6_datap_recov()
516 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in async_raid6_datap_recov()
517 tx = async_mult(dq, dq, coef, bytes, submit); in async_raid6_datap_recov()
521 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in async_raid6_datap_recov()
523 tx = async_xor(p, srcs, 0, 2, bytes, submit); in async_raid6_datap_recov()
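
Across all of the recovery routines, the caller-side refs reduce to one idiom: re-initialize the shared async_submit_ctl before every step, threading the previous descriptor through as the dependency with ASYNC_TX_FENCE set, and handing the caller's original flags, callback, and parameter only to the final operation. A minimal sketch of a two-step chain in that style (p, q, d and the other names are placeholders, not the recovery functions' actual variables):

	#include <linux/async_tx.h>

	/* Minimal two-step chain in the style of the recovery routines above;
	 * p, q, d, bytes and scribble are placeholders supplied by the caller. */
	static struct dma_async_tx_descriptor *
	chain_sketch(struct page *p, struct page *q, struct page *d, size_t bytes,
		     addr_conv_t *scribble, enum async_tx_flags flags,
		     dma_async_tx_callback cb_fn, void *cb_param)
	{
		struct page *srcs[2];
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx = NULL;

		/* Intermediate step: fenced, no completion callback yet.
		 * With ASYNC_TX_XOR_DROP_DST the destination also appears
		 * as srcs[0], as in the refs above. */
		srcs[0] = d;
		srcs[1] = q;
		init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_DROP_DST,
				  tx, NULL, NULL, scribble);
		tx = async_xor(d, srcs, 0, 2, bytes, &submit);

		/* Final step: restore the caller's flags, callback and parameter. */
		srcs[0] = p;
		srcs[1] = d;
		init_async_submit(&submit, flags | ASYNC_TX_XOR_DROP_DST, tx,
				  cb_fn, cb_param, scribble);
		tx = async_xor(p, srcs, 0, 2, bytes, &submit);

		return tx;
	}
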