Lines Matching refs:disks
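
These hits appear to come from the kernel's crypto/async_tx/async_pq.c, the
asynchronous P+Q (RAID-6 syndrome) offload code. Throughout that file, "disks"
counts the data blocks plus the two parity blocks, and the parity pages live in
the last two slots of the blocks[] array. A minimal sketch of that convention
(the macro names match what async_pq.c appears to use; treat the exact
definitions as illustrative):

    /* blocks[0 .. disks-3] hold data; the last two slots hold parity. */
    #define P(b, d) ((b)[(d) - 2])   /* P (XOR) parity page  */
    #define Q(b, d) ((b)[(d) - 1])   /* Q (RS syndrome) page */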

50 const unsigned char *scfs, int disks,  in do_async_gen_syndrome()  argument
60 int src_cnt = disks - 2; in do_async_gen_syndrome()
91 dma_dest[0] = unmap->addr[disks - 2]; in do_async_gen_syndrome()
92 dma_dest[1] = unmap->addr[disks - 1]; in do_async_gen_syndrome()
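
In do_async_gen_syndrome() the hits show how a disks-sized DMA mapping is split
for the hardware: the first disks - 2 entries are data sources, and the last two
mapped addresses become the P and Q destinations. A rough stand-alone sketch of
that split (dma_addr_t is typedef'd here only so the fragment compiles outside
the kernel, and the helper name is hypothetical):

    #include <stdint.h>

    typedef uint64_t dma_addr_t;             /* stand-in for the kernel type */

    /* Hypothetical helper mirroring the indexing seen above: addr[] has one
     * mapped page per disk, data first, then P, then Q. */
    static void split_pq(dma_addr_t *addr, int disks,
                         dma_addr_t **srcs, int *src_cnt, dma_addr_t dest[2])
    {
            *srcs    = addr;                 /* addr[0 .. disks-3]: data sources */
            *src_cnt = disks - 2;
            dest[0]  = addr[disks - 2];      /* P destination */
            dest[1]  = addr[disks - 1];      /* Q destination */
    }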
122 do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in do_sync_gen_syndrome() argument
127 int start = -1, stop = disks - 3; in do_sync_gen_syndrome()
134 for (i = 0; i < disks; i++) { in do_sync_gen_syndrome()
136 BUG_ON(i > disks - 3); /* P or Q can't be zero */ in do_sync_gen_syndrome()
140 if (i < disks - 2) { in do_sync_gen_syndrome()
150 raid6_call.xor_syndrome(disks, start, stop, len, srcs); in do_sync_gen_syndrome()
152 raid6_call.gen_syndrome(disks, len, srcs); in do_sync_gen_syndrome()
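
do_sync_gen_syndrome() is the CPU fallback: it collects page addresses into
srcs[], substitutes a zero page for missing data blocks (a missing P or Q here
is a bug, hence the BUG_ON), and hands the whole array to the raid6 library.
Below is a byte-at-a-time model of what gen_syndrome(disks, len, srcs) computes
over that layout; the kernel's raid6_call implementations do the same math
word- or vector-wide:

    #include <stddef.h>
    #include <stdint.h>

    /* Multiply by x (0x02) in GF(2^8) modulo x^8 + x^4 + x^3 + x^2 + 1. */
    static uint8_t gf_mul2(uint8_t v)
    {
            return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    /* Model of gen_syndrome(): srcs[0 .. disks-3] are data bytes,
     * srcs[disks-2] receives P (plain XOR), srcs[disks-1] receives Q
     * (the GF(2^8) weighted sum, evaluated by Horner's rule). */
    static void gen_syndrome_model(int disks, size_t len, uint8_t **srcs)
    {
            uint8_t *p = srcs[disks - 2];
            uint8_t *q = srcs[disks - 1];

            for (size_t i = 0; i < len; i++) {
                    uint8_t wp = 0, wq = 0;

                    for (int z = disks - 3; z >= 0; z--) {
                            wp ^= srcs[z][i];                 /* P = D0 ^ D1 ^ ... */
                            wq  = gf_mul2(wq) ^ srcs[z][i];   /* Q = sum 2^z * Dz  */
                    }
                    p[i] = wp;
                    q[i] = wq;
            }
    }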
178 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in async_gen_syndrome() argument
181 int src_cnt = disks - 2; in async_gen_syndrome()
183 &P(blocks, disks), 2, in async_gen_syndrome()
188 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); in async_gen_syndrome()
191 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); in async_gen_syndrome()
205 __func__, disks, len); in async_gen_syndrome()
226 if (P(blocks, disks)) in async_gen_syndrome()
227 unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), in async_gen_syndrome()
235 if (Q(blocks, disks)) in async_gen_syndrome()
236 unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), in async_gen_syndrome()
251 pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); in async_gen_syndrome()
256 if (!P(blocks, disks)) { in async_gen_syndrome()
257 P(blocks, disks) = pq_scribble_page; in async_gen_syndrome()
260 if (!Q(blocks, disks)) { in async_gen_syndrome()
261 Q(blocks, disks) = pq_scribble_page; in async_gen_syndrome()
264 do_sync_gen_syndrome(blocks, offset, disks, len, submit); in async_gen_syndrome()
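
async_gen_syndrome() first tries to hand the operation to a DMA channel (the
mapping and pr_debug hits above); when that is not possible it takes the
synchronous path, and a NULL P or Q pointer, which means the caller does not
want that output, is temporarily pointed at a scribble page so the CPU code
still has somewhere to write. A sketch of that fallback logic, reusing
gen_syndrome_model() from above (the restore step at the end is a detail of
this sketch, not a claim about the kernel code):

    /* Hypothetical fallback mirroring the NULL-handling seen above. */
    static void sync_fallback(uint8_t **blocks, int disks, size_t len,
                              uint8_t *scribble)
    {
            int p_missing = (blocks[disks - 2] == NULL);
            int q_missing = (blocks[disks - 1] == NULL);

            /* Caller passed NULL for an output it does not care about:
             * redirect it to a throwaway buffer instead of skipping it. */
            if (p_missing)
                    blocks[disks - 2] = scribble;
            if (q_missing)
                    blocks[disks - 1] = scribble;

            gen_syndrome_model(disks, len, blocks);

            /* Hand the caller's NULLs back. */
            if (p_missing)
                    blocks[disks - 2] = NULL;
            if (q_missing)
                    blocks[disks - 1] = NULL;
    }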
271 pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) in pq_val_chan() argument
277 disks, len); in pq_val_chan()
296 async_syndrome_val(struct page **blocks, unsigned int offset, int disks, in async_syndrome_val() argument
300 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); in async_syndrome_val()
303 unsigned char coefs[disks-2]; in async_syndrome_val()
307 BUG_ON(disks < 4); in async_syndrome_val()
310 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); in async_syndrome_val()
312 if (unmap && disks <= dma_maxpq(device, 0) && in async_syndrome_val()
319 __func__, disks, len); in async_syndrome_val()
322 for (i = 0; i < disks-2; i++) in async_syndrome_val()
333 if (!P(blocks, disks)) { in async_syndrome_val()
337 pq[0] = dma_map_page(dev, P(blocks, disks), in async_syndrome_val()
343 if (!Q(blocks, disks)) { in async_syndrome_val()
347 pq[1] = dma_map_page(dev, Q(blocks, disks), in async_syndrome_val()
374 struct page *p_src = P(blocks, disks); in async_syndrome_val()
375 struct page *q_src = Q(blocks, disks); in async_syndrome_val()
383 __func__, disks, len); in async_syndrome_val()
401 tx = async_xor(spare, blocks, offset, disks-2, len, submit); in async_syndrome_val()
409 P(blocks, disks) = NULL; in async_syndrome_val()
410 Q(blocks, disks) = spare; in async_syndrome_val()
412 tx = async_gen_syndrome(blocks, offset, disks, len, submit); in async_syndrome_val()
420 P(blocks, disks) = p_src; in async_syndrome_val()
421 Q(blocks, disks) = q_src; in async_syndrome_val()
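
async_syndrome_val() checks existing parity rather than generating it. On the
synchronous path visible above, the stored P and Q pages are set aside
(p_src/q_src), the parity is recomputed into spare space via async_xor() and
async_gen_syndrome(), and the originals are restored afterwards. A
self-contained model of that check-by-recompute idea, reusing
gen_syndrome_model() from above (the mismatch flag names here are made up for
the sketch, not the kernel's):

    #include <string.h>

    enum { P_MISMATCH = 1 << 0, Q_MISMATCH = 1 << 1 };   /* hypothetical flags */

    static unsigned syndrome_val_model(uint8_t **blocks, int disks, size_t len,
                                       uint8_t *spare_p, uint8_t *spare_q)
    {
            uint8_t *p_src = blocks[disks - 2];
            uint8_t *q_src = blocks[disks - 1];
            unsigned result = 0;

            /* Recompute both parities into spare buffers ... */
            blocks[disks - 2] = spare_p;
            blocks[disks - 1] = spare_q;
            gen_syndrome_model(disks, len, blocks);       /* sketch from above */

            /* ... and flag whichever stored copy does not match. */
            if (p_src && memcmp(p_src, spare_p, len))
                    result |= P_MISMATCH;
            if (q_src && memcmp(q_src, spare_q, len))
                    result |= Q_MISMATCH;

            /* Restore the caller's parity pointers. */
            blocks[disks - 2] = p_src;
            blocks[disks - 1] = q_src;
            return result;
    }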