root/crypto/async_tx/async_raid6_recov.c


DEFINITIONS

This source file includes the following definitions:
  1. async_sum_product
  2. async_mult
  3. __2data_recov_4
  4. __2data_recov_5
  5. __2data_recov_n
  6. async_raid6_2data_recov
  7. async_raid6_datap_recov

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
 * Copyright(c) 2009 Intel Corporation
 *
 * based on raid6recov.c:
 *   Copyright 2002 H. Peter Anvin
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
                  size_t len, struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &dest, 1, srcs, 2, len);
        struct dma_device *dma = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;
        const u8 *amul, *bmul;
        u8 ax, bx;
        u8 *a, *b, *c;

        if (dma)
                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

        if (unmap) {
                struct device *dev = dma->dev;
                dma_addr_t pq[2];
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
                unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
                unmap->to_cnt = 2;

                unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
                unmap->bidi_cnt = 1;
                /* engine only looks at Q, but expects it to follow P */
                pq[1] = unmap->addr[2];

                unmap->len = len;
                tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
                                             len, dma_flags);
                if (tx) {
                        dma_set_unmap(tx, unmap);
                        async_tx_submit(chan, tx, submit);
                        dmaengine_unmap_put(unmap);
                        return tx;
                }

                /* could not get a descriptor, unmap and fall through to
                 * the synchronous path
                 */
                dmaengine_unmap_put(unmap);
        }

        /* run the operation synchronously */
        async_tx_quiesce(&submit->depend_tx);
        amul = raid6_gfmul[coef[0]];
        bmul = raid6_gfmul[coef[1]];
        a = page_address(srcs[0]);
        b = page_address(srcs[1]);
        c = page_address(dest);

        while (len--) {
                ax    = amul[*a++];
                bx    = bmul[*b++];
                *c++ = ax ^ bx;
        }

        return NULL;
}

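/*
 * Note on async_sum_product() above: it computes the GF(256) linear
 * combination dest = coef[0]*srcs[0] + coef[1]*srcs[1], one byte at a
 * time.  A PQ engine can be pressed into this service because its Q
 * output is exactly such a combination of its sources, and
 * DMA_PREP_PQ_DISABLE_P suppresses the unwanted P output.  A short
 * worked example of the synchronous fallback (values picked arbitrarily;
 * the RAID-6 field is GF(2^8) with generator polynomial 0x11d):
 *
 *      coef[0] = 0x02, coef[1] = 0x01, *a = 0x80, *b = 0x0f
 *      amul[*a] = raid6_gfmul[0x02][0x80] = 0x1d
 *      bmul[*b] = raid6_gfmul[0x01][0x0f] = 0x0f
 *      *c = 0x1d ^ 0x0f = 0x12
 */
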
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
           struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &dest, 1, &src, 1, len);
        struct dma_device *dma = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;
        const u8 *qmul; /* Q multiplier table */
        u8 *d, *s;

        if (dma)
                unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

        if (unmap) {
                dma_addr_t dma_dest[2];
                struct device *dev = dma->dev;
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
                unmap->to_cnt++;
                unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
                dma_dest[1] = unmap->addr[1];
                unmap->bidi_cnt++;
                unmap->len = len;

                /* this looks funny, but the engine looks for Q at
                 * dma_dest[1] and ignores dma_dest[0] as a dest
                 * due to DMA_PREP_PQ_DISABLE_P
                 */
                tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
                                             1, &coef, len, dma_flags);

                if (tx) {
                        dma_set_unmap(tx, unmap);
                        dmaengine_unmap_put(unmap);
                        async_tx_submit(chan, tx, submit);
                        return tx;
                }

                /* could not get a descriptor, unmap and fall through to
                 * the synchronous path
                 */
                dmaengine_unmap_put(unmap);
        }

        /* no channel available, or failed to allocate a descriptor, so
         * perform the operation synchronously
         */
        async_tx_quiesce(&submit->depend_tx);
        qmul  = raid6_gfmul[coef];
        d = page_address(dest);
        s = page_address(src);

        while (len--)
                *d++ = qmul[*s++];

        return NULL;
}

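/*
 * async_mult() is the single-source case of the same trick: with one
 * source and P disabled, the Q destination receives coef * src over
 * GF(256).  A minimal illustrative call (hypothetical caller; dest, src,
 * k and scribble are placeholders, not names defined in this file):
 *
 *      struct async_submit_ctl submit;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL,
 *                        scribble);
 *      tx = async_mult(dest, src, raid6_gfexp[k], bytes, &submit);
 */
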
static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
                struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *a, *b;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;

        p = blocks[disks-2];
        q = blocks[disks-1];

        a = blocks[faila];
        b = blocks[failb];

        /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = p;
        srcs[1] = q;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_sum_product(b, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = p;
        srcs[1] = b;
        init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(a, srcs, 0, 2, bytes, submit);

        return tx;
}

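/*
 * Why the 4-disk shortcut works: the only two data blocks are the failed
 * ones, so the partial syndrome over the surviving data is all zeroes
 * (Pxy == 0, Qxy == 0) and hence P + Pxy == P, Q + Qxy == Q.  The whole
 * recovery collapses to one sum_product plus one xor:
 *
 *      Db = A*P + B*Q          (written into blocks[failb] above)
 *      Da = P + Db             (written into blocks[faila] above)
 *
 * with A = raid6_gfexi[failb-faila] = 1/(g^(failb-faila) + 1) and
 * B = raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]
 *   = 1/(g^faila + g^failb); see the general derivation sketched after
 * __2data_recov_n() below.
 */
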
static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
                struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *g, *dp, *dq;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
        int good_srcs, good, i;

        good_srcs = 0;
        good = -1;
        for (i = 0; i < disks-2; i++) {
                if (blocks[i] == NULL)
                        continue;
                if (i == faila || i == failb)
                        continue;
                good = i;
                good_srcs++;
        }
        BUG_ON(good_srcs > 1);

        p = blocks[disks-2];
        q = blocks[disks-1];
        g = blocks[good];

        /* Compute syndrome with zero for the missing data pages
         * Use the dead data pages as temporary storage for delta p and
         * delta q
         */
        dp = blocks[faila];
        dq = blocks[failb];

        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_memcpy(dp, g, 0, 0, bytes, submit);
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);

        /* compute P + Pxy */
        srcs[0] = dp;
        srcs[1] = p;
        init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          NULL, NULL, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        /* compute Q + Qxy */
        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          NULL, NULL, scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = dp;
        srcs[1] = dq;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_sum_product(dq, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = dp;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        return tx;
}

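/*
 * In the 5-disk case exactly one data block survives, so the partial
 * syndrome degenerates: Pxy is simply a copy of the good block and Qxy
 * is the good block multiplied by raid6_gfexp[good].  That is why a
 * plain async_memcpy() plus async_mult() stand in for
 * async_gen_syndrome() above; not every PQ engine accepts a
 * single-source syndrome request.  From the P+Pxy / Q+Qxy step onward
 * the function is identical to the general path below.
 */
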
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
              struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *dp, *dq;
        struct page *srcs[2];
        unsigned char coef[2];
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;

        p = blocks[disks-2];
        q = blocks[disks-1];

        /* Compute syndrome with zero for the missing data pages
         * Use the dead data pages as temporary storage for
         * delta p and delta q
         */
        dp = blocks[faila];
        blocks[faila] = NULL;
        blocks[disks-2] = dp;
        dq = blocks[failb];
        blocks[failb] = NULL;
        blocks[disks-1] = dq;

        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);

        /* Restore pointer table */
        blocks[faila]   = dp;
        blocks[failb]   = dq;
        blocks[disks-2] = p;
        blocks[disks-1] = q;

        /* compute P + Pxy */
        srcs[0] = dp;
        srcs[1] = p;
        init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          NULL, NULL, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        /* compute Q + Qxy */
        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          NULL, NULL, scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
        srcs[0] = dp;
        srcs[1] = dq;
        coef[0] = raid6_gfexi[failb-faila];
        coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_sum_product(dq, srcs, coef, bytes, submit);

        /* Dy = P+Pxy+Dx */
        srcs[0] = dp;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(dp, srcs, 0, 2, bytes, submit);

        return tx;
}

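/*
 * Sketch of the algebra behind the steps above.  Writing Da and Db for
 * the missing blocks at faila and failb, and g for the GF(256)
 * generator:
 *
 *      P + Pxy = Da + Db
 *      Q + Qxy = g^faila * Da + g^failb * Db
 *
 * Solving the pair of equations gives
 *
 *      Db = A*(P+Pxy) + B*(Q+Qxy)      ("Dx" in the comments, left in dq)
 *      Da = (P+Pxy) + Db               ("Dy" in the comments, left in dp)
 *
 * where A = 1/(g^(failb-faila) + 1) = raid6_gfexi[failb-faila]
 *       B = 1/(g^faila + g^failb)   = raid6_gfinv[raid6_gfexp[faila] ^
 *                                                 raid6_gfexp[failb]]
 *
 * i.e. exactly coef[0] and coef[1] handed to async_sum_product().
 */
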
/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: first failed drive index
 * @failb: second failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                        struct page **blocks, struct async_submit_ctl *submit)
{
        void *scribble = submit->scribble;
        int non_zero_srcs, i;

        BUG_ON(faila == failb);
        if (failb < faila)
                swap(faila, failb);

        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

        /* if a dma resource is not available or a scribble buffer is not
         * available punt to the synchronous path.  In the 'dma not
         * available' case be sure to use the scribble buffer to
         * preserve the content of 'blocks' as the caller intended.
         */
        if (!async_dma_find_channel(DMA_PQ) || !scribble) {
                void **ptrs = scribble ? scribble : (void **) blocks;

                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
                        if (blocks[i] == NULL)
                                ptrs[i] = (void *) raid6_empty_zero_page;
                        else
                                ptrs[i] = page_address(blocks[i]);

                raid6_2data_recov(disks, bytes, faila, failb, ptrs);

                async_tx_sync_epilog(submit);

                return NULL;
        }

        non_zero_srcs = 0;
        for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
                if (blocks[i])
                        non_zero_srcs++;
        switch (non_zero_srcs) {
        case 0:
        case 1:
                /* There must be at least 2 sources - the failed devices. */
                BUG();

        case 2:
                /* dma devices do not uniformly understand a zero source pq
                 * operation (in contrast to the synchronous case), so
                 * explicitly handle the special case of a 4 disk array with
                 * both data disks missing.
                 */
                return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
        case 3:
                /* dma devices do not uniformly understand a single
                 * source pq operation (in contrast to the synchronous
                 * case), so explicitly handle the special case of a 5 disk
                 * array with 2 of 3 data disks missing.
                 */
                return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
        default:
                return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
        }
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);

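/*
 * Illustrative call sequence only (ctx, done_cb and scribble are
 * hypothetical placeholders, not names defined here): a caller that has
 * identified two failed data blocks in a stripe might issue
 *
 *      struct async_submit_ctl submit;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      init_async_submit(&submit, 0, NULL, done_cb, ctx, scribble);
 *      tx = async_raid6_2data_recov(disks, bytes, faila, failb, blocks,
 *                                   &submit);
 *
 * and either get back a descriptor to chain further operations on, or
 * NULL if the recovery was carried out synchronously.
 */
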
/**
 * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
                        struct page **blocks, struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct page *p, *q, *dq;
        u8 coef;
        enum async_tx_flags flags = submit->flags;
        dma_async_tx_callback cb_fn = submit->cb_fn;
        void *cb_param = submit->cb_param;
        void *scribble = submit->scribble;
        int good_srcs, good, i;
        struct page *srcs[2];

        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

        /* if a dma resource is not available or a scribble buffer is not
         * available punt to the synchronous path.  In the 'dma not
         * available' case be sure to use the scribble buffer to
         * preserve the content of 'blocks' as the caller intended.
         */
        if (!async_dma_find_channel(DMA_PQ) || !scribble) {
                void **ptrs = scribble ? scribble : (void **) blocks;

                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
                        if (blocks[i] == NULL)
                                ptrs[i] = (void *) raid6_empty_zero_page;
                        else
                                ptrs[i] = page_address(blocks[i]);

                raid6_datap_recov(disks, bytes, faila, ptrs);

                async_tx_sync_epilog(submit);

                return NULL;
        }

        good_srcs = 0;
        good = -1;
        for (i = 0; i < disks-2; i++) {
                if (i == faila)
                        continue;
                if (blocks[i]) {
                        good = i;
                        good_srcs++;
                        if (good_srcs > 1)
                                break;
                }
        }
        BUG_ON(good_srcs == 0);

        p = blocks[disks-2];
        q = blocks[disks-1];

        /* Compute syndrome with zero for the missing data page
         * Use the dead data page as temporary storage for delta q
         */
        dq = blocks[faila];
        blocks[faila] = NULL;
        blocks[disks-1] = dq;

        /* in the 4-disk case we only need to perform a single source
         * multiplication with the one good data block.
         */
        if (good_srcs == 1) {
                struct page *g = blocks[good];

                init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                                  scribble);
                tx = async_memcpy(p, g, 0, 0, bytes, submit);

                init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                                  scribble);
                tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
        } else {
                init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
                                  scribble);
                tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
        }

        /* Restore pointer table */
        blocks[faila]   = dq;
        blocks[disks-1] = q;

        /* calculate g^{-faila} */
        coef = raid6_gfinv[raid6_gfexp[faila]];

        srcs[0] = dq;
        srcs[1] = q;
        init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          NULL, NULL, scribble);
        tx = async_xor(dq, srcs, 0, 2, bytes, submit);

        init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        tx = async_mult(dq, dq, coef, bytes, submit);

        srcs[0] = p;
        srcs[1] = dq;
        init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
                          cb_param, scribble);
        tx = async_xor(p, srcs, 0, 2, bytes, submit);

        return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);

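/*
 * Sketch of the algebra for the data+P case above: once the syndrome of
 * the surviving data has been (re)generated, p holds Pxy and dq holds
 * Qxy, so
 *
 *      Q + Qxy = g^faila * Dx
 *      Dx      = (Q + Qxy) * g^(-faila)        (coef above)
 *      P       = Pxy + Dx
 *
 * which is the xor / async_mult / xor sequence at the end of the
 * function.
 */
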
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");
