root/drivers/md/raid5.h


DEFINITIONS

This source file includes the following definitions.
  1. r5_next_bio
  2. algorithm_valid_raid5
  3. algorithm_valid_raid6
  4. algorithm_is_DDF

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _RAID5_H
   3 #define _RAID5_H
   4 
   5 #include <linux/raid/xor.h>
   6 #include <linux/dmaengine.h>
   7 
   8 /*
   9  *
  10  * Each stripe contains one buffer per device.  Each buffer can be in
  11  * one of a number of states stored in "flags".  Changes between
  12  * these states happen *almost* exclusively under the protection of the
  13  * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
  14  * these are not protected by STRIPE_ACTIVE.
  15  *
  16  * The flag bits that are used to represent these states are:
  17  *   R5_UPTODATE and R5_LOCKED
  18  *
  19  * State Empty == !UPTODATE, !LOCK
  20  *        We have no data, and there is no active request
  21  * State Want == !UPTODATE, LOCK
  22  *        A read request is being submitted for this block
  23  * State Dirty == UPTODATE, LOCK
  24  *        Some new data is in this buffer, and it is being written out
  25  * State Clean == UPTODATE, !LOCK
  26  *        We have valid data which is the same as on disc
  27  *
  28  * The possible state transitions are:
  29  *
  30  *  Empty -> Want   - on read or write to get old data for parity calc
  31  *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
  32  *  Empty -> Clean  - on compute_block when computing a block for failed drive
  33  *  Want  -> Empty  - on failed read
  34  *  Want  -> Clean  - on successful completion of read request
  35  *  Dirty -> Clean  - on successful completion of write request
  36  *  Dirty -> Clean  - on failed write
  37  *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
  38  *
  39  * The Want->Empty, Want->Clean and Dirty->Clean transitions
  40  * all happen in b_end_io at interrupt time.
  41  * Each sets the Uptodate bit before releasing the Lock bit.
  42  * This leaves one multi-stage transition:
  43  *    Want->Dirty->Clean
  44  * This is safe because thinking that a Clean buffer is actually dirty
  45  * will at worst delay some action, and the stripe will be scheduled
  46  * for attention after the transition is complete.
  47  *
  48  * There is one possibility that is not covered by these states.  That
  49  * is if one drive has failed and there is a spare being rebuilt.  We
  50  * can't distinguish between a clean block that has been generated
  51  * from parity calculations, and a clean block that has been
  52  * successfully written to the spare (or to parity when resyncing).
  53  * To distinguish these states we have a stripe bit STRIPE_INSYNC that
  54  * is set whenever a write is scheduled to the spare, or to the parity
  55  * disc if there is no spare.  A sync request clears this bit, and
  56  * when we find it set with no buffers locked, we know the sync is
  57  * complete.
  58  *
  59  * Buffers for the md device that arrive via make_request are attached
  60  * to the appropriate stripe in one of two lists linked on b_reqnext.
  61  * One list (bh_read) for read requests, one (bh_write) for write.
  62  * There should never be more than one buffer on the two lists
  63  * together, but we are not guaranteed that, so we allow for more.
  64  *
  65  * If a buffer is on the read list when the associated cache buffer is
  66  * Uptodate, the data is copied into the read buffer and its b_end_io
  67  * routine is called.  This may happen in the end_request routine only
  68  * if the buffer has just successfully been read.  end_request should
  69  * remove the buffers from the list and then set the Uptodate bit on
  70  * the buffer.  Other threads may do this only if they first check
  71  * that the Uptodate bit is set.  Once they have checked that, they may
  72  * take buffers off the read queue.
  73  *
  74  * When a buffer on the write list is committed for write it is copied
  75  * into the cache buffer, which is then marked dirty, and moved onto a
  76  * third list, the written list (bh_written).  Once both the parity
  77  * block and the cached buffer are successfully written, any buffer on
  78  * a written list can be returned with b_end_io.
  79  *
  80  * The write list and read list both act as fifos.  The read list,
  81  * write list and written list are protected by the device_lock.
  82  * The device_lock is only for list manipulations and will only be
  83  * held for a very short time.  It can be claimed from interrupts.
  84  *
  85  *
  86  * Stripes in the stripe cache can be on one of two lists (or on
  87  * neither).  The "inactive_list" contains stripes which are not
  88  * currently being used for any request.  They can freely be reused
  89  * for another stripe.  The "handle_list" contains stripes that need
  90  * to be handled in some way.  Both of these are fifo queues.  Each
  91  * stripe is also (potentially) linked to a hash bucket in the hash
  92  * table so that it can be found by sector number.  Stripes that are
  93  * not hashed must be on the inactive_list, and will normally be at
  94  * the front.  All stripes start life this way.
  95  *
  96  * The inactive_list, handle_list and hash bucket lists are all protected by the
  97  * device_lock.
  98  *  - stripes have a reference counter. If count==0, they are on a list.
  99  *  - If a stripe might need handling, STRIPE_HANDLE is set.
 100  *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 101  *    handle_list else inactive_list
 102  *
 103  * This, combined with the fact that STRIPE_HANDLE is only ever
 104  * cleared while a stripe has a non-zero count, means that if the
 105  * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 106  * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 107  * then the stripe is on inactive_list.
 108  *
 109  * The possible transitions are:
 110  *  activate an unhashed/inactive stripe (get_active_stripe())
 111  *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 112  *  activate a hashed, possibly active stripe (get_active_stripe())
 113  *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 114  *  attach a request to an active stripe (add_stripe_bh())
 115  *     lockdev attach-buffer unlockdev
 116  *  handle a stripe (handle_stripe())
 117  *     setSTRIPE_ACTIVE,  clrSTRIPE_HANDLE ...
 118  *              (lockdev check-buffers unlockdev) ..
 119  *              change-state ..
 120  *              record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 121  *  release an active stripe (release_stripe())
 122  *     lockdev if (!--cnt) { if  STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 123  *
 124  * The refcount counts each thread that has activated the stripe,
 125  * plus raid5d if it is handling it, plus one for each active request
 126  * on a cached buffer, plus one if the stripe is undergoing stripe
 127  * operations.
 128  *
 129  * The stripe operations are:
 130  * -copying data between the stripe cache and user application buffers
 131  * -computing blocks to save a disk access, or to recover a missing block
 132  * -updating the parity on a write operation (reconstruct write and
 133  *  read-modify-write)
 134  * -checking parity correctness
 135  * -running i/o to disk
 136  * These operations are carried out by raid5_run_ops which uses the async_tx
 137  * api to (optionally) offload operations to dedicated hardware engines.
 138  * When requesting an operation, handle_stripe sets the pending bit for the
 139  * operation and increments the count.  raid5_run_ops is then run whenever
 140  * the count is non-zero.
 141  * There are some critical dependencies between the operations that prevent some
 142  * from being requested while another is in flight.
 143  * 1/ Parity check operations destroy the in-cache version of the parity block,
 144  *    so we prevent parity-dependent operations like writes and compute_blocks
 145  *    from starting while a check is in progress.  Some DMA engines can perform
 146  *    the check without damaging the parity block; in these cases the parity
 147  *    block is re-marked up to date (assuming the check was successful) and is
 148  *    not re-read from disk.
 149  * 2/ When a write operation is requested we immediately lock the affected
 150  *    blocks, and mark them as not up to date.  This causes new read requests
 151  *    to be held off, as well as parity checks and compute block operations.
 152  * 3/ Once a compute block operation has been requested, handle_stripe treats
 153  *    that block as if it is up to date.  raid5_run_ops guarantees that any
 154  *    operation that is dependent on the compute block result is initiated after
 155  *    the compute block completes.
 156  */
 157 
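/* A minimal sketch of the four buffer states described above, decoded from
 * two illustrative flag bits standing in for R5_UPTODATE and R5_LOCKED
 * (the EX_*/ex_* names are not part of this header):
 */
enum { EX_UPTODATE = 1 << 0, EX_LOCKED = 1 << 1 };

static inline const char *ex_buffer_state(unsigned long flags)
{
	int uptodate = !!(flags & EX_UPTODATE);
	int locked = !!(flags & EX_LOCKED);

	if (!uptodate && !locked)
		return "Empty";		/* no data, no active request */
	if (!uptodate && locked)
		return "Want";		/* read request submitted */
	if (uptodate && locked)
		return "Dirty";		/* new data being written out */
	return "Clean";			/* data matches what is on disc */
}

/* e.g. a successful read completion is Want -> Clean: the end_io path sets
 * the uptodate bit before releasing the lock bit, as noted above.
 */
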
 158 /*
 159  * Operations state - intermediate states that are visible outside of
 160  *   STRIPE_ACTIVE.
 161  * In general _idle indicates nothing is running, _run indicates a data
 162  * processing operation is active, and _result means the data processing result
 163  * is stable and can be acted upon.  For simple operations like biofill and
 164  * compute that only have an _idle and _run state, they are indicated with
 165  * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 166  */
 167 /**
 168  * enum check_states - handles syncing / repairing a stripe
 169  * @check_state_idle - check operations are quiesced
 170  * @check_state_run - check operation is running (also _run_q and _run_pq)
 171  * @check_state_check_result - set outside lock when check result is valid
 172  * @check_state_compute_run - check failed and we are repairing
 173  * @check_state_compute_result - set outside lock when compute result is valid
 174  */
 175 enum check_states {
 176         check_state_idle = 0,
 177         check_state_run, /* xor parity check */
 178         check_state_run_q, /* q-parity check */
 179         check_state_run_pq, /* pq dual parity check */
 180         check_state_check_result,
 181         check_state_compute_run, /* parity repair */
 182         check_state_compute_result,
 183 };
 184 
 185 /**
 186  * enum reconstruct_states - handles writing or expanding a stripe
 187  */
 188 enum reconstruct_states {
 189         reconstruct_state_idle = 0,
 190         reconstruct_state_prexor_drain_run,     /* prexor-write */
 191         reconstruct_state_drain_run,            /* write */
 192         reconstruct_state_run,                  /* expand */
 193         reconstruct_state_prexor_drain_result,
 194         reconstruct_state_drain_result,
 195         reconstruct_state_result,
 196 };
 197 
 198 struct stripe_head {
 199         struct hlist_node       hash;
 200         struct list_head        lru;          /* inactive_list or handle_list */
 201         struct llist_node       release_list;
 202         struct r5conf           *raid_conf;
 203         short                   generation;     /* increments with every
 204                                                  * reshape */
 205         sector_t                sector;         /* sector of this row */
 206         short                   pd_idx;         /* parity disk index */
 207         short                   qd_idx;         /* 'Q' disk index for raid6 */
 208         short                   ddf_layout;/* use DDF ordering to calculate Q */
 209         short                   hash_lock_index;
 210         unsigned long           state;          /* state flags */
 211         atomic_t                count;        /* nr of active thread/requests */
 212         int                     bm_seq; /* sequence number for bitmap flushes */
 213         int                     disks;          /* disks in stripe */
 214         int                     overwrite_disks; /* total overwrite disks in stripe,
 215                                                   * this is only checked when stripe
 216                                                   * has STRIPE_BATCH_READY
 217                                                   */
 218         enum check_states       check_state;
 219         enum reconstruct_states reconstruct_state;
 220         spinlock_t              stripe_lock;
 221         int                     cpu;
 222         struct r5worker_group   *group;
 223 
 224         struct stripe_head      *batch_head; /* protected by stripe lock */
 225         spinlock_t              batch_lock; /* only header's lock is useful */
 226         struct list_head        batch_list; /* protected by head's batch lock*/
 227 
 228         union {
 229                 struct r5l_io_unit      *log_io;
 230                 struct ppl_io_unit      *ppl_io;
 231         };
 232 
 233         struct list_head        log_list;
 234         sector_t                log_start; /* first meta block on the journal */
 235         struct list_head        r5c; /* for r5c_cache->stripe_in_journal */
 236 
 237         struct page             *ppl_page; /* partial parity of this stripe */
 238         /**
 239          * struct stripe_operations
 240          * @target - STRIPE_OP_COMPUTE_BLK target
 241          * @target2 - 2nd compute target in the raid6 case
 242          * @zero_sum_result - P and Q verification flags
 243  * (raid_run_ops request flags are tracked in stripe_head_state.ops_request)
 244          */
 245         struct stripe_operations {
 246                 int                  target, target2;
 247                 enum sum_check_flags zero_sum_result;
 248         } ops;
 249         struct r5dev {
 250                 /* rreq and rvec are used for the replacement device when
 251                  * writing data to both devices.
 252                  */
 253                 struct bio      req, rreq;
 254                 struct bio_vec  vec, rvec;
 255                 struct page     *page, *orig_page;
 256                 struct bio      *toread, *read, *towrite, *written;
 257                 sector_t        sector;                 /* sector of this page */
 258                 unsigned long   flags;
 259                 u32             log_checksum;
 260                 unsigned short  write_hint;
 261         } dev[1]; /* allocated with extra space depending on RAID geometry */
 262 };
 263 
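/* The dev[1] member above is the old-style flexible trailing array: each
 * stripe_head is over-allocated so that dev[] really holds 'disks' entries.
 * A hedged sketch of the sizing arithmetic (illustrative only; the real
 * allocation code lives in raid5.c):
 *
 *	size_t sz = sizeof(struct stripe_head) +
 *		    (disks - 1) * sizeof(struct r5dev);
 *	sh = kmem_cache_zalloc(sc, gfp);   (cache created with object size sz)
 *	sh->dev[0] .. sh->dev[disks - 1] are then all valid
 */
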
 264 /* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 265  *     for handle_stripe.
 266  */
 267 struct stripe_head_state {
 268         /* 'syncing' means that we need to read all devices, either
 269          * to check/correct parity, or to reconstruct a missing device.
 270          * 'replacing' means we are replacing one or more drives and
 271          * the source is valid at this point so we don't need to
 272          * read all devices, just the replacement targets.
 273          */
 274         int syncing, expanding, expanded, replacing;
 275         int locked, uptodate, to_read, to_write, failed, written;
 276         int to_fill, compute, req_compute, non_overwrite;
 277         int injournal, just_cached;
 278         int failed_num[2];
 279         int p_failed, q_failed;
 280         int dec_preread_active;
 281         unsigned long ops_request;
 282 
 283         struct md_rdev *blocked_rdev;
 284         int handle_bad_blocks;
 285         int log_failed;
 286         int waiting_extra_page;
 287 };
 288 
 289 /* Flags for struct r5dev.flags */
 290 enum r5dev_flags {
 291         R5_UPTODATE,    /* page contains current data */
 292         R5_LOCKED,      /* IO has been submitted on "req" */
 293         R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
 294         R5_OVERWRITE,   /* towrite covers whole page */
 295 /* and some that are internal to handle_stripe */
 296         R5_Insync,      /* rdev && rdev->in_sync at start */
 297         R5_Wantread,    /* want to schedule a read */
 298         R5_Wantwrite,
 299         R5_Overlap,     /* There is a pending overlapping request
 300                          * on this block */
 301         R5_ReadNoMerge, /* prevent bio from merging in block-layer */
 302         R5_ReadError,   /* seen a read error here recently */
 303         R5_ReWrite,     /* have tried to over-write the readerror */
 304 
 305         R5_Expanded,    /* This block now has post-expand data */
 306         R5_Wantcompute, /* compute_block in progress treat as
 307                          * uptodate
 308                          */
 309         R5_Wantfill,    /* dev->toread contains a bio that needs
 310                          * filling
 311                          */
 312         R5_Wantdrain,   /* dev->towrite needs to be drained */
 313         R5_WantFUA,     /* Write should be FUA */
 314         R5_SyncIO,      /* The IO is sync */
 315         R5_WriteError,  /* got a write error - need to record it */
 316         R5_MadeGood,    /* A bad block has been fixed by writing to it */
 317         R5_ReadRepl,    /* Will/did read from replacement rather than orig */
 318         R5_MadeGoodRepl,/* A bad block on the replacement device has been
 319                          * fixed by writing to it */
 320         R5_NeedReplace, /* This device has a replacement which is not
 321                          * up-to-date at this stripe. */
 322         R5_WantReplace, /* We need to update the replacement, we have read
 323                          * data in, and now is a good time to write it out.
 324                          */
 325         R5_Discard,     /* Discard the stripe */
 326         R5_SkipCopy,    /* Don't copy data from bio to stripe cache */
 327         R5_InJournal,   /* data being written is in the journal device.
 328                          * if R5_InJournal is set for parity pd_idx, all the
 329                          * data and parity being written are in the journal
 330                          * device
 331                          */
 332         R5_OrigPageUPTDODATE,   /* with write back cache, we read old data into
 333                                  * dev->orig_page for prexor. When this flag is
 334                                  * set, orig_page contains latest data in the
 335                                  * raid disk.
 336                                  */
 337 };
 338 
 339 /*
 340  * Stripe state
 341  */
 342 enum {
 343         STRIPE_ACTIVE,
 344         STRIPE_HANDLE,
 345         STRIPE_SYNC_REQUESTED,
 346         STRIPE_SYNCING,
 347         STRIPE_INSYNC,
 348         STRIPE_REPLACED,
 349         STRIPE_PREREAD_ACTIVE,
 350         STRIPE_DELAYED,
 351         STRIPE_DEGRADED,
 352         STRIPE_BIT_DELAY,
 353         STRIPE_EXPANDING,
 354         STRIPE_EXPAND_SOURCE,
 355         STRIPE_EXPAND_READY,
 356         STRIPE_IO_STARTED,      /* do not count towards 'bypass_count' */
 357         STRIPE_FULL_WRITE,      /* all blocks are set to be overwritten */
 358         STRIPE_BIOFILL_RUN,
 359         STRIPE_COMPUTE_RUN,
 360         STRIPE_ON_UNPLUG_LIST,
 361         STRIPE_DISCARD,
 362         STRIPE_ON_RELEASE_LIST,
 363         STRIPE_BATCH_READY,
 364         STRIPE_BATCH_ERR,
 365         STRIPE_BITMAP_PENDING,  /* Being added to bitmap, don't add
 366                                  * to batch yet.
 367                                  */
 368         STRIPE_LOG_TRAPPED,     /* trapped into log (see raid5-cache.c)
 369                                  * this bit is used in two scenarios:
 370                                  *
 371                                  * 1. write-out phase
 372                                  *  set in first entry of r5l_write_stripe
 373                                  *  clear in second entry of r5l_write_stripe
 374                                  *  used to bypass logic in handle_stripe
 375                                  *
 376                                  * 2. caching phase
 377                                  *  set in r5c_try_caching_write()
 378                                  *  clear when journal write is done
 379                                  *  used to initiate r5c_cache_data()
 380                                  *  also used to bypass logic in handle_stripe
 381                                  */
 382         STRIPE_R5C_CACHING,     /* the stripe is in caching phase
 383                                  * see more detail in the raid5-cache.c
 384                                  */
 385         STRIPE_R5C_PARTIAL_STRIPE,      /* in r5c cache (to-be/being handled or
 386                                          * in conf->r5c_partial_stripe_list)
 387                                          */
 388         STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
 389                                  * in conf->r5c_full_stripe_list)
 390                                  */
 391         STRIPE_R5C_PREFLUSH,    /* need to flush journal device */
 392 };
 393 
 394 #define STRIPE_EXPAND_SYNC_FLAGS \
 395         ((1 << STRIPE_EXPAND_SOURCE) |\
 396         (1 << STRIPE_EXPAND_READY) |\
 397         (1 << STRIPE_EXPANDING) |\
 398         (1 << STRIPE_SYNC_REQUESTED))
 399 /*
 400  * Operation request flags
 401  */
 402 enum {
 403         STRIPE_OP_BIOFILL,
 404         STRIPE_OP_COMPUTE_BLK,
 405         STRIPE_OP_PREXOR,
 406         STRIPE_OP_BIODRAIN,
 407         STRIPE_OP_RECONSTRUCT,
 408         STRIPE_OP_CHECK,
 409         STRIPE_OP_PARTIAL_PARITY,
 410 };
 411 
 412 /*
 413  * RAID parity calculation preferences
 414  */
 415 enum {
 416         PARITY_DISABLE_RMW = 0,
 417         PARITY_ENABLE_RMW,
 418         PARITY_PREFER_RMW,
 419 };
 420 
 421 /*
 422  * Pages requested from set_syndrome_sources()
 423  */
 424 enum {
 425         SYNDROME_SRC_ALL,
 426         SYNDROME_SRC_WANT_DRAIN,
 427         SYNDROME_SRC_WRITTEN,
 428 };
 429 /*
 430  * Plugging:
 431  *
 432  * To improve write throughput, we need to delay the handling of some
 433  * stripes until there has been a chance that several write requests
 434  * for the one stripe have all been collected.
 435  * In particular, any write request that would require pre-reading
 436  * is put on a "delayed" queue until there are no stripes currently
 437  * in a pre-read phase.  Further, if the "delayed" queue is empty when
 438  * a stripe is put on it, then we "plug" the queue and do not process it
 439  * until an unplug call is made (i.e. unplug_io_fn() is called).
 440  *
 441  * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 442  * it to the count of prereading stripes.
 443  * When write is initiated, or the stripe refcnt == 0 (just in case) we
 444  * clear the PREREAD_ACTIVE flag and decrement the count
 445  * Whenever the 'handle' queue is empty and the device is not plugged, we
 446  * move any stripes from delayed to handle and clear the DELAYED flag and set
 447  * PREREAD_ACTIVE.
 448  * In handle_stripe, if we find pre-reading is necessary, we do it if
 449  * PREREAD_ACTIVE is set, else we set DELAYED, which will send it to the delayed queue.
 450  * HANDLE gets cleared if handle_stripe leaves nothing locked.
 451  */
 452 
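/* A hedged sketch of the delay decision described above, in terms of the
 * stripe state bits defined earlier in this file (needs_preread and
 * start_preread() are hypothetical stand-ins for the real handle_stripe
 * logic in raid5.c):
 *
 *	if (needs_preread) {
 *		if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 *			start_preread(sh);
 *		else
 *			set_bit(STRIPE_DELAYED, &sh->state);
 *	}
 */
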
 453 /* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 454  * There are three safe ways to access disk_info.rdev.
 455  * 1/ when holding mddev->reconfig_mutex
 456  * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 457  *    is called as part of performing resync/recovery/reshape.
 458  * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 459  *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 460  *    lock.
 461  * When .rdev is set to NULL, the nr_pending count is checked again and if
 462  * it has been incremented, the pointer is put back in .rdev.
 463  */
 464 
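/* A sketch of access pattern 3/ above (the conventional RCU idiom, not a
 * quote of any particular raid5.c call site; conf and slot i are assumed):
 *
 *	struct md_rdev *rdev;
 *
 *	rcu_read_lock();
 *	rdev = rcu_dereference(conf->disks[i].rdev);
 *	if (rdev)
 *		atomic_inc(&rdev->nr_pending);
 *	rcu_read_unlock();
 *	...issue the IO, then drop nr_pending when it completes...
 */
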
 465 struct disk_info {
 466         struct md_rdev  *rdev, *replacement;
 467         struct page     *extra_page; /* extra page to use in prexor */
 468 };
 469 
 470 /*
 471  * Stripe cache
 472  */
 473 
 474 #define NR_STRIPES              256
 475 #define STRIPE_SIZE             PAGE_SIZE
 476 #define STRIPE_SHIFT            (PAGE_SHIFT - 9)
 477 #define STRIPE_SECTORS          (STRIPE_SIZE>>9)
 478 #define IO_THRESHOLD            1
 479 #define BYPASS_THRESHOLD        1
 480 #define NR_HASH                 (PAGE_SIZE / sizeof(struct hlist_head))
 481 #define HASH_MASK               (NR_HASH - 1)
 482 #define MAX_STRIPE_BATCH        8
 483 
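/* With the constants above, a stripe's hash bucket follows from its sector;
 * a hedged sketch of the mapping (raid5.c provides the real helper, whose
 * exact form is not shown in this header):
 *
 *	hash = (sector >> STRIPE_SHIFT) & HASH_MASK;
 *	bucket = &conf->stripe_hashtbl[hash];
 */
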
 484 /* bios attached to a stripe+device for I/O are linked together in bi_sector
 485  * order without overlap.  There may be several bios per stripe+device, and
 486  * a bio could span several devices.
 487  * When walking this list for a particular stripe+device, we must never proceed
 488  * beyond a bio that extends past this device, as the next bio might no longer
 489  * be valid.
 490  * This function is used to determine the 'next' bio in the list, given the
 491  * sector of the current stripe+device
 492  */
 493 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 494 {
 495         if (bio_end_sector(bio) < sector + STRIPE_SECTORS)
 496                 return bio->bi_next;
 497         else
 498                 return NULL;
 499 }
 500 
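/* Typical use of r5_next_bio(): walk every bio attached to one stripe+device
 * without stepping past a bio that extends beyond this device (dev is an
 * r5dev; handle_one_bio() is a hypothetical stand-in for per-bio work):
 *
 *	struct bio *bio;
 *
 *	for (bio = dev->towrite; bio; bio = r5_next_bio(bio, dev->sector))
 *		handle_one_bio(bio);
 */
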
 501 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 502  * This is because we sometimes take all the spinlocks
 503  * and creating that much locking depth can cause
 504  * problems.
 505  */
 506 #define NR_STRIPE_HASH_LOCKS 8
 507 #define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
 508 
 509 struct r5worker {
 510         struct work_struct work;
 511         struct r5worker_group *group;
 512         struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
 513         bool working;
 514 };
 515 
 516 struct r5worker_group {
 517         struct list_head handle_list;
 518         struct list_head loprio_list;
 519         struct r5conf *conf;
 520         struct r5worker *workers;
 521         int stripes_cnt;
 522 };
 523 
 524 /*
 525  * r5c journal modes of the array: write-back or write-through.
 526  * write-through mode has identical behavior to the existing log-only
 527  * implementation.
 528  */
 529 enum r5c_journal_mode {
 530         R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
 531         R5C_JOURNAL_MODE_WRITE_BACK = 1,
 532 };
 533 
 534 enum r5_cache_state {
 535         R5_INACTIVE_BLOCKED,    /* release of inactive stripes blocked,
 536                                  * waiting for 25% to be free
 537                                  */
 538         R5_ALLOC_MORE,          /* It might help to allocate another
 539                                  * stripe.
 540                                  */
 541         R5_DID_ALLOC,           /* A stripe was allocated, don't allocate
 542                                  * more until at least one has been
 543                                  * released.  This avoids flooding
 544                                  * the cache.
 545                                  */
 546         R5C_LOG_TIGHT,          /* log device space tight, need to
 547                                  * prioritize stripes at last_checkpoint
 548                                  */
 549         R5C_LOG_CRITICAL,       /* log device is running out of space,
 550                                  * only process stripes that are already
 551                                  * occupying the log
 552                                  */
 553         R5C_EXTRA_PAGE_IN_USE,  /* a stripe is using disk_info.extra_page
 554                                  * for prexor
 555                                  */
 556 };
 557 
 558 #define PENDING_IO_MAX 512
 559 #define PENDING_IO_ONE_FLUSH 128
 560 struct r5pending_data {
 561         struct list_head sibling;
 562         sector_t sector; /* stripe sector */
 563         struct bio_list bios;
 564 };
 565 
 566 struct r5conf {
 567         struct hlist_head       *stripe_hashtbl;
 568         /* only protects the corresponding hash list and inactive_list */
 569         spinlock_t              hash_locks[NR_STRIPE_HASH_LOCKS];
 570         struct mddev            *mddev;
 571         int                     chunk_sectors;
 572         int                     level, algorithm, rmw_level;
 573         int                     max_degraded;
 574         int                     raid_disks;
 575         int                     max_nr_stripes;
 576         int                     min_nr_stripes;
 577 
 578         /* reshape_progress is the leading edge of a 'reshape'
 579          * It has value MaxSector when no reshape is happening
 580          * If delta_disks < 0, it is the last sector we started work on,
 581          * else it is the next sector to work on.
 582          */
 583         sector_t                reshape_progress;
 584         /* reshape_safe is the trailing edge of a reshape.  We know that
 585          * before (or after) this address, all reshape has completed.
 586          */
 587         sector_t                reshape_safe;
 588         int                     previous_raid_disks;
 589         int                     prev_chunk_sectors;
 590         int                     prev_algo;
 591         short                   generation; /* increments with every reshape */
 592         seqcount_t              gen_lock;       /* lock against generation changes */
 593         unsigned long           reshape_checkpoint; /* Time we last updated
 594                                                      * metadata */
 595         long long               min_offset_diff; /* minimum difference between
 596                                                   * data_offset and
 597                                                   * new_data_offset across all
 598                                                   * devices.  May be negative,
 599                                                   * but is closest to zero.
 600                                                   */
 601 
 602         struct list_head        handle_list; /* stripes needing handling */
 603         struct list_head        loprio_list; /* low priority stripes */
 604         struct list_head        hold_list; /* preread ready stripes */
 605         struct list_head        delayed_list; /* stripes that have plugged requests */
 606         struct list_head        bitmap_list; /* stripes delayed awaiting bitmap update */
 607         struct bio              *retry_read_aligned; /* currently retrying aligned bios   */
 608         unsigned int            retry_read_offset; /* sector offset into retry_read_aligned */
 609         struct bio              *retry_read_aligned_list; /* aligned bios retry list  */
 610         atomic_t                preread_active_stripes; /* stripes with scheduled io */
 611         atomic_t                active_aligned_reads;
 612         atomic_t                pending_full_writes; /* full write backlog */
 613         int                     bypass_count; /* bypassed prereads */
 614         int                     bypass_threshold; /* preread nice */
 615         int                     skip_copy; /* Don't copy data from bio to stripe cache */
 616         struct list_head        *last_hold; /* detect hold_list promotions */
 617 
 618         atomic_t                reshape_stripes; /* stripes with pending writes for reshape */
 619         /* unfortunately we need two cache names as we temporarily have
 620          * two caches.
 621          */
 622         int                     active_name;
 623         char                    cache_name[2][32];
 624         struct kmem_cache       *slab_cache; /* for allocating stripes */
 625         struct mutex            cache_size_mutex; /* Protect changes to cache size */
 626 
 627         int                     seq_flush, seq_write;
 628         int                     quiesce;
 629 
 630         int                     fullsync;  /* set to 1 if a full sync is needed,
 631                                             * (fresh device added).
 632                                             * Cleared when a sync completes.
 633                                             */
 634         int                     recovery_disabled;
 635         /* per cpu variables */
 636         struct raid5_percpu {
 637                 struct page     *spare_page; /* Used when checking P/Q in raid6 */
 638                 void            *scribble;  /* space for constructing buffer
 639                                              * lists and performing address
 640                                              * conversions
 641                                              */
 642                 int scribble_obj_size;
 643         } __percpu *percpu;
 644         int scribble_disks;
 645         int scribble_sectors;
 646         struct hlist_node node;
 647 
 648         /*
 649          * Free stripes pool
 650          */
 651         atomic_t                active_stripes;
 652         struct list_head        inactive_list[NR_STRIPE_HASH_LOCKS];
 653 
 654         atomic_t                r5c_cached_full_stripes;
 655         struct list_head        r5c_full_stripe_list;
 656         atomic_t                r5c_cached_partial_stripes;
 657         struct list_head        r5c_partial_stripe_list;
 658         atomic_t                r5c_flushing_full_stripes;
 659         atomic_t                r5c_flushing_partial_stripes;
 660 
 661         atomic_t                empty_inactive_list_nr;
 662         struct llist_head       released_stripes;
 663         wait_queue_head_t       wait_for_quiescent;
 664         wait_queue_head_t       wait_for_stripe;
 665         wait_queue_head_t       wait_for_overlap;
 666         unsigned long           cache_state;
 667         struct shrinker         shrinker;
 668         int                     pool_size; /* number of disks in stripeheads in pool */
 669         spinlock_t              device_lock;
 670         struct disk_info        *disks;
 671         struct bio_set          bio_split;
 672 
 673         /* When taking over an array from a different personality, we store
 674          * the new thread here until we fully activate the array.
 675          */
 676         struct md_thread        *thread;
 677         struct list_head        temp_inactive_list[NR_STRIPE_HASH_LOCKS];
 678         struct r5worker_group   *worker_groups;
 679         int                     group_cnt;
 680         int                     worker_cnt_per_group;
 681         struct r5l_log          *log;
 682         void                    *log_private;
 683 
 684         spinlock_t              pending_bios_lock;
 685         bool                    batch_bio_dispatch;
 686         struct r5pending_data   *pending_data;
 687         struct list_head        free_list;
 688         struct list_head        pending_list;
 689         int                     pending_data_cnt;
 690         struct r5pending_data   *next_pending_data;
 691 };
 692 
 693 
 694 /*
 695  * Our supported algorithms
 696  */
 697 #define ALGORITHM_LEFT_ASYMMETRIC       0 /* Rotating Parity N with Data Restart */
 698 #define ALGORITHM_RIGHT_ASYMMETRIC      1 /* Rotating Parity 0 with Data Restart */
 699 #define ALGORITHM_LEFT_SYMMETRIC        2 /* Rotating Parity N with Data Continuation */
 700 #define ALGORITHM_RIGHT_SYMMETRIC       3 /* Rotating Parity 0 with Data Continuation */
 701 
 702 /* Define non-rotating (raid4) algorithms.  These allow
 703  * conversion of raid4 to raid5.
 704  */
 705 #define ALGORITHM_PARITY_0              4 /* P or P,Q are initial devices */
 706 #define ALGORITHM_PARITY_N              5 /* P or P,Q are final devices. */
 707 
 708 /* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 709  * Firstly, the exact positioning of the parity block is slightly
 710  * different between the 'LEFT_*' modes of md and the "_N_*" modes
 711  * of DDF.
 712  * Secondly, the order of data blocks over which the Q syndrome is computed
 713  * is different.
 714  * Consequently we have different layouts for DDF/raid6 than md/raid6.
 715  * These layouts are from the DDFv1.2 spec.
 716  * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 717  * leaves RLQ=3 as 'Vendor Specific'.
 718  */
 719 
 720 #define ALGORITHM_ROTATING_ZERO_RESTART 8 /* DDF PRL=6 RLQ=1 */
 721 #define ALGORITHM_ROTATING_N_RESTART    9 /* DDF PRL=6 RLQ=2 */
 722 #define ALGORITHM_ROTATING_N_CONTINUE   10 /*DDF PRL=6 RLQ=3 */
 723 
 724 /* For every RAID5 algorithm we define a RAID6 algorithm
 725  * with exactly the same layout for data and parity, and
 726  * with the Q block always on the last device (N-1).
 727  * This allows trivial conversion from RAID5 to RAID6
 728  */
 729 #define ALGORITHM_LEFT_ASYMMETRIC_6     16
 730 #define ALGORITHM_RIGHT_ASYMMETRIC_6    17
 731 #define ALGORITHM_LEFT_SYMMETRIC_6      18
 732 #define ALGORITHM_RIGHT_SYMMETRIC_6     19
 733 #define ALGORITHM_PARITY_0_6            20
 734 #define ALGORITHM_PARITY_N_6            ALGORITHM_PARITY_N
 735 
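/* A hedged sketch of the RAID5 -> RAID6 layout mapping implied by the
 * definitions above (illustrative only; not a helper this header provides).
 * Assuming a valid RAID5 layout, everything except PARITY_N moves up by 16,
 * and PARITY_N maps to itself:
 */
static inline int demo_layout_raid5_to_raid6(int layout)
{
	if (layout == ALGORITHM_PARITY_N)
		return ALGORITHM_PARITY_N_6;	/* defined equal to PARITY_N */
	return layout + 16;	/* e.g. LEFT_SYMMETRIC (2) -> LEFT_SYMMETRIC_6 (18) */
}
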
 736 static inline int algorithm_valid_raid5(int layout)
 737 {
 738         return (layout >= 0) &&
 739                 (layout <= 5);
 740 }
 741 static inline int algorithm_valid_raid6(int layout)
 742 {
 743         return (layout >= 0 && layout <= 5)
 744                 ||
 745                 (layout >= 8 && layout <= 10)
 746                 ||
 747                 (layout >= 16 && layout <= 20);
 748 }
 749 
 750 static inline int algorithm_is_DDF(int layout)
 751 {
 752         return layout >= 8 && layout <= 10;
 753 }
 754 
 755 extern void md_raid5_kick_device(struct r5conf *conf);
 756 extern int raid5_set_cache_size(struct mddev *mddev, int size);
 757 extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
 758 extern void raid5_release_stripe(struct stripe_head *sh);
 759 extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
 760                                      int previous, int *dd_idx,
 761                                      struct stripe_head *sh);
 762 extern struct stripe_head *
 763 raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 764                         int previous, int noblock, int noquiesce);
 765 extern int raid5_calc_degraded(struct r5conf *conf);
 766 extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
 767 #endif
