root/drivers/lightnvm/pblk.h

DEFINITIONS

This source file includes the following definitions.
  1. nvm_rq_from_c_ctx
  2. emeta_to_bb
  3. emeta_to_wa
  4. emeta_to_lbas
  5. emeta_to_vsc
  6. pblk_line_vsc
  7. pblk_ppa_to_line_id
  8. pblk_ppa_to_line
  9. pblk_ppa_to_pos
  10. addr_to_gen_ppa
  11. pblk_dev_ppa_to_chunk
  12. pblk_dev_ppa_to_chunk_addr
  13. pblk_dev_ppa_to_line_addr
  14. pblk_ppa32_to_ppa64
  15. pblk_ppa64_to_ppa32
  16. pblk_trans_map_get
  17. pblk_trans_map_set
  18. pblk_ppa_empty
  19. pblk_ppa_set_empty
  20. pblk_ppa_comp
  21. pblk_addr_in_cache
  22. pblk_addr_to_cacheline
  23. pblk_cacheline_to_addr
  24. pblk_calc_meta_header_crc
  25. pblk_calc_smeta_crc
  26. pblk_calc_emeta_crc
  27. pblk_io_aligned
  28. print_ppa
  29. pblk_print_failed_rqd
  30. pblk_boundary_ppa_checks
  31. pblk_check_io
  32. pblk_boundary_paddr_checks
  33. pblk_get_bi_idx
  34. pblk_get_lba
  35. pblk_get_secs
  36. pblk_disk_name
  37. pblk_get_min_chks
  38. pblk_get_meta
  39. pblk_dma_meta_size
  40. pblk_is_oob_meta_supported

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
   4  * Copyright (C) 2016 CNEX Labs
   5  * Initial release: Matias Bjorling <matias@cnexlabs.com>
   6  * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
   7  *
   8  * This program is free software; you can redistribute it and/or
   9  * modify it under the terms of the GNU General Public License version
  10  * 2 as published by the Free Software Foundation.
  11  *
  12  * This program is distributed in the hope that it will be useful, but
  13  * WITHOUT ANY WARRANTY; without even the implied warranty of
  14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15  * General Public License for more details.
  16  *
  17  * Implementation of a Physical Block-device target for Open-channel SSDs.
  18  *
  19  */
  20 
  21 #ifndef PBLK_H_
  22 #define PBLK_H_
  23 
  24 #include <linux/blkdev.h>
  25 #include <linux/blk-mq.h>
  26 #include <linux/bio.h>
  27 #include <linux/module.h>
  28 #include <linux/kthread.h>
  29 #include <linux/vmalloc.h>
  30 #include <linux/crc32.h>
  31 #include <linux/uuid.h>
  32 
  33 #include <linux/lightnvm.h>
  34 
  35 /* Run only GC if less than 1/X blocks are free */
  36 #define GC_LIMIT_INVERSE 5
  37 #define GC_TIME_MSECS 1000
  38 
  39 #define PBLK_SECTOR (512)
  40 #define PBLK_EXPOSED_PAGE_SIZE (4096)
  41 
  42 #define PBLK_NR_CLOSE_JOBS (4)
  43 
  44 #define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
  45 
  46 /* Max 512 LUNs per device */
  47 #define PBLK_MAX_LUNS_BITMAP (4)
  48 
  49 #define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
  50 
  51 /* Static pool sizes */
  52 #define PBLK_GEN_WS_POOL_SIZE (2)
  53 
  54 #define PBLK_DEFAULT_OP (11)
  55 
  56 enum {
  57         PBLK_READ               = READ,
  58         PBLK_WRITE              = WRITE, /* Write from write buffer */
  59         PBLK_WRITE_INT,                 /* Internal write - no write buffer */
  60         PBLK_READ_RECOV,                /* Recovery read - errors allowed */
  61         PBLK_ERASE,
  62 };
  63 
  64 enum {
  65         /* IO Types */
  66         PBLK_IOTYPE_USER        = 1 << 0,
  67         PBLK_IOTYPE_GC          = 1 << 1,
  68 
  69         /* Write buffer flags */
  70         PBLK_FLUSH_ENTRY        = 1 << 2,
  71         PBLK_WRITTEN_DATA       = 1 << 3,
  72         PBLK_SUBMITTED_ENTRY    = 1 << 4,
  73         PBLK_WRITABLE_ENTRY     = 1 << 5,
  74 };
  75 
  76 enum {
  77         PBLK_BLK_ST_OPEN =      0x1,
  78         PBLK_BLK_ST_CLOSED =    0x2,
  79 };
  80 
  81 enum {
  82         PBLK_CHUNK_RESET_START,
  83         PBLK_CHUNK_RESET_DONE,
  84         PBLK_CHUNK_RESET_FAILED,
  85 };
  86 
  87 struct pblk_sec_meta {
  88         u64 reserved;
  89         __le64 lba;
  90 };
  91 
  92 /* The number of GC lists and the rate-limiter states go together. This way the
  93  * rate-limiter can dictate how much GC is needed based on resource utilization.
  94  */
  95 #define PBLK_GC_NR_LISTS 4
  96 
  97 enum {
  98         PBLK_RL_OFF = 0,
  99         PBLK_RL_WERR = 1,
 100         PBLK_RL_HIGH = 2,
 101         PBLK_RL_MID = 3,
 102         PBLK_RL_LOW = 4
 103 };
 104 
 105 #define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
 106 
 107 /* write buffer completion context */
 108 struct pblk_c_ctx {
 109         struct list_head list;          /* Head for out-of-order completion */
 110 
 111         unsigned long *lun_bitmap;      /* Luns used on current request */
 112         unsigned int sentry;
 113         unsigned int nr_valid;
 114         unsigned int nr_padded;
 115 };
 116 
 117 /* read context */
 118 struct pblk_g_ctx {
 119         void *private;
 120         unsigned long start_time;
 121         u64 lba;
 122 };
 123 
 124 /* Pad context */
 125 struct pblk_pad_rq {
 126         struct pblk *pblk;
 127         struct completion wait;
 128         struct kref ref;
 129 };
 130 
 131 /* Recovery context */
 132 struct pblk_rec_ctx {
 133         struct pblk *pblk;
 134         struct nvm_rq *rqd;
 135         struct work_struct ws_rec;
 136 };
 137 
 138 /* Write context */
 139 struct pblk_w_ctx {
 140         struct bio_list bios;           /* Original bios - used for completion
 141                                          * in REQ_FUA, REQ_FLUSH case
 142                                          */
 143         u64 lba;                        /* Logical addr. associated with entry */
 144         struct ppa_addr ppa;            /* Physical addr. associated with entry */
 145         int flags;                      /* Write context flags */
 146 };
 147 
 148 struct pblk_rb_entry {
 149         struct ppa_addr cacheline;      /* Cacheline for this entry */
 150         void *data;                     /* Pointer to data on this entry */
 151         struct pblk_w_ctx w_ctx;        /* Context for this entry */
 152         struct list_head index;         /* List head to enable indexes */
 153 };
 154 
 155 #define EMPTY_ENTRY (~0U)
 156 
 157 struct pblk_rb_pages {
 158         struct page *pages;
 159         int order;
 160         struct list_head list;
 161 };
 162 
 163 struct pblk_rb {
 164         struct pblk_rb_entry *entries;  /* Ring buffer entries */
 165         unsigned int mem;               /* Write offset - points to next
 166                                          * writable entry in memory
 167                                          */
 168         unsigned int subm;              /* Read offset - points to last entry
 169                                          * that has been submitted to the media
 170                                          * to be persisted
 171                                          */
 172         unsigned int sync;              /* Synced - backpointer that signals
 173                                          * the last submitted entry that has
 174                                          * been successfully persisted to media
 175                                          */
 176         unsigned int flush_point;       /* Sync point - last entry that must be
 177                                          * flushed to the media. Used with
 178                                          * REQ_FLUSH and REQ_FUA
 179                                          */
 180         unsigned int l2p_update;        /* l2p update point - next entry for
 181                                          * which l2p mapping will be updated to
 182                                          * contain a device ppa address (instead
 183                                          * of a cacheline)
 184                                          */
 185         unsigned int nr_entries;        /* Number of entries in write buffer -
 186                                          * must be a power of two
 187                                          */
 188         unsigned int seg_size;          /* Size of the data segments being
 189                                          * stored on each entry. Typically this
 190                                          * will be 4KB
 191                                          */
 192 
 193         unsigned int back_thres;        /* Threshold that shall be maintained by
 194                                          * the backpointer in order to respect
 195                                          * geo->mw_cunits on a per chunk basis
 196                                          */
 197 
 198         struct list_head pages;         /* List of data pages */
 199 
 200         spinlock_t w_lock;              /* Write lock */
 201         spinlock_t s_lock;              /* Sync lock */
 202 
 203 #ifdef CONFIG_NVM_PBLK_DEBUG
 204         atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
 205 #endif
 206 };
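
/*
 * Minimal sketch (illustration only; the example_* names are not declared in
 * this header): because nr_entries is a power of two, the write-buffer
 * offsets above wrap with a mask, and the distance between any two of them
 * (e.g. entries written but not yet submitted) is a masked subtraction. The
 * real helpers are pblk_rb_ptr_wrap() and pblk_rb_read_count(), declared
 * further down.
 */
static inline unsigned int example_rb_wrap(struct pblk_rb *rb, unsigned int pos,
                                           unsigned int nr_entries)
{
        return (pos + nr_entries) & (rb->nr_entries - 1);
}

static inline unsigned int example_rb_to_submit(struct pblk_rb *rb)
{
        /* entries written (mem) but not yet submitted to the media (subm) */
        return (rb->mem - rb->subm) & (rb->nr_entries - 1);
}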
 207 
 208 #define PBLK_RECOVERY_SECTORS 16
 209 
 210 struct pblk_lun {
 211         struct ppa_addr bppa;
 212         struct semaphore wr_sem;
 213 };
 214 
 215 struct pblk_gc_rq {
 216         struct pblk_line *line;
 217         void *data;
 218         u64 paddr_list[NVM_MAX_VLBA];
 219         u64 lba_list[NVM_MAX_VLBA];
 220         int nr_secs;
 221         int secs_to_gc;
 222         struct list_head list;
 223 };
 224 
 225 struct pblk_gc {
 226         /* These states are not protected by a lock since (i) they are in the
 227          * fast path, and (ii) they are not critical.
 228          */
 229         int gc_active;
 230         int gc_enabled;
 231         int gc_forced;
 232 
 233         struct task_struct *gc_ts;
 234         struct task_struct *gc_writer_ts;
 235         struct task_struct *gc_reader_ts;
 236 
 237         struct workqueue_struct *gc_line_reader_wq;
 238         struct workqueue_struct *gc_reader_wq;
 239 
 240         struct timer_list gc_timer;
 241 
 242         struct semaphore gc_sem;
 243         atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
 244         atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
 245                                     * started reads to finished writes
 246                                     */
 247         int w_entries;
 248 
 249         struct list_head w_list;
 250         struct list_head r_list;
 251 
 252         spinlock_t lock;
 253         spinlock_t w_lock;
 254         spinlock_t r_lock;
 255 };
 256 
 257 struct pblk_rl {
 258         unsigned int high;      /* Upper threshold for rate limiter (free run -
 259                                  * user I/O rate limiter)
 260                                  */
 261         unsigned int high_pw;   /* High rounded up as a power of 2 */
 262 
 263 #define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12% available blks */
 264 #define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */
 265 
 266         int rb_windows_pw;      /* Number of rate windows in the write buffer
 267                                  * given as a power-of-2. This guarantees that
 268                                  * when user I/O is being rate limited,
 269                                  * enough space will be reserved for the GC
 270                                  * to place its payload. A window is of
 271                                  * pblk->max_write_pgs size, which in NVMe is
 272                                  * 64, i.e., 256KB.
 273                                  */
 274         int rb_budget;          /* Total number of entries available for I/O */
 275         int rb_user_max;        /* Max buffer entries available for user I/O */
 276         int rb_gc_max;          /* Max buffer entries available for GC I/O */
 277         int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
 278         int rb_state;           /* Rate-limiter current state */
 279         int rb_max_io;          /* Maximum size for an I/O given the config */
 280 
 281         atomic_t rb_user_cnt;   /* User I/O buffer counter */
 282         atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
 283         atomic_t rb_space;      /* Space limit in case of reaching capacity */
 284 
 285         int rsv_blocks;         /* Reserved blocks for GC */
 286 
 287         int rb_user_active;
 288         int rb_gc_active;
 289 
 290         atomic_t werr_lines;    /* Number of write error lines that need GC */
 291 
 292         struct timer_list u_timer;
 293 
 294         unsigned long total_blocks;
 295 
 296         atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
 297         atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
 298 };
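
/*
 * Minimal sketch (illustration only; example_* is hypothetical): the
 * rate-limiter admits user writes against rb_user_max, which it shrinks as
 * free blocks drop so that the share reserved for GC (rb_gc_max/rb_gc_rsv)
 * stays available. See pblk_rl_user_may_insert()/pblk_rl_gc_may_insert()
 * below for the real checks.
 */
static inline int example_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
        return atomic_read(&rl->rb_user_cnt) + nr_entries <= rl->rb_user_max;
}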
 299 
 300 #define PBLK_LINE_EMPTY (~0U)
 301 
 302 enum {
 303         /* Line Types */
 304         PBLK_LINETYPE_FREE = 0,
 305         PBLK_LINETYPE_LOG = 1,
 306         PBLK_LINETYPE_DATA = 2,
 307 
 308         /* Line state */
 309         PBLK_LINESTATE_NEW = 9,
 310         PBLK_LINESTATE_FREE = 10,
 311         PBLK_LINESTATE_OPEN = 11,
 312         PBLK_LINESTATE_CLOSED = 12,
 313         PBLK_LINESTATE_GC = 13,
 314         PBLK_LINESTATE_BAD = 14,
 315         PBLK_LINESTATE_CORRUPT = 15,
 316 
 317         /* GC group */
 318         PBLK_LINEGC_NONE = 20,
 319         PBLK_LINEGC_EMPTY = 21,
 320         PBLK_LINEGC_LOW = 22,
 321         PBLK_LINEGC_MID = 23,
 322         PBLK_LINEGC_HIGH = 24,
 323         PBLK_LINEGC_FULL = 25,
 324         PBLK_LINEGC_WERR = 26
 325 };
 326 
 327 #define PBLK_MAGIC 0x70626c6b /*pblk*/
 328 
 329 /* emeta/smeta persistent storage format versions:
 330  * Changes in major version require offline migration.
 331  * Changes in minor version are handled automatically during
 332  * recovery.
 333  */
 334 
 335 #define SMETA_VERSION_MAJOR (0)
 336 #define SMETA_VERSION_MINOR (1)
 337 
 338 #define EMETA_VERSION_MAJOR (0)
 339 #define EMETA_VERSION_MINOR (2)
 340 
 341 struct line_header {
 342         __le32 crc;
 343         __le32 identifier;      /* pblk identifier */
 344         __u8 uuid[16];          /* instance uuid */
 345         __le16 type;            /* line type */
 346         __u8 version_major;     /* version major */
 347         __u8 version_minor;     /* version minor */
 348         __le32 id;              /* line id for current line */
 349 };
 350 
 351 struct line_smeta {
 352         struct line_header header;
 353 
 354         __le32 crc;             /* Full structure including struct crc */
 355         /* Previous line metadata */
 356         __le32 prev_id;         /* Line id for previous line */
 357 
 358         /* Current line metadata */
 359         __le64 seq_nr;          /* Sequence number for current line */
 360 
 361         /* Active writers */
 362         __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 363 
 364         __le32 rsvd[2];
 365 
 366         __le64 lun_bitmap[];
 367 };
 368 
 369 
 370 /*
 371  * Metadata layout in media:
 372  *      First sector:
 373  *              1. struct line_emeta
 374  *              2. bad block bitmap (u64 * window_wr_lun)
 375  *              3. write amplification counters
 376  *      Mid sectors (start at lbas_sector):
 377  *              4. nr_lbas (u64) forming lba list
 378  *      Last sectors (start at vsc_sector):
 379  *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 380  */
 381 struct line_emeta {
 382         struct line_header header;
 383 
 384         __le32 crc;             /* Full structure including struct crc */
 385 
 386         /* Previous line metadata */
 387         __le32 prev_id;         /* Line id for prev line */
 388 
 389         /* Current line metadata */
 390         __le64 seq_nr;          /* Sequence number for current line */
 391 
 392         /* Active writers */
 393         __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 394 
 395         /* Bookkeeping for recovery */
 396         __le32 next_id;         /* Line id for next line */
 397         __le64 nr_lbas;         /* Number of lbas mapped in line */
 398         __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
 399         __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
 400 };
 401 
 402 
 403 /* Write amplification counters stored on media */
 404 struct wa_counters {
 405         __le64 user;            /* Number of user written sectors */
 406         __le64 gc;              /* Number of sectors written by GC */
 407         __le64 pad;             /* Number of padded sectors */
 408 };
 409 
 410 struct pblk_emeta {
 411         struct line_emeta *buf;         /* emeta buffer in media format */
 412         int mem;                        /* Write offset - points to next
 413                                          * writable entry in memory
 414                                          */
 415         atomic_t sync;                  /* Synced - backpointer that signals the
 416                                          * last entry that has been successfully
 417                                          * persisted to media
 418                                          */
 419         unsigned int nr_entries;        /* Number of emeta entries */
 420 };
 421 
 422 struct pblk_smeta {
 423         struct line_smeta *buf;         /* smeta buffer in persistent format */
 424 };
 425 
 426 struct pblk_w_err_gc {
 427         int has_write_err;
 428         int has_gc_err;
 429         __le64 *lba_list;
 430 };
 431 
 432 struct pblk_line {
 433         struct pblk *pblk;
 434         unsigned int id;                /* Line number corresponds to the
 435                                          * block line
 436                                          */
 437         unsigned int seq_nr;            /* Unique line sequence number */
 438 
 439         int state;                      /* PBLK_LINESTATE_X */
 440         int type;                       /* PBLK_LINETYPE_X */
 441         int gc_group;                   /* PBLK_LINEGC_X */
 442         struct list_head list;          /* Free, GC lists */
 443 
 444         unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
 445 
 446         struct nvm_chk_meta *chks;      /* Chunks forming line */
 447 
 448         struct pblk_smeta *smeta;       /* Start metadata */
 449         struct pblk_emeta *emeta;       /* End metadata */
 450 
 451         int meta_line;                  /* Metadata line id */
 452         int meta_distance;              /* Distance between data and metadata */
 453 
 454         u64 emeta_ssec;                 /* Sector where emeta starts */
 455 
 456         unsigned int sec_in_line;       /* Number of usable secs in line */
 457 
 458         atomic_t blk_in_line;           /* Number of good blocks in line */
 459         unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
 460         unsigned long *erase_bitmap;    /* Bitmap for erased blocks */
 461 
 462         unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
 463         unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */
 464 
 465         atomic_t left_eblks;            /* Blocks left for erasing */
 466         atomic_t left_seblks;           /* Blocks left for sync erasing */
 467 
 468         int left_msecs;                 /* Sectors left for mapping */
 469         unsigned int cur_sec;           /* Sector map pointer */
 470         unsigned int nr_valid_lbas;     /* Number of valid lbas in line */
 471 
 472         __le32 *vsc;                    /* Valid sector count in line */
 473 
 474         struct kref ref;                /* Write buffer L2P references */
 475         atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */
 476 
 477         struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
 478 
 479         spinlock_t lock;                /* Necessary for invalid_bitmap only */
 480 };
 481 
 482 #define PBLK_DATA_LINES 4
 483 
 484 enum {
 485         PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
 486         PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
 487         PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
 488 };
 489 
 490 struct pblk_line_mgmt {
 491         int nr_lines;                   /* Total number of full lines */
 492         int nr_free_lines;              /* Number of full lines in free list */
 493 
 494         /* Free lists - use free_lock */
 495         struct list_head free_list;     /* Full lines ready to use */
 496         struct list_head corrupt_list;  /* Full lines corrupted */
 497         struct list_head bad_list;      /* Full lines bad */
 498 
 499         /* GC lists - use gc_lock */
 500         struct list_head *gc_lists[PBLK_GC_NR_LISTS];
 501         struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
 502         struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
 503         struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
 504 
 505         struct list_head gc_werr_list;  /* Write err recovery list */
 506 
 507         struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
 508         struct list_head gc_empty_list; /* Full lines close, all valid */
 509 
 510         struct pblk_line *log_line;     /* Current FTL log line */
 511         struct pblk_line *data_line;    /* Current data line */
 512         struct pblk_line *log_next;     /* Next FTL log line */
 513         struct pblk_line *data_next;    /* Next data line */
 514 
 515         struct list_head emeta_list;    /* Lines queued to schedule emeta */
 516 
 517         __le32 *vsc_list;               /* Valid sector counts for all lines */
 518 
 519         /* Pre-allocated metadata for data lines */
 520         struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
 521         struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
 522         unsigned long meta_bitmap;
 523 
 524         /* Cache and mempool for map/invalid bitmaps */
 525         struct kmem_cache *bitmap_cache;
 526         mempool_t *bitmap_pool;
 527 
 528         /* Helpers for fast bitmap calculations */
 529         unsigned long *bb_template;
 530         unsigned long *bb_aux;
 531 
 532         unsigned long d_seq_nr;         /* Data line unique sequence number */
 533         unsigned long l_seq_nr;         /* Log line unique sequence number */
 534 
 535         spinlock_t free_lock;
 536         spinlock_t close_lock;
 537         spinlock_t gc_lock;
 538 };
 539 
 540 struct pblk_line_meta {
 541         unsigned int smeta_len;         /* Total length for smeta */
 542         unsigned int smeta_sec;         /* Sectors needed for smeta */
 543 
 544         unsigned int emeta_len[4];      /* Lengths for emeta:
 545                                          *  [0]: Total
 546                                          *  [1]: struct line_emeta +
 547                                          *       bb_bitmap + struct wa_counters
 548                                          *  [2]: L2P portion
 549                                          *  [3]: vsc
 550                                          */
 551         unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
 552                                          * as emeta_len
 553                                          */
 554 
 555         unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
 556 
 557         unsigned int vsc_list_len;      /* Length for vsc list */
 558         unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
 559         unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
 560         unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
 561 
 562         unsigned int blk_per_line;      /* Number of blocks in a full line */
 563         unsigned int sec_per_line;      /* Number of sectors in a line */
 564         unsigned int dsec_per_line;     /* Number of data sectors in a line */
 565         unsigned int min_blk_line;      /* Min. number of good blocks in line */
 566 
 567         unsigned int mid_thrs;          /* Threshold for GC mid list */
 568         unsigned int high_thrs;         /* Threshold for GC high list */
 569 
 570         unsigned int meta_distance;     /* Distance between data and metadata */
 571 };
 572 
 573 enum {
 574         PBLK_STATE_RUNNING = 0,
 575         PBLK_STATE_STOPPING = 1,
 576         PBLK_STATE_RECOVERING = 2,
 577         PBLK_STATE_STOPPED = 3,
 578 };
 579 
 580 /* Internal format to support not power-of-2 device formats */
 581 struct pblk_addrf {
 582         /* gen to dev */
 583         int sec_stripe;
 584         int ch_stripe;
 585         int lun_stripe;
 586 
 587         /* dev to gen */
 588         int sec_lun_stripe;
 589         int sec_ws_stripe;
 590 };
 591 
 592 struct pblk {
 593         struct nvm_tgt_dev *dev;
 594         struct gendisk *disk;
 595 
 596         struct kobject kobj;
 597 
 598         struct pblk_lun *luns;
 599 
 600         struct pblk_line *lines;                /* Line array */
 601         struct pblk_line_mgmt l_mg;             /* Line management */
 602         struct pblk_line_meta lm;               /* Line metadata */
 603 
 604         struct nvm_addrf addrf;         /* Aligned address format */
 605         struct pblk_addrf uaddrf;       /* Unaligned address format */
 606         int addrf_len;
 607 
 608         struct pblk_rb rwb;
 609 
 610         int state;                      /* pblk line state */
 611 
 612         int min_write_pgs; /* Minimum number of pages required by controller */
 613         int min_write_pgs_data; /* Minimum number of payload pages */
 614         int max_write_pgs; /* Maximum number of pages supported by controller */
 615         int oob_meta_size; /* Size of OOB sector metadata */
 616 
 617         sector_t capacity; /* Device capacity when bad blocks are subtracted */
 618 
 619         int op;      /* Percentage of device used for over-provisioning */
 620         int op_blks; /* Number of blocks used for over-provisioning */
 621 
 622         /* pblk provisioning values. Used by rate limiter */
 623         struct pblk_rl rl;
 624 
 625         int sec_per_write;
 626 
 627         guid_t instance_uuid;
 628 
 629         /* Persistent write amplification counters, 4kb sector I/Os */
 630         atomic64_t user_wa;             /* Sectors written by user */
 631         atomic64_t gc_wa;               /* Sectors written by GC */
 632         atomic64_t pad_wa;              /* Padded sectors written */
 633 
 634         /* Reset values for delta write amplification measurements */
 635         u64 user_rst_wa;
 636         u64 gc_rst_wa;
 637         u64 pad_rst_wa;
 638 
 639         /* Counters used for calculating padding distribution */
 640         atomic64_t *pad_dist;           /* Padding distribution buckets */
 641         u64 nr_flush_rst;               /* Flushes reset value for pad dist. */
 642         atomic64_t nr_flush;            /* Number of flush/fua I/O */
 643 
 644 #ifdef CONFIG_NVM_PBLK_DEBUG
 645         /* Non-persistent debug counters, 4kb sector I/Os */
 646         atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
 647         atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
 648         atomic_long_t padded_wb;        /* Sectors padded in write buffer */
 649         atomic_long_t req_writes;       /* Sectors stored on write buffer */
 650         atomic_long_t sub_writes;       /* Sectors submitted from buffer */
 651         atomic_long_t sync_writes;      /* Sectors synced to media */
 652         atomic_long_t inflight_reads;   /* Inflight sector read requests */
 653         atomic_long_t cache_reads;      /* Read requests that hit the cache */
 654         atomic_long_t sync_reads;       /* Completed sector read requests */
 655         atomic_long_t recov_writes;     /* Sectors submitted from recovery */
 656         atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
 657         atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
 658 #endif
 659 
 660         spinlock_t lock;
 661 
 662         atomic_long_t read_failed;
 663         atomic_long_t read_empty;
 664         atomic_long_t read_high_ecc;
 665         atomic_long_t read_failed_gc;
 666         atomic_long_t write_failed;
 667         atomic_long_t erase_failed;
 668 
 669         atomic_t inflight_io;           /* General inflight I/O counter */
 670 
 671         struct task_struct *writer_ts;
 672 
 673         /* Simple translation map of logical addresses to physical addresses.
 674          * The logical addresses are known to the host system, while the physical
 675          * addresses are used when writing to the disk block device.
 676          */
 677         unsigned char *trans_map;
 678         spinlock_t trans_lock;
 679 
 680         struct list_head compl_list;
 681 
 682         spinlock_t resubmit_lock;        /* Resubmit list lock */
 683         struct list_head resubmit_list; /* Resubmit list for failed writes*/
 684 
 685         mempool_t page_bio_pool;
 686         mempool_t gen_ws_pool;
 687         mempool_t rec_pool;
 688         mempool_t r_rq_pool;
 689         mempool_t w_rq_pool;
 690         mempool_t e_rq_pool;
 691 
 692         struct workqueue_struct *close_wq;
 693         struct workqueue_struct *bb_wq;
 694         struct workqueue_struct *r_end_wq;
 695 
 696         struct timer_list wtimer;
 697 
 698         struct pblk_gc gc;
 699 };
 700 
 701 struct pblk_line_ws {
 702         struct pblk *pblk;
 703         struct pblk_line *line;
 704         void *priv;
 705         struct work_struct ws;
 706 };
 707 
 708 #define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 709 #define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
 710 
 711 #define pblk_err(pblk, fmt, ...)                        \
 712         pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 713 #define pblk_info(pblk, fmt, ...)                       \
 714         pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 715 #define pblk_warn(pblk, fmt, ...)                       \
 716         pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 717 #define pblk_debug(pblk, fmt, ...)                      \
 718         pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 719 
 720 /*
 721  * pblk ring buffer operations
 722  */
 723 int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
 724                  unsigned int seg_sz);
 725 int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 726                            unsigned int nr_entries, unsigned int *pos);
 727 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
 728                          unsigned int *pos);
 729 void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
 730                               struct pblk_w_ctx w_ctx, unsigned int pos);
 731 void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
 732                             struct pblk_w_ctx w_ctx, struct pblk_line *line,
 733                             u64 paddr, unsigned int pos);
 734 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
 735 void pblk_rb_flush(struct pblk_rb *rb);
 736 
 737 void pblk_rb_sync_l2p(struct pblk_rb *rb);
 738 unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 739                                  unsigned int pos, unsigned int nr_entries,
 740                                  unsigned int count);
 741 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 742                         struct ppa_addr ppa);
 743 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 744 
 745 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
 746 unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
 747 unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
 748                               unsigned int nr_entries);
 749 void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
 750 unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
 751 
 752 unsigned int pblk_rb_read_count(struct pblk_rb *rb);
 753 unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
 754 unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
 755 
 756 int pblk_rb_tear_down_check(struct pblk_rb *rb);
 757 int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
 758 void pblk_rb_free(struct pblk_rb *rb);
 759 ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
 760 
 761 /*
 762  * pblk core
 763  */
 764 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 765 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
 766 int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 767 void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 768 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 769 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 770                         struct pblk_c_ctx *c_ctx);
 771 void pblk_discard(struct pblk *pblk, struct bio *bio);
 772 struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
 773 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 774                                               struct nvm_chk_meta *lp,
 775                                               struct ppa_addr ppa);
 776 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 777 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 778 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
 779 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
 780 int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 781 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
 782 struct pblk_line *pblk_line_get(struct pblk *pblk);
 783 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
 784 struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
 785 void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
 786 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
 787 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 788 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 789 struct pblk_line *pblk_line_get_data(struct pblk *pblk);
 790 struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
 791 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
 792 int pblk_line_is_full(struct pblk_line *line);
 793 void pblk_line_free(struct pblk_line *line);
 794 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
 795 void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
 796 void pblk_line_close_ws(struct work_struct *work);
 797 void pblk_pipeline_stop(struct pblk *pblk);
 798 void __pblk_pipeline_stop(struct pblk *pblk);
 799 void __pblk_pipeline_flush(struct pblk *pblk);
 800 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 801                      void (*work)(struct work_struct *), gfp_t gfp_mask,
 802                      struct workqueue_struct *wq);
 803 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
 804 int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
 805 int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 806                          void *emeta_buf);
 807 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
 808 void pblk_line_put(struct kref *ref);
 809 void pblk_line_put_wq(struct kref *ref);
 810 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
 811 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
 812 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 813 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 814 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 815 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 816                    unsigned long secs_to_flush, bool skip_meta);
 817 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 818                   unsigned long *lun_bitmap);
 819 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
 820 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
 821 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
 822 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 823                        int nr_pages);
 824 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 825                          int nr_pages);
 826 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
 827 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 828                            u64 paddr);
 829 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
 830 void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
 831                            struct ppa_addr ppa);
 832 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
 833                          struct ppa_addr ppa, struct ppa_addr entry_line);
 834 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
 835                        struct pblk_line *gc_line, u64 paddr);
 836 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 837                           u64 *lba_list, int nr_secs);
 838 int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
 839                          sector_t blba, int nr_secs, bool *from_cache);
 840 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
 841 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 842 
 843 /*
 844  * pblk user I/O write path
 845  */
 846 void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
 847                         unsigned long flags);
 848 int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 849 
 850 /*
 851  * pblk map
 852  */
 853 int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 854                        unsigned int sentry, unsigned long *lun_bitmap,
 855                        unsigned int valid_secs, struct ppa_addr *erase_ppa);
 856 int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 857                  unsigned long *lun_bitmap, unsigned int valid_secs,
 858                  unsigned int off);
 859 
 860 /*
 861  * pblk write thread
 862  */
 863 int pblk_write_ts(void *data);
 864 void pblk_write_timer_fn(struct timer_list *t);
 865 void pblk_write_should_kick(struct pblk *pblk);
 866 void pblk_write_kick(struct pblk *pblk);
 867 
 868 /*
 869  * pblk read path
 870  */
 871 extern struct bio_set pblk_bio_set;
 872 void pblk_submit_read(struct pblk *pblk, struct bio *bio);
 873 int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 874 /*
 875  * pblk recovery
 876  */
 877 struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
 878 int pblk_recov_pad(struct pblk *pblk);
 879 int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
 880 
 881 /*
 882  * pblk gc
 883  */
 884 #define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
 885 #define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
 886 #define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
 887 
 888 int pblk_gc_init(struct pblk *pblk);
 889 void pblk_gc_exit(struct pblk *pblk, bool graceful);
 890 void pblk_gc_should_start(struct pblk *pblk);
 891 void pblk_gc_should_stop(struct pblk *pblk);
 892 void pblk_gc_should_kick(struct pblk *pblk);
 893 void pblk_gc_free_full_lines(struct pblk *pblk);
 894 void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 895                               int *gc_active);
 896 int pblk_gc_sysfs_force(struct pblk *pblk, int force);
 897 void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
 898 
 899 /*
 900  * pblk rate limiter
 901  */
 902 void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
 903 void pblk_rl_free(struct pblk_rl *rl);
 904 void pblk_rl_update_rates(struct pblk_rl *rl);
 905 int pblk_rl_high_thrs(struct pblk_rl *rl);
 906 unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
 907 unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
 908 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
 909 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
 910 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
 911 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
 912 void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
 913 void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
 914 int pblk_rl_max_io(struct pblk_rl *rl);
 915 void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
 916 void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
 917                             bool used);
 918 int pblk_rl_is_limit(struct pblk_rl *rl);
 919 
 920 void pblk_rl_werr_line_in(struct pblk_rl *rl);
 921 void pblk_rl_werr_line_out(struct pblk_rl *rl);
 922 
 923 /*
 924  * pblk sysfs
 925  */
 926 int pblk_sysfs_init(struct gendisk *tdisk);
 927 void pblk_sysfs_exit(struct gendisk *tdisk);
 928 
 929 static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
 930 {
 931         return c_ctx - sizeof(struct nvm_rq);
 932 }
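
/*
 * Minimal sketch (illustration only; example_* is hypothetical): pblk
 * requests are allocated as one buffer holding the nvm_rq followed by its
 * completion context (see pblk_g_rq_size/pblk_w_rq_size above), so
 * nvm_rq_to_pdu() walks forward and nvm_rq_from_c_ctx() walks back.
 */
static inline struct pblk_c_ctx *example_rqd_to_c_ctx(struct nvm_rq *rqd)
{
        return nvm_rq_to_pdu(rqd);      /* context stored right after nvm_rq */
}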
 933 
 934 static inline void *emeta_to_bb(struct line_emeta *emeta)
 935 {
 936         return emeta->bb_bitmap;
 937 }
 938 
 939 static inline void *emeta_to_wa(struct pblk_line_meta *lm,
 940                                 struct line_emeta *emeta)
 941 {
 942         return emeta->bb_bitmap + lm->blk_bitmap_len;
 943 }
 944 
 945 static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
 946 {
 947         return ((void *)emeta + pblk->lm.emeta_len[1]);
 948 }
 949 
 950 static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
 951 {
 952         return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
 953 }
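
/*
 * Minimal sketch (illustration only; example_* is hypothetical): the
 * accessors above follow the on-media emeta layout, so the lba list sits
 * emeta_len[1] bytes into the buffer and the vsc list a further
 * emeta_len[2] bytes after that.
 */
static inline u64 example_emeta_lba(struct pblk *pblk, struct line_emeta *emeta,
                                    int i)
{
        __le64 *lbas = emeta_to_lbas(pblk, emeta);

        return le64_to_cpu(lbas[i]);
}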
 954 
 955 static inline int pblk_line_vsc(struct pblk_line *line)
 956 {
 957         return le32_to_cpu(*line->vsc);
 958 }
 959 
 960 static inline int pblk_ppa_to_line_id(struct ppa_addr p)
 961 {
 962         return p.a.blk;
 963 }
 964 
 965 static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
 966                                                  struct ppa_addr p)
 967 {
 968         return &pblk->lines[pblk_ppa_to_line_id(p)];
 969 }
 970 
 971 static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
 972 {
 973         return p.a.lun * geo->num_ch + p.a.ch;
 974 }
 975 
 976 static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
 977                                               u64 line_id)
 978 {
 979         struct nvm_tgt_dev *dev = pblk->dev;
 980         struct nvm_geo *geo = &dev->geo;
 981         struct ppa_addr ppa;
 982 
 983         if (geo->version == NVM_OCSSD_SPEC_12) {
 984                 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
 985 
 986                 ppa.ppa = 0;
 987                 ppa.g.blk = line_id;
 988                 ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
 989                 ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
 990                 ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
 991                 ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
 992                 ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
 993         } else {
 994                 struct pblk_addrf *uaddrf = &pblk->uaddrf;
 995                 int secs, chnls, luns;
 996 
 997                 ppa.ppa = 0;
 998 
 999                 ppa.m.chk = line_id;
1000 
1001                 paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
1002                 ppa.m.sec = secs;
1003 
1004                 paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
1005                 ppa.m.grp = chnls;
1006 
1007                 paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
1008                 ppa.m.pu = luns;
1009 
1010                 ppa.m.sec += uaddrf->sec_stripe * paddr;
1011         }
1012 
1013         return ppa;
1014 }
1015 
1016 static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
1017                                                         struct ppa_addr p)
1018 {
1019         struct nvm_tgt_dev *dev = pblk->dev;
1020         struct nvm_geo *geo = &dev->geo;
1021         struct pblk_line *line = pblk_ppa_to_line(pblk, p);
1022         int pos = pblk_ppa_to_pos(geo, p);
1023 
1024         return &line->chks[pos];
1025 }
1026 
1027 static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
1028                                                         struct ppa_addr p)
1029 {
1030         struct nvm_tgt_dev *dev = pblk->dev;
1031 
1032         return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
1033 }
1034 
1035 static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
1036                                                         struct ppa_addr p)
1037 {
1038         struct nvm_tgt_dev *dev = pblk->dev;
1039         struct nvm_geo *geo = &dev->geo;
1040         u64 paddr;
1041 
1042         if (geo->version == NVM_OCSSD_SPEC_12) {
1043                 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
1044 
1045                 paddr = (u64)p.g.ch << ppaf->ch_offset;
1046                 paddr |= (u64)p.g.lun << ppaf->lun_offset;
1047                 paddr |= (u64)p.g.pg << ppaf->pg_offset;
1048                 paddr |= (u64)p.g.pl << ppaf->pln_offset;
1049                 paddr |= (u64)p.g.sec << ppaf->sec_offset;
1050         } else {
1051                 struct pblk_addrf *uaddrf = &pblk->uaddrf;
1052                 u64 secs = p.m.sec;
1053                 int sec_stripe;
1054 
1055                 paddr = (u64)p.m.grp * uaddrf->sec_stripe;
1056                 paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
1057 
1058                 secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
1059                 paddr += secs * uaddrf->sec_ws_stripe;
1060                 paddr += sec_stripe;
1061         }
1062 
1063         return paddr;
1064 }
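
/*
 * Minimal sketch (debug-style illustration; example_* is hypothetical):
 * addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() are inverses for the
 * in-line part of the address, which a round-trip check makes explicit.
 */
static inline void example_check_addr_roundtrip(struct pblk *pblk,
                                                struct pblk_line *line, u64 paddr)
{
        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line->id);

        WARN_ON(pblk_dev_ppa_to_line_addr(pblk, ppa) != paddr);
}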
1065 
1066 static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
1067 {
1068         struct nvm_tgt_dev *dev = pblk->dev;
1069 
1070         return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
1071 }
1072 
1073 static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
1074 {
1075         struct nvm_tgt_dev *dev = pblk->dev;
1076 
1077         return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
1078 }
1079 
1080 static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
1081                                                                 sector_t lba)
1082 {
1083         struct ppa_addr ppa;
1084 
1085         if (pblk->addrf_len < 32) {
1086                 u32 *map = (u32 *)pblk->trans_map;
1087 
1088                 ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
1089         } else {
1090                 struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
1091 
1092                 ppa = map[lba];
1093         }
1094 
1095         return ppa;
1096 }
1097 
1098 static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
1099                                                 struct ppa_addr ppa)
1100 {
1101         if (pblk->addrf_len < 32) {
1102                 u32 *map = (u32 *)pblk->trans_map;
1103 
1104                 map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
1105         } else {
1106                 u64 *map = (u64 *)pblk->trans_map;
1107 
1108                 map[lba] = ppa.ppa;
1109         }
1110 }
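
/*
 * Minimal sketch (illustration only; example_* is hypothetical): the L2P
 * table stores packed 32-bit entries whenever the address format fits,
 * halving its footprint, so its size follows the same addrf_len test used
 * by the accessors above.
 */
static inline size_t example_trans_map_bytes(struct pblk *pblk, sector_t nr_lbas)
{
        size_t entry_size = (pblk->addrf_len < 32) ? 4 : 8;

        return entry_size * nr_lbas;
}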
1111 
1112 static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
1113 {
1114         return (ppa_addr.ppa == ADDR_EMPTY);
1115 }
1116 
1117 static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
1118 {
1119         ppa_addr->ppa = ADDR_EMPTY;
1120 }
1121 
1122 static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
1123 {
1124         return (lppa.ppa == rppa.ppa);
1125 }
1126 
1127 static inline int pblk_addr_in_cache(struct ppa_addr ppa)
1128 {
1129         return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
1130 }
1131 
1132 static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
1133 {
1134         return ppa.c.line;
1135 }
1136 
1137 static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
1138 {
1139         struct ppa_addr p;
1140 
1141         p.c.line = addr;
1142         p.c.is_cached = 1;
1143 
1144         return p;
1145 }
1146 
1147 static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
1148                                             struct line_header *header)
1149 {
1150         u32 crc = ~(u32)0;
1151 
1152         crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
1153                                 sizeof(struct line_header) - sizeof(crc));
1154 
1155         return crc;
1156 }
1157 
1158 static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
1159                                       struct line_smeta *smeta)
1160 {
1161         struct pblk_line_meta *lm = &pblk->lm;
1162         u32 crc = ~(u32)0;
1163 
1164         crc = crc32_le(crc, (unsigned char *)smeta +
1165                                 sizeof(struct line_header) + sizeof(crc),
1166                                 lm->smeta_len -
1167                                 sizeof(struct line_header) - sizeof(crc));
1168 
1169         return crc;
1170 }
1171 
1172 static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
1173                                       struct line_emeta *emeta)
1174 {
1175         struct pblk_line_meta *lm = &pblk->lm;
1176         u32 crc = ~(u32)0;
1177 
1178         crc = crc32_le(crc, (unsigned char *)emeta +
1179                                 sizeof(struct line_header) + sizeof(crc),
1180                                 lm->emeta_len[0] -
1181                                 sizeof(struct line_header) - sizeof(crc));
1182 
1183         return crc;
1184 }
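
/*
 * Minimal sketch (assumed usage; example_* is hypothetical): the CRC helpers
 * above cover everything after the crc field itself, so the field is filled
 * in last before the metadata is written and re-checked on read (compare
 * pblk_recov_check_emeta()).
 */
static inline void example_seal_smeta(struct pblk *pblk, struct line_smeta *smeta)
{
        smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk,
                                                        &smeta->header));
        smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));
}

static inline int example_smeta_crc_ok(struct pblk *pblk, struct line_smeta *smeta)
{
        return le32_to_cpu(smeta->crc) == pblk_calc_smeta_crc(pblk, smeta);
}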
1185 
1186 static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
1187 {
1188         return !(nr_secs % pblk->min_write_pgs);
1189 }
1190 
1191 #ifdef CONFIG_NVM_PBLK_DEBUG
1192 static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
1193                              char *msg, int error)
1194 {
1195         struct nvm_geo *geo = &pblk->dev->geo;
1196 
1197         if (p->c.is_cached) {
1198                 pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
1199                                 msg, error, (u64)p->c.line);
1200         } else if (geo->version == NVM_OCSSD_SPEC_12) {
1201                 pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
1202                         msg, error,
1203                         p->g.ch, p->g.lun, p->g.blk,
1204                         p->g.pg, p->g.pl, p->g.sec);
1205         } else {
1206                 pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
1207                         msg, error,
1208                         p->m.grp, p->m.pu, p->m.chk, p->m.sec);
1209         }
1210 }
1211 
1212 static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
1213                                          int error)
1214 {
1215         int bit = -1;
1216 
1217         if (rqd->nr_ppas ==  1) {
1218                 print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
1219                 return;
1220         }
1221 
1222         while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
1223                                                 bit + 1)) < rqd->nr_ppas) {
1224                 print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
1225         }
1226 
1227         pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
1228 }
1229 
1230 static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
1231                                        struct ppa_addr *ppas, int nr_ppas)
1232 {
1233         struct nvm_geo *geo = &tgt_dev->geo;
1234         struct ppa_addr *ppa;
1235         int i;
1236 
1237         for (i = 0; i < nr_ppas; i++) {
1238                 ppa = &ppas[i];
1239 
1240                 if (geo->version == NVM_OCSSD_SPEC_12) {
1241                         if (!ppa->c.is_cached &&
1242                                         ppa->g.ch < geo->num_ch &&
1243                                         ppa->g.lun < geo->num_lun &&
1244                                         ppa->g.pl < geo->num_pln &&
1245                                         ppa->g.blk < geo->num_chk &&
1246                                         ppa->g.pg < geo->num_pg &&
1247                                         ppa->g.sec < geo->ws_min)
1248                                 continue;
1249                 } else {
1250                         if (!ppa->c.is_cached &&
1251                                         ppa->m.grp < geo->num_ch &&
1252                                         ppa->m.pu < geo->num_lun &&
1253                                         ppa->m.chk < geo->num_chk &&
1254                                         ppa->m.sec < geo->clba)
1255                                 continue;
1256                 }
1257 
1258                 print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
1259 
1260                 return 1;
1261         }
1262         return 0;
1263 }
1264 
1265 static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
1266 {
1267         struct nvm_tgt_dev *dev = pblk->dev;
1268         struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1269 
1270         if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
1271                 WARN_ON(1);
1272                 return -EINVAL;
1273         }
1274 
1275         if (rqd->opcode == NVM_OP_PWRITE) {
1276                 struct pblk_line *line;
1277                 int i;
1278 
1279                 for (i = 0; i < rqd->nr_ppas; i++) {
1280                         line = pblk_ppa_to_line(pblk, ppa_list[i]);
1281 
1282                         spin_lock(&line->lock);
1283                         if (line->state != PBLK_LINESTATE_OPEN) {
1284                                 pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
1285                                                         line->id, line->state);
1286                                 WARN_ON(1);
1287                                 spin_unlock(&line->lock);
1288                                 return -EINVAL;
1289                         }
1290                         spin_unlock(&line->lock);
1291                 }
1292         }
1293 
1294         return 0;
1295 }
1296 #endif
1297 
1298 static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
1299 {
1300         struct pblk_line_meta *lm = &pblk->lm;
1301 
1302         if (paddr > lm->sec_per_line)
1303                 return 1;
1304 
1305         return 0;
1306 }
1307 
1308 static inline unsigned int pblk_get_bi_idx(struct bio *bio)
1309 {
1310         return bio->bi_iter.bi_idx;
1311 }
1312 
1313 static inline sector_t pblk_get_lba(struct bio *bio)
1314 {
1315         return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
1316 }
1317 
1318 static inline unsigned int pblk_get_secs(struct bio *bio)
1319 {
1320         return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1321 }
1322 
1323 static inline char *pblk_disk_name(struct pblk *pblk)
1324 {
1325         struct gendisk *disk = pblk->disk;
1326 
1327         return disk->disk_name;
1328 }
1329 
1330 static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
1331 {
1332         struct pblk_line_meta *lm = &pblk->lm;
1333         /* In a worst-case scenario every line will have OP invalid sectors.
1334          * We will then need a minimum of 1/OP lines to free up a single line
1335          */
1336 
1337         return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
1338 }
1339 
1340 static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
1341                                                          void *meta, int index)
1342 {
1343         return meta +
1344                max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
1345                * index;
1346 }
1347 
1348 static inline int pblk_dma_meta_size(struct pblk *pblk)
1349 {
1350         return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
1351                * NVM_MAX_VLBA;
1352 }
1353 
1354 static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
1355 {
1356         return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
1357 }
1358 #endif /* PBLK_H_ */
