root/drivers/md/bcache/request.c


DEFINITIONS

This source file includes the following definitions.
  1. cache_mode
  2. verify
  3. bio_csum
  4. bch_data_insert_keys
  5. bch_keylist_realloc
  6. bch_data_invalidate
  7. bch_data_insert_error
  8. bch_data_insert_endio
  9. bch_data_insert_start
  10. bch_data_insert
  11. bch_get_congested
  12. add_sequential
  13. iohash
  14. check_should_bypass
  15. bch_cache_read_endio
  16. cache_lookup_fn
  17. cache_lookup
  18. request_endio
  19. backing_request_endio
  20. bio_complete
  21. do_bio_hook
  22. search_free
  23. search_alloc
  24. cached_dev_bio_complete
  25. cached_dev_read_error_done
  26. cached_dev_read_error
  27. cached_dev_cache_miss_done
  28. cached_dev_read_done
  29. cached_dev_read_done_bh
  30. cached_dev_cache_miss
  31. cached_dev_read
  32. cached_dev_write_complete
  33. cached_dev_write
  34. cached_dev_nodata
  35. detached_dev_end_io
  36. detached_dev_do_request
  37. quit_max_writeback_rate
  38. cached_dev_make_request
  39. cached_dev_ioctl
  40. cached_dev_congested
  41. bch_cached_dev_request_init
  42. flash_dev_cache_miss
  43. flash_dev_nodata
  44. flash_dev_make_request
  45. flash_dev_ioctl
  46. flash_dev_congested
  47. bch_flash_dev_request_init
  48. bch_request_exit
  49. bch_request_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Main bcache entry point - handle a read or a write request and decide what to
   4  * do with it; the make_request functions are called by the block layer.
   5  *
   6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7  * Copyright 2012 Google, Inc.
   8  */
   9 
  10 #include "bcache.h"
  11 #include "btree.h"
  12 #include "debug.h"
  13 #include "request.h"
  14 #include "writeback.h"
  15 
  16 #include <linux/module.h>
  17 #include <linux/hash.h>
  18 #include <linux/random.h>
  19 #include <linux/backing-dev.h>
  20 
  21 #include <trace/events/bcache.h>
  22 
  23 #define CUTOFF_CACHE_ADD        95
  24 #define CUTOFF_CACHE_READA      90
  25 
  26 struct kmem_cache *bch_search_cache;
  27 
  28 static void bch_data_insert_start(struct closure *cl);
  29 
  30 static unsigned int cache_mode(struct cached_dev *dc)
  31 {
  32         return BDEV_CACHE_MODE(&dc->sb);
  33 }
  34 
  35 static bool verify(struct cached_dev *dc)
  36 {
  37         return dc->verify;
  38 }
  39 
  40 static void bio_csum(struct bio *bio, struct bkey *k)
  41 {
  42         struct bio_vec bv;
  43         struct bvec_iter iter;
  44         uint64_t csum = 0;
  45 
  46         bio_for_each_segment(bv, bio, iter) {
  47                 void *d = kmap(bv.bv_page) + bv.bv_offset;
  48 
  49                 csum = bch_crc64_update(csum, d, bv.bv_len);
  50                 kunmap(bv.bv_page);
  51         }
  52 
  53         k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
  54 }
  55 
  56 /* Insert data into cache */
  57 
  58 static void bch_data_insert_keys(struct closure *cl)
  59 {
  60         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
  61         atomic_t *journal_ref = NULL;
  62         struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
  63         int ret;
  64 
  65         /*
  66          * If we're looping, might already be waiting on
  67          * another journal write - can't wait on more than one journal write at
  68          * a time
  69          *
  70          * XXX: this looks wrong
  71          */
  72 #if 0
  73         while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
  74                 closure_sync(&s->cl);
  75 #endif
  76 
  77         if (!op->replace)
  78                 journal_ref = bch_journal(op->c, &op->insert_keys,
  79                                           op->flush_journal ? cl : NULL);
  80 
  81         ret = bch_btree_insert(op->c, &op->insert_keys,
  82                                journal_ref, replace_key);
  83         if (ret == -ESRCH) {
  84                 op->replace_collision = true;
  85         } else if (ret) {
  86                 op->status              = BLK_STS_RESOURCE;
  87                 op->insert_data_done    = true;
  88         }
  89 
  90         if (journal_ref)
  91                 atomic_dec_bug(journal_ref);
  92 
  93         if (!op->insert_data_done) {
  94                 continue_at(cl, bch_data_insert_start, op->wq);
  95                 return;
  96         }
  97 
  98         bch_keylist_free(&op->insert_keys);
  99         closure_return(cl);
 100 }
 101 
 102 static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
 103                                struct cache_set *c)
 104 {
 105         size_t oldsize = bch_keylist_nkeys(l);
 106         size_t newsize = oldsize + u64s;
 107 
 108         /*
 109          * The journalling code doesn't handle the case where the keys to insert
 110          * are bigger than an empty write: If we just return -ENOMEM here,
 111          * bch_data_insert_keys() will insert the keys created so far
 112          * and finish the rest when the keylist is empty.
 113          */
 114         if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
 115                 return -ENOMEM;
 116 
 117         return __bch_keylist_realloc(l, u64s);
 118 }
 119 
 120 static void bch_data_invalidate(struct closure *cl)
 121 {
 122         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 123         struct bio *bio = op->bio;
 124 
 125         pr_debug("invalidating %i sectors from %llu",
 126                  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 127 
 128         while (bio_sectors(bio)) {
 129                 unsigned int sectors = min(bio_sectors(bio),
 130                                        1U << (KEY_SIZE_BITS - 1));
 131 
 132                 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 133                         goto out;
 134 
 135                 bio->bi_iter.bi_sector  += sectors;
 136                 bio->bi_iter.bi_size    -= sectors << 9;
 137 
 138                 bch_keylist_add(&op->insert_keys,
 139                                 &KEY(op->inode,
 140                                      bio->bi_iter.bi_sector,
 141                                      sectors));
 142         }
 143 
 144         op->insert_data_done = true;
 145         /* the matching bio_get() is in bch_data_insert() */
 146         bio_put(bio);
 147 out:
 148         continue_at(cl, bch_data_insert_keys, op->wq);
 149 }
 150 
 151 static void bch_data_insert_error(struct closure *cl)
 152 {
 153         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 154 
 155         /*
 156          * Our data write just errored, which means we've got a bunch of keys to
 157          * insert that point to data that wasn't successfully written.
 158          *
 159          * We don't have to insert those keys but we still have to invalidate
 160          * that region of the cache - so, if we just strip off all the pointers
 161          * from the keys we'll accomplish just that.
 162          */
 163 
 164         struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 165 
 166         while (src != op->insert_keys.top) {
 167                 struct bkey *n = bkey_next(src);
 168 
 169                 SET_KEY_PTRS(src, 0);
 170                 memmove(dst, src, bkey_bytes(src));
 171 
 172                 dst = bkey_next(dst);
 173                 src = n;
 174         }
 175 
 176         op->insert_keys.top = dst;
 177 
 178         bch_data_insert_keys(cl);
 179 }
 180 
 181 static void bch_data_insert_endio(struct bio *bio)
 182 {
 183         struct closure *cl = bio->bi_private;
 184         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 185 
 186         if (bio->bi_status) {
 187                 /* TODO: We could try to recover from this. */
 188                 if (op->writeback)
 189                         op->status = bio->bi_status;
 190                 else if (!op->replace)
 191                         set_closure_fn(cl, bch_data_insert_error, op->wq);
 192                 else
 193                         set_closure_fn(cl, NULL, NULL);
 194         }
 195 
 196         bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 197 }
 198 
 199 static void bch_data_insert_start(struct closure *cl)
 200 {
 201         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 202         struct bio *bio = op->bio, *n;
 203 
 204         if (op->bypass)
 205                 return bch_data_invalidate(cl);
 206 
 207         if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 208                 wake_up_gc(op->c);
 209 
 210         /*
 211          * Journal writes are marked REQ_PREFLUSH; if the original write was a
 212          * flush, it'll wait on the journal write.
 213          */
 214         bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 215 
 216         do {
 217                 unsigned int i;
 218                 struct bkey *k;
 219                 struct bio_set *split = &op->c->bio_split;
 220 
 221                 /* 2 u64s for the bkey, 1 for the device pointer and 1 for the checksum */
 222                 if (bch_keylist_realloc(&op->insert_keys,
 223                                         3 + (op->csum ? 1 : 0),
 224                                         op->c)) {
 225                         continue_at(cl, bch_data_insert_keys, op->wq);
 226                         return;
 227                 }
 228 
 229                 k = op->insert_keys.top;
 230                 bkey_init(k);
 231                 SET_KEY_INODE(k, op->inode);
 232                 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 233 
 234                 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 235                                        op->write_point, op->write_prio,
 236                                        op->writeback))
 237                         goto err;
 238 
 239                 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 240 
 241                 n->bi_end_io    = bch_data_insert_endio;
 242                 n->bi_private   = cl;
 243 
 244                 if (op->writeback) {
 245                         SET_KEY_DIRTY(k, true);
 246 
 247                         for (i = 0; i < KEY_PTRS(k); i++)
 248                                 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
 249                                             GC_MARK_DIRTY);
 250                 }
 251 
 252                 SET_KEY_CSUM(k, op->csum);
 253                 if (KEY_CSUM(k))
 254                         bio_csum(n, k);
 255 
 256                 trace_bcache_cache_insert(k);
 257                 bch_keylist_push(&op->insert_keys);
 258 
 259                 bio_set_op_attrs(n, REQ_OP_WRITE, 0);
 260                 bch_submit_bbio(n, op->c, k, 0);
 261         } while (n != bio);
 262 
 263         op->insert_data_done = true;
 264         continue_at(cl, bch_data_insert_keys, op->wq);
 265         return;
 266 err:
 267         /* bch_alloc_sectors() blocks if op->writeback = true */
 268         BUG_ON(op->writeback);
 269 
 270         /*
 271          * But if it's not a writeback write we'd rather just bail out if
 272          * there aren't any buckets ready to write to - it might take a while and
 273          * we might be starving btree writes for gc or something.
 274          */
 275 
 276         if (!op->replace) {
 277                 /*
 278                  * Writethrough write: We can't complete the write until we've
 279                  * updated the index. But we don't want to delay the write while
 280                  * we wait for buckets to be freed up, so just invalidate the
 281                  * rest of the write.
 282                  */
 283                 op->bypass = true;
 284                 return bch_data_invalidate(cl);
 285         } else {
 286                 /*
 287                  * From a cache miss, we can just insert the keys for the data
 288                  * we have written or bail out if we didn't do anything.
 289                  */
 290                 op->insert_data_done = true;
 291                 bio_put(bio);
 292 
 293                 if (!bch_keylist_empty(&op->insert_keys))
 294                         continue_at(cl, bch_data_insert_keys, op->wq);
 295                 else
 296                         closure_return(cl);
 297         }
 298 }
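
     /*
      * Illustrative note (added commentary, not part of the original
      * source): the loop above fragments a single incoming write across
      * however many allocations bch_alloc_sectors() can provide. A write
      * that needs, say, three separate allocations ends up as three split
      * bios and three keys on op->insert_keys, which are then inserted by
      * bch_data_insert_keys() - possibly in batches, if the keylist hits
      * the journal-write size limit enforced by bch_keylist_realloc().
      */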
 299 
 300 /**
 301  * bch_data_insert - stick some data in the cache
 302  * @cl: closure pointer.
 303  *
 304  * This is the starting point for any data to end up in a cache device; it could
 305  * be from a normal write, or a writeback write, or a write to a flash only
 306  * volume - it's also used by the moving garbage collector to compact data in
 307  * mostly empty buckets.
 308  *
 309  * It first writes the data to the cache, creating a list of keys to be inserted
 310  * (if the data had to be fragmented there will be multiple keys); after the
 311  * data is written it calls bch_journal, and after the keys have been added to
 312  * the next journal write they're inserted into the btree.
 313  *
 314  * It inserts the data in op->bio; bi_sector is used for the key offset,
 315  * and op->inode is used for the key inode.
 316  *
 317  * If op->bypass is true, instead of inserting the data it invalidates the
 318  * region of the cache represented by op->bio and op->inode.
 319  */
 320 void bch_data_insert(struct closure *cl)
 321 {
 322         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 323 
 324         trace_bcache_write(op->c, op->inode, op->bio,
 325                            op->writeback, op->bypass);
 326 
 327         bch_keylist_init(&op->insert_keys);
 328         bio_get(op->bio);
 329         bch_data_insert_start(cl);
 330 }
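
     /*
      * Hedged caller sketch (illustrative only - the real callers are
      * cached_dev_write(), cached_dev_read_done() and
      * flash_dev_make_request() below). Only a few of the data_insert_op
      * fields used in this file are shown; the rest of the setup
      * (write_point, flush_journal, csum, ...) is omitted here:
      *
      *     op->c         = c;            // cache set to insert into
      *     op->bio       = bio;          // the data being inserted
      *     op->inode     = d->id;        // key inode = bcache device id
      *     op->wq        = bcache_wq;    // workqueue for continuations
      *     op->writeback = false;        // writethrough-style insert
      *     op->bypass    = false;        // true would only invalidate
      *
      *     closure_call(&op->cl, bch_data_insert, NULL, parent_cl);
      */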
 331 
 332 /*
 333  * Congested?  Return 0 (not congested) or the limit (in sectors)
 334  * beyond which we should bypass the cache due to congestion.
 335  */
 336 unsigned int bch_get_congested(const struct cache_set *c)
 337 {
 338         int i;
 339 
 340         if (!c->congested_read_threshold_us &&
 341             !c->congested_write_threshold_us)
 342                 return 0;
 343 
 344         i = (local_clock_us() - c->congested_last_us) / 1024;
 345         if (i < 0)
 346                 return 0;
 347 
 348         i += atomic_read(&c->congested);
 349         if (i >= 0)
 350                 return 0;
 351 
 352         i += CONGESTED_MAX;
 353 
 354         if (i > 0)
 355                 i = fract_exp_two(i, 6);
 356 
 357         i -= hweight32(get_random_u32());
 358 
 359         return i > 0 ? i : 1;
 360 }
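
     /*
      * Summary (added commentary, not part of the original source):
      * c->congested is driven negative elsewhere when cache-device I/Os
      * are slow, and the value read here is offset by roughly one count
      * per millisecond elapsed since c->congested_last_us, so congestion
      * decays over time. While the sum is still negative, this returns a
      * small positive sector limit (with a little randomness mixed in),
      * and check_should_bypass() below bypasses the cache for any request
      * whose sequential-I/O estimate reaches that limit.
      */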
 361 
 362 static void add_sequential(struct task_struct *t)
 363 {
 364         ewma_add(t->sequential_io_avg,
 365                  t->sequential_io, 8, 0);
 366 
 367         t->sequential_io = 0;
 368 }
 369 
 370 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 371 {
 372         return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 373 }
 374 
 375 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 376 {
 377         struct cache_set *c = dc->disk.c;
 378         unsigned int mode = cache_mode(dc);
 379         unsigned int sectors, congested;
 380         struct task_struct *task = current;
 381         struct io *i;
 382 
 383         if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 384             c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 385             (bio_op(bio) == REQ_OP_DISCARD))
 386                 goto skip;
 387 
 388         if (mode == CACHE_MODE_NONE ||
 389             (mode == CACHE_MODE_WRITEAROUND &&
 390              op_is_write(bio_op(bio))))
 391                 goto skip;
 392 
 393         /*
 394          * If the bio is for read-ahead or background IO, whether to bypass
 395          * it depends on the following:
 396          * - If the IO is for metadata, always cache it and never bypass.
 397          * - If the IO is not metadata, check dc->cache_readahead_policy:
 398          *      BCH_CACHE_READA_ALL: cache it and do not bypass
 399          *      BCH_CACHE_READA_META_ONLY: do not cache it and bypass
 400          * That is, read-ahead requests for metadata always get cached
 401          * (eg, for gfs2 or xfs).
 402          */
 403         if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
 404                 if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 405                     (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
 406                         goto skip;
 407         }
 408 
 409         if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 410             bio_sectors(bio) & (c->sb.block_size - 1)) {
 411                 pr_debug("skipping unaligned io");
 412                 goto skip;
 413         }
 414 
 415         if (bypass_torture_test(dc)) {
 416                 if ((get_random_int() & 3) == 3)
 417                         goto skip;
 418                 else
 419                         goto rescale;
 420         }
 421 
 422         congested = bch_get_congested(c);
 423         if (!congested && !dc->sequential_cutoff)
 424                 goto rescale;
 425 
 426         spin_lock(&dc->io_lock);
 427 
 428         hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
 429                 if (i->last == bio->bi_iter.bi_sector &&
 430                     time_before(jiffies, i->jiffies))
 431                         goto found;
 432 
 433         i = list_first_entry(&dc->io_lru, struct io, lru);
 434 
 435         add_sequential(task);
 436         i->sequential = 0;
 437 found:
 438         if (i->sequential + bio->bi_iter.bi_size > i->sequential)
 439                 i->sequential   += bio->bi_iter.bi_size;
 440 
 441         i->last                  = bio_end_sector(bio);
 442         i->jiffies               = jiffies + msecs_to_jiffies(5000);
 443         task->sequential_io      = i->sequential;
 444 
 445         hlist_del(&i->hash);
 446         hlist_add_head(&i->hash, iohash(dc, i->last));
 447         list_move_tail(&i->lru, &dc->io_lru);
 448 
 449         spin_unlock(&dc->io_lock);
 450 
 451         sectors = max(task->sequential_io,
 452                       task->sequential_io_avg) >> 9;
 453 
 454         if (dc->sequential_cutoff &&
 455             sectors >= dc->sequential_cutoff >> 9) {
 456                 trace_bcache_bypass_sequential(bio);
 457                 goto skip;
 458         }
 459 
 460         if (congested && sectors >= congested) {
 461                 trace_bcache_bypass_congested(bio);
 462                 goto skip;
 463         }
 464 
 465 rescale:
 466         bch_rescale_priorities(c, bio_sectors(bio));
 467         return false;
 468 skip:
 469         bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
 470         return true;
 471 }
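
     /*
      * Worked example of the sequential cutoff above, with hypothetical
      * numbers (added for illustration): assume dc->sequential_cutoff is
      * 4 MiB and the current task has issued 6 MiB of contiguous I/O, so
      * task->sequential_io is 6 MiB. Then:
      *
      *     sectors                    = max(6 MiB, avg) >> 9  >= 12288
      *     dc->sequential_cutoff >> 9 = 8192
      *
      * 12288 >= 8192, so the request is bypassed and goes straight to the
      * backing device.
      */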
 472 
 473 /* Cache lookup */
 474 
 475 struct search {
 476         /* Stack frame for bio_complete */
 477         struct closure          cl;
 478 
 479         struct bbio             bio;
 480         struct bio              *orig_bio;
 481         struct bio              *cache_miss;
 482         struct bcache_device    *d;
 483 
 484         unsigned int            insert_bio_sectors;
 485         unsigned int            recoverable:1;
 486         unsigned int            write:1;
 487         unsigned int            read_dirty_data:1;
 488         unsigned int            cache_missed:1;
 489 
 490         unsigned long           start_time;
 491 
 492         struct btree_op         op;
 493         struct data_insert_op   iop;
 494 };
 495 
 496 static void bch_cache_read_endio(struct bio *bio)
 497 {
 498         struct bbio *b = container_of(bio, struct bbio, bio);
 499         struct closure *cl = bio->bi_private;
 500         struct search *s = container_of(cl, struct search, cl);
 501 
 502         /*
 503          * If the bucket was reused while our bio was in flight, we might have
 504          * read the wrong data. Set s->iop.status, but don't report an error
 505          * for the bbio itself so the failure isn't counted against the
 506          * cache device; we'll still reread the data from the backing device.
 507          */
 508 
 509         if (bio->bi_status)
 510                 s->iop.status = bio->bi_status;
 511         else if (!KEY_DIRTY(&b->key) &&
 512                  ptr_stale(s->iop.c, &b->key, 0)) {
 513                 atomic_long_inc(&s->iop.c->cache_read_races);
 514                 s->iop.status = BLK_STS_IOERR;
 515         }
 516 
 517         bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 518 }
 519 
 520 /*
 521  * Read from a single key, handling the initial cache miss if the key starts in
 522  * the middle of the bio
 523  */
 524 static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 525 {
 526         struct search *s = container_of(op, struct search, op);
 527         struct bio *n, *bio = &s->bio.bio;
 528         struct bkey *bio_key;
 529         unsigned int ptr;
 530 
 531         if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 532                 return MAP_CONTINUE;
 533 
 534         if (KEY_INODE(k) != s->iop.inode ||
 535             KEY_START(k) > bio->bi_iter.bi_sector) {
 536                 unsigned int bio_sectors = bio_sectors(bio);
 537                 unsigned int sectors = KEY_INODE(k) == s->iop.inode
 538                         ? min_t(uint64_t, INT_MAX,
 539                                 KEY_START(k) - bio->bi_iter.bi_sector)
 540                         : INT_MAX;
 541                 int ret = s->d->cache_miss(b, s, bio, sectors);
 542 
 543                 if (ret != MAP_CONTINUE)
 544                         return ret;
 545 
 546                 /* if this was a complete miss we shouldn't get here */
 547                 BUG_ON(bio_sectors <= sectors);
 548         }
 549 
 550         if (!KEY_SIZE(k))
 551                 return MAP_CONTINUE;
 552 
 553         /* XXX: figure out best pointer - for multiple cache devices */
 554         ptr = 0;
 555 
 556         PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 557 
 558         if (KEY_DIRTY(k))
 559                 s->read_dirty_data = true;
 560 
 561         n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 562                                       KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 563                            GFP_NOIO, &s->d->bio_split);
 564 
 565         bio_key = &container_of(n, struct bbio, bio)->key;
 566         bch_bkey_copy_single_ptr(bio_key, k, ptr);
 567 
 568         bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 569         bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 570 
 571         n->bi_end_io    = bch_cache_read_endio;
 572         n->bi_private   = &s->cl;
 573 
 574         /*
 575          * The bucket we're reading from might be reused while our bio
 576          * is in flight, and we could then end up reading the wrong
 577          * data.
 578          *
 579          * We guard against this by checking (in cache_read_endio()) if
 580          * the pointer is stale again; if so, we treat it as an error
 581          * and reread from the backing device (but we don't pass that
 582          * error up anywhere).
 583          */
 584 
 585         __bch_submit_bbio(n, b->c);
 586         return n == bio ? MAP_DONE : MAP_CONTINUE;
 587 }
 588 
 589 static void cache_lookup(struct closure *cl)
 590 {
 591         struct search *s = container_of(cl, struct search, iop.cl);
 592         struct bio *bio = &s->bio.bio;
 593         struct cached_dev *dc;
 594         int ret;
 595 
 596         bch_btree_op_init(&s->op, -1);
 597 
 598         ret = bch_btree_map_keys(&s->op, s->iop.c,
 599                                  &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 600                                  cache_lookup_fn, MAP_END_KEY);
 601         if (ret == -EAGAIN) {
 602                 continue_at(cl, cache_lookup, bcache_wq);
 603                 return;
 604         }
 605 
 606         /*
 607          * We might hit an error when searching the btree; if that happens we
 608          * get a negative ret, and in this scenario we should not recover data
 609          * from the backing device (when the cache device is dirty) because we
 610          * don't know whether the bkeys the read request covered are all clean.
 611          *
 612          * And when that happens, s->iop.status still holds its initial value
 613          * from before we submitted s->bio.bio.
 614          */
 615         if (ret < 0) {
 616                 BUG_ON(ret == -EINTR);
 617                 if (s->d && s->d->c &&
 618                                 !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
 619                         dc = container_of(s->d, struct cached_dev, disk);
 620                         if (dc && atomic_read(&dc->has_dirty))
 621                                 s->recoverable = false;
 622                 }
 623                 if (!s->iop.status)
 624                         s->iop.status = BLK_STS_IOERR;
 625         }
 626 
 627         closure_return(cl);
 628 }
 629 
 630 /* Common code for the make_request functions */
 631 
 632 static void request_endio(struct bio *bio)
 633 {
 634         struct closure *cl = bio->bi_private;
 635 
 636         if (bio->bi_status) {
 637                 struct search *s = container_of(cl, struct search, cl);
 638 
 639                 s->iop.status = bio->bi_status;
 640                 /* Only cache read errors are recoverable */
 641                 s->recoverable = false;
 642         }
 643 
 644         bio_put(bio);
 645         closure_put(cl);
 646 }
 647 
 648 static void backing_request_endio(struct bio *bio)
 649 {
 650         struct closure *cl = bio->bi_private;
 651 
 652         if (bio->bi_status) {
 653                 struct search *s = container_of(cl, struct search, cl);
 654                 struct cached_dev *dc = container_of(s->d,
 655                                                      struct cached_dev, disk);
 656                 /*
 657                  * If a bio has REQ_PREFLUSH for writeback mode, it is
 658                  * specially assembled in cached_dev_write() for a non-zero
 659                  * write request which has REQ_PREFLUSH. We don't set
 660                  * s->iop.status for this failure; the status will be decided
 661                  * by the result of the bch_data_insert() operation.
 662                  */
 663                 if (unlikely(s->iop.writeback &&
 664                              bio->bi_opf & REQ_PREFLUSH)) {
 665                         pr_err("Can't flush %s: returned bi_status %i",
 666                                 dc->backing_dev_name, bio->bi_status);
 667                 } else {
 668                         /* set to orig_bio->bi_status in bio_complete() */
 669                         s->iop.status = bio->bi_status;
 670                 }
 671                 s->recoverable = false;
 672                 /* should count I/O error for backing device here */
 673                 bch_count_backing_io_errors(dc, bio);
 674         }
 675 
 676         bio_put(bio);
 677         closure_put(cl);
 678 }
 679 
 680 static void bio_complete(struct search *s)
 681 {
 682         if (s->orig_bio) {
 683                 generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 684                                     &s->d->disk->part0, s->start_time);
 685 
 686                 trace_bcache_request_end(s->d, s->orig_bio);
 687                 s->orig_bio->bi_status = s->iop.status;
 688                 bio_endio(s->orig_bio);
 689                 s->orig_bio = NULL;
 690         }
 691 }
 692 
 693 static void do_bio_hook(struct search *s,
 694                         struct bio *orig_bio,
 695                         bio_end_io_t *end_io_fn)
 696 {
 697         struct bio *bio = &s->bio.bio;
 698 
 699         bio_init(bio, NULL, 0);
 700         __bio_clone_fast(bio, orig_bio);
 701         /*
 702          * bi_end_io can be set separately somewhere else, e.g. the
 703          * variants in,
 704          * - cache_bio->bi_end_io from cached_dev_cache_miss()
 705          * - n->bi_end_io from cache_lookup_fn()
 706          */
 707         bio->bi_end_io          = end_io_fn;
 708         bio->bi_private         = &s->cl;
 709 
 710         bio_cnt_set(bio, 3);
 711 }
 712 
 713 static void search_free(struct closure *cl)
 714 {
 715         struct search *s = container_of(cl, struct search, cl);
 716 
 717         atomic_dec(&s->iop.c->search_inflight);
 718 
 719         if (s->iop.bio)
 720                 bio_put(s->iop.bio);
 721 
 722         bio_complete(s);
 723         closure_debug_destroy(cl);
 724         mempool_free(s, &s->iop.c->search);
 725 }
 726 
 727 static inline struct search *search_alloc(struct bio *bio,
 728                                           struct bcache_device *d)
 729 {
 730         struct search *s;
 731 
 732         s = mempool_alloc(&d->c->search, GFP_NOIO);
 733 
 734         closure_init(&s->cl, NULL);
 735         do_bio_hook(s, bio, request_endio);
 736         atomic_inc(&d->c->search_inflight);
 737 
 738         s->orig_bio             = bio;
 739         s->cache_miss           = NULL;
 740         s->cache_missed         = 0;
 741         s->d                    = d;
 742         s->recoverable          = 1;
 743         s->write                = op_is_write(bio_op(bio));
 744         s->read_dirty_data      = 0;
 745         s->start_time           = jiffies;
 746 
 747         s->iop.c                = d->c;
 748         s->iop.bio              = NULL;
 749         s->iop.inode            = d->id;
 750         s->iop.write_point      = hash_long((unsigned long) current, 16);
 751         s->iop.write_prio       = 0;
 752         s->iop.status           = 0;
 753         s->iop.flags            = 0;
 754         s->iop.flush_journal    = op_is_flush(bio->bi_opf);
 755         s->iop.wq               = bcache_wq;
 756 
 757         return s;
 758 }
 759 
 760 /* Cached devices */
 761 
 762 static void cached_dev_bio_complete(struct closure *cl)
 763 {
 764         struct search *s = container_of(cl, struct search, cl);
 765         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 766 
 767         cached_dev_put(dc);
 768         search_free(cl);
 769 }
 770 
 771 /* Process reads */
 772 
 773 static void cached_dev_read_error_done(struct closure *cl)
 774 {
 775         struct search *s = container_of(cl, struct search, cl);
 776 
 777         if (s->iop.replace_collision)
 778                 bch_mark_cache_miss_collision(s->iop.c, s->d);
 779 
 780         if (s->iop.bio)
 781                 bio_free_pages(s->iop.bio);
 782 
 783         cached_dev_bio_complete(cl);
 784 }
 785 
 786 static void cached_dev_read_error(struct closure *cl)
 787 {
 788         struct search *s = container_of(cl, struct search, cl);
 789         struct bio *bio = &s->bio.bio;
 790 
 791         /*
 792          * If the read request hit dirty data (s->read_dirty_data is true),
 793          * then recovering a failed read request from the cached device may
 794          * return stale data. So read failure recovery is only permitted
 795          * when the read request hit clean data in the cache device, or
 796          * when a cache read race happened.
 797          */
 798         if (s->recoverable && !s->read_dirty_data) {
 799                 /* Retry from the backing device: */
 800                 trace_bcache_read_retry(s->orig_bio);
 801 
 802                 s->iop.status = 0;
 803                 do_bio_hook(s, s->orig_bio, backing_request_endio);
 804 
 805                 /* XXX: invalidate cache */
 806 
 807                 /* I/O request sent to backing device */
 808                 closure_bio_submit(s->iop.c, bio, cl);
 809         }
 810 
 811         continue_at(cl, cached_dev_read_error_done, NULL);
 812 }
 813 
 814 static void cached_dev_cache_miss_done(struct closure *cl)
 815 {
 816         struct search *s = container_of(cl, struct search, cl);
 817         struct bcache_device *d = s->d;
 818 
 819         if (s->iop.replace_collision)
 820                 bch_mark_cache_miss_collision(s->iop.c, s->d);
 821 
 822         if (s->iop.bio)
 823                 bio_free_pages(s->iop.bio);
 824 
 825         cached_dev_bio_complete(cl);
 826         closure_put(&d->cl);
 827 }
 828 
 829 static void cached_dev_read_done(struct closure *cl)
 830 {
 831         struct search *s = container_of(cl, struct search, cl);
 832         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 833 
 834         /*
 835          * We had a cache miss; cache_bio now contains data ready to be inserted
 836          * into the cache.
 837          *
 838          * First, we copy the data we just read from cache_bio's bounce buffers
 839          * to the buffers the original bio pointed to:
 840          */
 841 
 842         if (s->iop.bio) {
 843                 bio_reset(s->iop.bio);
 844                 s->iop.bio->bi_iter.bi_sector =
 845                         s->cache_miss->bi_iter.bi_sector;
 846                 bio_copy_dev(s->iop.bio, s->cache_miss);
 847                 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 848                 bch_bio_map(s->iop.bio, NULL);
 849 
 850                 bio_copy_data(s->cache_miss, s->iop.bio);
 851 
 852                 bio_put(s->cache_miss);
 853                 s->cache_miss = NULL;
 854         }
 855 
 856         if (verify(dc) && s->recoverable && !s->read_dirty_data)
 857                 bch_data_verify(dc, s->orig_bio);
 858 
 859         closure_get(&dc->disk.cl);
 860         bio_complete(s);
 861 
 862         if (s->iop.bio &&
 863             !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
 864                 BUG_ON(!s->iop.replace);
 865                 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 866         }
 867 
 868         continue_at(cl, cached_dev_cache_miss_done, NULL);
 869 }
 870 
 871 static void cached_dev_read_done_bh(struct closure *cl)
 872 {
 873         struct search *s = container_of(cl, struct search, cl);
 874         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 875 
 876         bch_mark_cache_accounting(s->iop.c, s->d,
 877                                   !s->cache_missed, s->iop.bypass);
 878         trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
 879 
 880         if (s->iop.status)
 881                 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 882         else if (s->iop.bio || verify(dc))
 883                 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 884         else
 885                 continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 886 }
 887 
 888 static int cached_dev_cache_miss(struct btree *b, struct search *s,
 889                                  struct bio *bio, unsigned int sectors)
 890 {
 891         int ret = MAP_CONTINUE;
 892         unsigned int reada = 0;
 893         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 894         struct bio *miss, *cache_bio;
 895 
 896         s->cache_missed = 1;
 897 
 898         if (s->cache_miss || s->iop.bypass) {
 899                 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 900                 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 901                 goto out_submit;
 902         }
 903 
 904         if (!(bio->bi_opf & REQ_RAHEAD) &&
 905             !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 906             s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 907                 reada = min_t(sector_t, dc->readahead >> 9,
 908                               get_capacity(bio->bi_disk) - bio_end_sector(bio));
 909 
 910         s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 911 
 912         s->iop.replace_key = KEY(s->iop.inode,
 913                                  bio->bi_iter.bi_sector + s->insert_bio_sectors,
 914                                  s->insert_bio_sectors);
 915 
 916         ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
 917         if (ret)
 918                 return ret;
 919 
 920         s->iop.replace = true;
 921 
 922         miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 923 
 924         /* btree_search_recurse()'s btree iterator is no good anymore */
 925         ret = miss == bio ? MAP_DONE : -EINTR;
 926 
 927         cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 928                         DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
 929                         &dc->disk.bio_split);
 930         if (!cache_bio)
 931                 goto out_submit;
 932 
 933         cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
 934         bio_copy_dev(cache_bio, miss);
 935         cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 936 
 937         cache_bio->bi_end_io    = backing_request_endio;
 938         cache_bio->bi_private   = &s->cl;
 939 
 940         bch_bio_map(cache_bio, NULL);
 941         if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 942                 goto out_put;
 943 
 944         if (reada)
 945                 bch_mark_cache_readahead(s->iop.c, s->d);
 946 
 947         s->cache_miss   = miss;
 948         s->iop.bio      = cache_bio;
 949         bio_get(cache_bio);
 950         /* I/O request sent to backing device */
 951         closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 952 
 953         return ret;
 954 out_put:
 955         bio_put(cache_bio);
 956 out_submit:
 957         miss->bi_end_io         = backing_request_endio;
 958         miss->bi_private        = &s->cl;
 959         /* I/O request sent to backing device */
 960         closure_bio_submit(s->iop.c, miss, &s->cl);
 961         return ret;
 962 }
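
     /*
      * Added note (illustrative): on a normal cache miss the missed range,
      * plus up to dc->readahead sectors of readahead, is read from the
      * backing device into cache_bio's bounce pages; cached_dev_read_done()
      * later copies the requested part back into the original bio (via
      * s->cache_miss) and inserts cache_bio into the cache. Only the
      * fallback paths (bypass, an earlier miss, or a failed cache_bio
      * setup) send the split-off "miss" bio straight to the backing device
      * without inserting anything.
      */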
 963 
 964 static void cached_dev_read(struct cached_dev *dc, struct search *s)
 965 {
 966         struct closure *cl = &s->cl;
 967 
 968         closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 969         continue_at(cl, cached_dev_read_done_bh, NULL);
 970 }
 971 
 972 /* Process writes */
 973 
 974 static void cached_dev_write_complete(struct closure *cl)
 975 {
 976         struct search *s = container_of(cl, struct search, cl);
 977         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 978 
 979         up_read_non_owner(&dc->writeback_lock);
 980         cached_dev_bio_complete(cl);
 981 }
 982 
 983 static void cached_dev_write(struct cached_dev *dc, struct search *s)
 984 {
 985         struct closure *cl = &s->cl;
 986         struct bio *bio = &s->bio.bio;
 987         struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 988         struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 989 
 990         bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 991 
 992         down_read_non_owner(&dc->writeback_lock);
 993         if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
 994                 /*
 995                  * We overlap with some dirty data undergoing background
 996                  * writeback, force this write to writeback
 997                  */
 998                 s->iop.bypass = false;
 999                 s->iop.writeback = true;
1000         }
1001 
1002         /*
1003          * Discards aren't _required_ to do anything, so skipping if
1004          * check_overlapping returned true is ok
1005          *
1006          * But check_overlapping drops dirty keys for which io hasn't started,
1007          * so we still want to call it.
1008          */
1009         if (bio_op(bio) == REQ_OP_DISCARD)
1010                 s->iop.bypass = true;
1011 
1012         if (should_writeback(dc, s->orig_bio,
1013                              cache_mode(dc),
1014                              s->iop.bypass)) {
1015                 s->iop.bypass = false;
1016                 s->iop.writeback = true;
1017         }
1018 
1019         if (s->iop.bypass) {
1020                 s->iop.bio = s->orig_bio;
1021                 bio_get(s->iop.bio);
1022 
1023                 if (bio_op(bio) == REQ_OP_DISCARD &&
1024                     !blk_queue_discard(bdev_get_queue(dc->bdev)))
1025                         goto insert_data;
1026 
1027                 /* I/O request sent to backing device */
1028                 bio->bi_end_io = backing_request_endio;
1029                 closure_bio_submit(s->iop.c, bio, cl);
1030 
1031         } else if (s->iop.writeback) {
1032                 bch_writeback_add(dc);
1033                 s->iop.bio = bio;
1034 
1035                 if (bio->bi_opf & REQ_PREFLUSH) {
1036                         /*
1037                          * Also need to send a flush to the backing
1038                          * device.
1039                          */
1040                         struct bio *flush;
1041 
1042                         flush = bio_alloc_bioset(GFP_NOIO, 0,
1043                                                  &dc->disk.bio_split);
1044                         if (!flush) {
1045                                 s->iop.status = BLK_STS_RESOURCE;
1046                                 goto insert_data;
1047                         }
1048                         bio_copy_dev(flush, bio);
1049                         flush->bi_end_io = backing_request_endio;
1050                         flush->bi_private = cl;
1051                         flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1052                         /* I/O request sent to backing device */
1053                         closure_bio_submit(s->iop.c, flush, cl);
1054                 }
1055         } else {
1056                 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1057                 /* I/O request sent to backing device */
1058                 bio->bi_end_io = backing_request_endio;
1059                 closure_bio_submit(s->iop.c, bio, cl);
1060         }
1061 
1062 insert_data:
1063         closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1064         continue_at(cl, cached_dev_write_complete, NULL);
1065 }
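
     /*
      * Summary of the write-path branches above (added commentary, not
      * part of the original source):
      *
      *     bypass:       the write goes only to the backing device (an
      *                   unsupported discard is simply dropped), and
      *                   bch_data_insert() merely invalidates the
      *                   overlapping region of the cache.
      *     writeback:    the write goes only to the cache and is marked
      *                   dirty; it reaches the backing device later via
      *                   writeback (plus an explicit flush now if the
      *                   original request had REQ_PREFLUSH).
      *     writethrough: otherwise the bio is sent to the backing device
      *                   and a clone is inserted into the cache.
      */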
1066 
1067 static void cached_dev_nodata(struct closure *cl)
1068 {
1069         struct search *s = container_of(cl, struct search, cl);
1070         struct bio *bio = &s->bio.bio;
1071 
1072         if (s->iop.flush_journal)
1073                 bch_journal_meta(s->iop.c, cl);
1074 
1075         /* If it's a flush, we send the flush to the backing device too */
1076         bio->bi_end_io = backing_request_endio;
1077         closure_bio_submit(s->iop.c, bio, cl);
1078 
1079         continue_at(cl, cached_dev_bio_complete, NULL);
1080 }
1081 
1082 struct detached_dev_io_private {
1083         struct bcache_device    *d;
1084         unsigned long           start_time;
1085         bio_end_io_t            *bi_end_io;
1086         void                    *bi_private;
1087 };
1088 
1089 static void detached_dev_end_io(struct bio *bio)
1090 {
1091         struct detached_dev_io_private *ddip;
1092 
1093         ddip = bio->bi_private;
1094         bio->bi_end_io = ddip->bi_end_io;
1095         bio->bi_private = ddip->bi_private;
1096 
1097         generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1098                             &ddip->d->disk->part0, ddip->start_time);
1099 
1100         if (bio->bi_status) {
1101                 struct cached_dev *dc = container_of(ddip->d,
1102                                                      struct cached_dev, disk);
1103                 /* should count I/O error for backing device here */
1104                 bch_count_backing_io_errors(dc, bio);
1105         }
1106 
1107         kfree(ddip);
1108         bio->bi_end_io(bio);
1109 }
1110 
1111 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1112 {
1113         struct detached_dev_io_private *ddip;
1114         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1115 
1116         /*
1117          * No need to call closure_get(&dc->disk.cl) here, because the
1118          * upper layer has already opened the bcache device and thereby
1119          * called closure_get(&dc->disk.cl).
1120          */
1121         ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
1122         ddip->d = d;
1123         ddip->start_time = jiffies;
1124         ddip->bi_end_io = bio->bi_end_io;
1125         ddip->bi_private = bio->bi_private;
1126         bio->bi_end_io = detached_dev_end_io;
1127         bio->bi_private = ddip;
1128 
1129         if ((bio_op(bio) == REQ_OP_DISCARD) &&
1130             !blk_queue_discard(bdev_get_queue(dc->bdev)))
1131                 bio->bi_end_io(bio);
1132         else
1133                 generic_make_request(bio);
1134 }
1135 
1136 static void quit_max_writeback_rate(struct cache_set *c,
1137                                     struct cached_dev *this_dc)
1138 {
1139         int i;
1140         struct bcache_device *d;
1141         struct cached_dev *dc;
1142 
1143         /*
1144          * The mutex bch_register_lock may be contended by other parallel
1145          * requesters, or by attach/detach operations on other backing
1146          * devices, and waiting for it may increase I/O request latency by
1147          * seconds or more. To avoid that, if mutex_trylock() fails, only
1148          * the writeback rate of the current cached device is set to 1;
1149          * update_writeback_rate() will then decide the writeback rate of
1150          * the other cached devices (c->idle_counter is already 0 here).
1151          */
1152         if (mutex_trylock(&bch_register_lock)) {
1153                 for (i = 0; i < c->devices_max_used; i++) {
1154                         if (!c->devices[i])
1155                                 continue;
1156 
1157                         if (UUID_FLASH_ONLY(&c->uuids[i]))
1158                                 continue;
1159 
1160                         d = c->devices[i];
1161                         dc = container_of(d, struct cached_dev, disk);
1162                         /*
1163                          * Set the writeback rate to the default minimum
1164                          * value, then let update_writeback_rate() decide
1165                          * the upcoming rate.
1166                          */
1167                         atomic_long_set(&dc->writeback_rate.rate, 1);
1168                 }
1169                 mutex_unlock(&bch_register_lock);
1170         } else
1171                 atomic_long_set(&this_dc->writeback_rate.rate, 1);
1172 }
1173 
1174 /* Cached devices - read & write stuff */
1175 
1176 static blk_qc_t cached_dev_make_request(struct request_queue *q,
1177                                         struct bio *bio)
1178 {
1179         struct search *s;
1180         struct bcache_device *d = bio->bi_disk->private_data;
1181         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1182         int rw = bio_data_dir(bio);
1183 
1184         if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1185                      dc->io_disable)) {
1186                 bio->bi_status = BLK_STS_IOERR;
1187                 bio_endio(bio);
1188                 return BLK_QC_T_NONE;
1189         }
1190 
1191         if (likely(d->c)) {
1192                 if (atomic_read(&d->c->idle_counter))
1193                         atomic_set(&d->c->idle_counter, 0);
1194                 /*
1195                  * If at_max_writeback_rate of the cache set is true and
1196                  * new I/O arrives, quit the max writeback rate of all
1197                  * cached devices attached to this cache set, and set
1198                  * at_max_writeback_rate to false.
1199                  */
1200                 if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1201                         atomic_set(&d->c->at_max_writeback_rate, 0);
1202                         quit_max_writeback_rate(d->c, dc);
1203                 }
1204         }
1205 
1206         generic_start_io_acct(q,
1207                               bio_op(bio),
1208                               bio_sectors(bio),
1209                               &d->disk->part0);
1210 
1211         bio_set_dev(bio, dc->bdev);
1212         bio->bi_iter.bi_sector += dc->sb.data_offset;
1213 
1214         if (cached_dev_get(dc)) {
1215                 s = search_alloc(bio, d);
1216                 trace_bcache_request_start(s->d, bio);
1217 
1218                 if (!bio->bi_iter.bi_size) {
1219                         /*
1220                          * can't call bch_journal_meta from under
1221                          * generic_make_request
1222                          */
1223                         continue_at_nobarrier(&s->cl,
1224                                               cached_dev_nodata,
1225                                               bcache_wq);
1226                 } else {
1227                         s->iop.bypass = check_should_bypass(dc, bio);
1228 
1229                         if (rw)
1230                                 cached_dev_write(dc, s);
1231                         else
1232                                 cached_dev_read(dc, s);
1233                 }
1234         } else
1235                 /* I/O request sent to backing device */
1236                 detached_dev_do_request(d, bio);
1237 
1238         return BLK_QC_T_NONE;
1239 }
1240 
1241 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1242                             unsigned int cmd, unsigned long arg)
1243 {
1244         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1245 
1246         if (dc->io_disable)
1247                 return -EIO;
1248 
1249         return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1250 }
1251 
1252 static int cached_dev_congested(void *data, int bits)
1253 {
1254         struct bcache_device *d = data;
1255         struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1256         struct request_queue *q = bdev_get_queue(dc->bdev);
1257         int ret = 0;
1258 
1259         if (bdi_congested(q->backing_dev_info, bits))
1260                 return 1;
1261 
1262         if (cached_dev_get(dc)) {
1263                 unsigned int i;
1264                 struct cache *ca;
1265 
1266                 for_each_cache(ca, d->c, i) {
1267                         q = bdev_get_queue(ca->bdev);
1268                         ret |= bdi_congested(q->backing_dev_info, bits);
1269                 }
1270 
1271                 cached_dev_put(dc);
1272         }
1273 
1274         return ret;
1275 }
1276 
1277 void bch_cached_dev_request_init(struct cached_dev *dc)
1278 {
1279         struct gendisk *g = dc->disk.disk;
1280 
1281         g->queue->make_request_fn               = cached_dev_make_request;
1282         g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1283         dc->disk.cache_miss                     = cached_dev_cache_miss;
1284         dc->disk.ioctl                          = cached_dev_ioctl;
1285 }
1286 
1287 /* Flash backed devices */
1288 
1289 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1290                                 struct bio *bio, unsigned int sectors)
1291 {
1292         unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1293 
1294         swap(bio->bi_iter.bi_size, bytes);
1295         zero_fill_bio(bio);
1296         swap(bio->bi_iter.bi_size, bytes);
1297 
1298         bio_advance(bio, bytes);
1299 
1300         if (!bio->bi_iter.bi_size)
1301                 return MAP_DONE;
1302 
1303         return MAP_CONTINUE;
1304 }
1305 
1306 static void flash_dev_nodata(struct closure *cl)
1307 {
1308         struct search *s = container_of(cl, struct search, cl);
1309 
1310         if (s->iop.flush_journal)
1311                 bch_journal_meta(s->iop.c, cl);
1312 
1313         continue_at(cl, search_free, NULL);
1314 }
1315 
1316 static blk_qc_t flash_dev_make_request(struct request_queue *q,
1317                                              struct bio *bio)
1318 {
1319         struct search *s;
1320         struct closure *cl;
1321         struct bcache_device *d = bio->bi_disk->private_data;
1322 
1323         if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1324                 bio->bi_status = BLK_STS_IOERR;
1325                 bio_endio(bio);
1326                 return BLK_QC_T_NONE;
1327         }
1328 
1329         generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1330 
1331         s = search_alloc(bio, d);
1332         cl = &s->cl;
1333         bio = &s->bio.bio;
1334 
1335         trace_bcache_request_start(s->d, bio);
1336 
1337         if (!bio->bi_iter.bi_size) {
1338                 /*
1339                  * can't call bch_journal_meta from under
1340                  * generic_make_request
1341                  */
1342                 continue_at_nobarrier(&s->cl,
1343                                       flash_dev_nodata,
1344                                       bcache_wq);
1345                 return BLK_QC_T_NONE;
1346         } else if (bio_data_dir(bio)) {
1347                 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1348                                         &KEY(d->id, bio->bi_iter.bi_sector, 0),
1349                                         &KEY(d->id, bio_end_sector(bio), 0));
1350 
1351                 s->iop.bypass           = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1352                 s->iop.writeback        = true;
1353                 s->iop.bio              = bio;
1354 
1355                 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1356         } else {
1357                 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1358         }
1359 
1360         continue_at(cl, search_free, NULL);
1361         return BLK_QC_T_NONE;
1362 }
1363 
1364 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1365                            unsigned int cmd, unsigned long arg)
1366 {
1367         return -ENOTTY;
1368 }
1369 
1370 static int flash_dev_congested(void *data, int bits)
1371 {
1372         struct bcache_device *d = data;
1373         struct request_queue *q;
1374         struct cache *ca;
1375         unsigned int i;
1376         int ret = 0;
1377 
1378         for_each_cache(ca, d->c, i) {
1379                 q = bdev_get_queue(ca->bdev);
1380                 ret |= bdi_congested(q->backing_dev_info, bits);
1381         }
1382 
1383         return ret;
1384 }
1385 
1386 void bch_flash_dev_request_init(struct bcache_device *d)
1387 {
1388         struct gendisk *g = d->disk;
1389 
1390         g->queue->make_request_fn               = flash_dev_make_request;
1391         g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1392         d->cache_miss                           = flash_dev_cache_miss;
1393         d->ioctl                                = flash_dev_ioctl;
1394 }
1395 
1396 void bch_request_exit(void)
1397 {
1398         kmem_cache_destroy(bch_search_cache);
1399 }
1400 
1401 int __init bch_request_init(void)
1402 {
1403         bch_search_cache = KMEM_CACHE(search, 0);
1404         if (!bch_search_cache)
1405                 return -ENOMEM;
1406 
1407         return 0;
1408 }
