root/drivers/md/bcache/writeback.c


DEFINITIONS

This source file includes the following definitions.
  1. update_gc_after_writeback
  2. __calc_target_rate
  3. __update_writeback_rate
  4. set_at_max_writeback_rate
  5. update_writeback_rate
  6. writeback_delay
  7. dirty_init
  8. dirty_io_destructor
  9. write_dirty_finish
  10. dirty_endio
  11. write_dirty
  12. read_dirty_endio
  13. read_dirty_submit
  14. read_dirty
  15. bcache_dev_sectors_dirty_add
  16. dirty_pred
  17. refill_full_stripes
  18. refill_dirty
  19. bch_writeback_thread
  20. sectors_dirty_init_fn
  21. bch_sectors_dirty_init
  22. bch_cached_dev_writeback_init
  23. bch_cached_dev_writeback_start

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * background writeback - scan btree for dirty data and write it to the backing
   4  * device
   5  *
   6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7  * Copyright 2012 Google, Inc.
   8  */
   9 
  10 #include "bcache.h"
  11 #include "btree.h"
  12 #include "debug.h"
  13 #include "writeback.h"
  14 
  15 #include <linux/delay.h>
  16 #include <linux/kthread.h>
  17 #include <linux/sched/clock.h>
  18 #include <trace/events/bcache.h>
  19 
  20 static void update_gc_after_writeback(struct cache_set *c)
  21 {
  22         if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
  23             c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
  24                 return;
  25 
  26         c->gc_after_writeback |= BCH_DO_AUTO_GC;
  27 }
  28 
  29 /* Rate limiting */
  30 static uint64_t __calc_target_rate(struct cached_dev *dc)
  31 {
  32         struct cache_set *c = dc->disk.c;
  33 
  34         /*
  35          * This is the size of the cache, minus the amount used for
  36          * flash-only devices
  37          */
  38         uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
  39                                 atomic_long_read(&c->flash_dev_dirty_sectors);
  40 
  41         /*
  42          * Unfortunately there is no control of global dirty data.  If the
  43          * user states that they want 10% dirty data in the cache, and has,
  44          * e.g., 5 backing volumes of equal size, we try and ensure each
  45          * backing volume uses about 2% of the cache for dirty data.
  46          */
  47         uint32_t bdev_share =
  48                 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
  49                                 c->cached_dev_sectors);
  50 
  51         uint64_t cache_dirty_target =
  52                 div_u64(cache_sectors * dc->writeback_percent, 100);
  53 
  54         /* Ensure each backing dev gets at least one dirty share */
  55         if (bdev_share < 1)
  56                 bdev_share = 1;
  57 
  58         return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
  59 }
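
/*
 * Illustrative sketch, not part of the driver: the per-device share
 * arithmetic of __calc_target_rate() redone in plain userspace C so the
 * numbers are easy to follow. The ex_ names and the sample sizes are
 * invented for this example, and WRITEBACK_SHARE_SHIFT is assumed to be
 * 14, its value in writeback.h at the time of writing.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_WRITEBACK_SHARE_SHIFT 14

static uint64_t ex_target_dirty(uint64_t cache_sectors,
                                uint64_t bdev_sectors,
                                uint64_t cached_dev_sectors,
                                unsigned int writeback_percent)
{
        /* This device's share of all attached backing space, in 1/2^14 units */
        uint64_t bdev_share = (bdev_sectors << EX_WRITEBACK_SHARE_SHIFT) /
                              cached_dev_sectors;
        /* Total dirty data allowed in the cache */
        uint64_t cache_dirty_target = cache_sectors * writeback_percent / 100;

        if (bdev_share < 1)
                bdev_share = 1;

        return (cache_dirty_target * bdev_share) >> EX_WRITEBACK_SHARE_SHIFT;
}

int main(void)
{
        /*
         * A 1 TiB cache (2^31 sectors of 512 bytes), five equal 2 TiB
         * backing volumes and writeback_percent = 10: each volume is
         * steered toward roughly 2% of the cache (~43M sectors, ~20 GiB).
         */
        uint64_t cache = 1ULL << 31;
        uint64_t bdev  = 1ULL << 32;

        printf("per-device dirty target: %llu sectors\n",
               (unsigned long long)ex_target_dirty(cache, bdev, 5 * bdev, 10));
        return 0;
}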
  60 
  61 static void __update_writeback_rate(struct cached_dev *dc)
  62 {
  63         /*
  64          * PI controller:
  65          * Figures out the amount that should be written per second.
  66          *
  67          * First, the error (number of sectors that are dirty beyond our
  68          * target) is calculated.  The error is accumulated (numerically
  69          * integrated).
  70          *
  71          * Then, the proportional value and integral value are scaled
  72          * based on configured values.  These are stored as inverses to
  73          * avoid fixed point math and to make configuration easy-- e.g.
  74          * the default value of 40 for writeback_rate_p_term_inverse
  75          * attempts to write at a rate that would retire all the dirty
  76          * blocks in 40 seconds.
  77          *
  78          * The writeback_rate_i_inverse value of 10000 means that 1/10000th
  79          * of the error is accumulated in the integral term per second.
  80          * This acts as a slow, long-term average that is not subject to
  81          * variations in usage like the p term.
  82          */
  83         int64_t target = __calc_target_rate(dc);
  84         int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
  85         int64_t error = dirty - target;
  86         int64_t proportional_scaled =
  87                 div_s64(error, dc->writeback_rate_p_term_inverse);
  88         int64_t integral_scaled;
  89         uint32_t new_rate;
  90 
  91         if ((error < 0 && dc->writeback_rate_integral > 0) ||
  92             (error > 0 && time_before64(local_clock(),
  93                          dc->writeback_rate.next + NSEC_PER_MSEC))) {
  94                 /*
  95                  * Only decrease the integral term if it's more than
  96                  * zero.  Only increase the integral term if the device
  97                  * is keeping up.  (Don't wind up the integral
  98                  * ineffectively in either case).
  99                  *
 100                  * It's necessary to scale this by
 101                  * writeback_rate_update_seconds to keep the integral
 102                  * term dimensioned properly.
 103                  */
 104                 dc->writeback_rate_integral += error *
 105                         dc->writeback_rate_update_seconds;
 106         }
 107 
 108         integral_scaled = div_s64(dc->writeback_rate_integral,
 109                         dc->writeback_rate_i_term_inverse);
 110 
 111         new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
 112                         dc->writeback_rate_minimum, NSEC_PER_SEC);
 113 
 114         dc->writeback_rate_proportional = proportional_scaled;
 115         dc->writeback_rate_integral_scaled = integral_scaled;
 116         dc->writeback_rate_change = new_rate -
 117                         atomic_long_read(&dc->writeback_rate.rate);
 118         atomic_long_set(&dc->writeback_rate.rate, new_rate);
 119         dc->writeback_rate_target = target;
 120 }
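
/*
 * Illustrative sketch, not part of the driver: one PI update step using the
 * default tuning set in bch_cached_dev_writeback_init() below
 * (writeback_rate_p_term_inverse = 40, writeback_rate_i_term_inverse =
 * 10000). The ex_ names are invented for this example, and the "device is
 * keeping up" time check that gates the integral term is omitted for
 * brevity.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t ex_integral;     /* plays the role of writeback_rate_integral */

static uint32_t ex_pi_step(int64_t dirty, int64_t target,
                           int64_t update_seconds, int64_t rate_minimum)
{
        const int64_t p_inverse = 40;
        const int64_t i_inverse = 10000;
        const int64_t rate_max  = 1000000000LL;         /* NSEC_PER_SEC */
        int64_t error = dirty - target;
        int64_t proportional = error / p_inverse;
        int64_t rate;

        /* Only wind the integral down while it is positive; in the driver it
         * is only wound up while the device is keeping up. */
        if ((error < 0 && ex_integral > 0) || error > 0)
                ex_integral += error * update_seconds;

        rate = proportional + ex_integral / i_inverse;
        if (rate < rate_minimum)
                rate = rate_minimum;
        if (rate > rate_max)
                rate = rate_max;

        return (uint32_t)rate;
}

int main(void)
{
        /* 204800 sectors (100 MiB) over target, updated every 5 seconds:
         * p term 5120, i term 102 after one step, so 5222 sectors/s. */
        printf("rate: %u sectors/s\n", ex_pi_step(1204800, 1000000, 5, 8));
        return 0;
}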
 121 
 122 static bool set_at_max_writeback_rate(struct cache_set *c,
 123                                        struct cached_dev *dc)
 124 {
 125         /* Don't set max writeback rate if gc is running */
 126         if (!c->gc_mark_valid)
 127                 return false;
 128         /*
  129          * Idle_counter is increased every time update_writeback_rate() is
  130          * called. If all backing devices attached to the same cache set have
  131          * identical dc->writeback_rate_update_seconds values, it takes about
  132          * 6 rounds of update_writeback_rate() on each backing device before
  133          * c->at_max_writeback_rate is set to 1, and then the max writeback
  134          * rate is set on each dc->writeback_rate.rate.
  135          * To avoid the extra locking cost of counting the exact number of
  136          * dirty cached devices, c->attached_dev_nr is used to calculate the
  137          * idle threshold. It might be bigger if not all cached devices are in
  138          * writeback mode, but it still works well with a limited number of
  139          * extra rounds of update_writeback_rate().
 140          */
 141         if (atomic_inc_return(&c->idle_counter) <
 142             atomic_read(&c->attached_dev_nr) * 6)
 143                 return false;
 144 
 145         if (atomic_read(&c->at_max_writeback_rate) != 1)
 146                 atomic_set(&c->at_max_writeback_rate, 1);
 147 
 148         atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
 149 
 150         /* keep writeback_rate_target as existing value */
 151         dc->writeback_rate_proportional = 0;
 152         dc->writeback_rate_integral_scaled = 0;
 153         dc->writeback_rate_change = 0;
 154 
 155         /*
  156          * Check c->idle_counter and c->at_max_writeback_rate again in case
  157          * new I/O arrives before set_at_max_writeback_rate() returns. If it
  158          * has, return false so that the writeback rate is decided via
  159          * __update_writeback_rate() instead of being left at the maximum.
 160          */
 161         if ((atomic_read(&c->idle_counter) <
 162              atomic_read(&c->attached_dev_nr) * 6) ||
 163             !atomic_read(&c->at_max_writeback_rate))
 164                 return false;
 165 
 166         return true;
 167 }
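
/*
 * Worked example (illustrative numbers): with three backing devices
 * attached, each dirty and in writeback mode, idle_counter has to reach
 * 3 * 6 = 18 before the maximum rate is applied. Since every device
 * increments the counter once per writeback_rate_update_seconds, that is
 * roughly six idle update intervals per device, as described above.
 */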
 168 
 169 static void update_writeback_rate(struct work_struct *work)
 170 {
 171         struct cached_dev *dc = container_of(to_delayed_work(work),
 172                                              struct cached_dev,
 173                                              writeback_rate_update);
 174         struct cache_set *c = dc->disk.c;
 175 
 176         /*
 177          * should check BCACHE_DEV_RATE_DW_RUNNING before calling
 178          * cancel_delayed_work_sync().
 179          */
 180         set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 181         /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
 182         smp_mb();
 183 
 184         /*
 185          * CACHE_SET_IO_DISABLE might be set via sysfs interface,
 186          * check it here too.
 187          */
 188         if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
 189             test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
 190                 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 191                 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
 192                 smp_mb();
 193                 return;
 194         }
 195 
 196         if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
 197                 /*
  198                  * If the whole cache set is idle, set_at_max_writeback_rate()
  199                  * will set the writeback rate to the maximum. Then it is
  200                  * unnecessary to update the writeback rate again for an idle
  201                  * cache set that is already at the maximum writeback rate.
 202                  */
 203                 if (!set_at_max_writeback_rate(c, dc)) {
 204                         down_read(&dc->writeback_lock);
 205                         __update_writeback_rate(dc);
 206                         update_gc_after_writeback(c);
 207                         up_read(&dc->writeback_lock);
 208                 }
 209         }
 210 
 211 
 212         /*
 213          * CACHE_SET_IO_DISABLE might be set via sysfs interface,
 214          * check it here too.
 215          */
 216         if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
 217             !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
 218                 schedule_delayed_work(&dc->writeback_rate_update,
 219                               dc->writeback_rate_update_seconds * HZ);
 220         }
 221 
 222         /*
 223          * should check BCACHE_DEV_RATE_DW_RUNNING before calling
 224          * cancel_delayed_work_sync().
 225          */
 226         clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 227         /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
 228         smp_mb();
 229 }
 230 
 231 static unsigned int writeback_delay(struct cached_dev *dc,
 232                                     unsigned int sectors)
 233 {
 234         if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 235             !dc->writeback_percent)
 236                 return 0;
 237 
 238         return bch_next_delay(&dc->writeback_rate, sectors);
 239 }
 240 
 241 struct dirty_io {
 242         struct closure          cl;
 243         struct cached_dev       *dc;
 244         uint16_t                sequence;
 245         struct bio              bio;
 246 };
 247 
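/*
 * Set up the bio embedded in w->private for the key being written back:
 * size it to KEY_SIZE sectors and, when writeback_percent is zero, drop
 * to idle I/O priority so the writeback I/O yields to foreground requests.
 */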
 248 static void dirty_init(struct keybuf_key *w)
 249 {
 250         struct dirty_io *io = w->private;
 251         struct bio *bio = &io->bio;
 252 
 253         bio_init(bio, bio->bi_inline_vecs,
 254                  DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
 255         if (!io->dc->writeback_percent)
 256                 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 257 
 258         bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
 259         bio->bi_private         = w;
 260         bch_bio_map(bio, NULL);
 261 }
 262 
 263 static void dirty_io_destructor(struct closure *cl)
 264 {
 265         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 266 
 267         kfree(io);
 268 }
 269 
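/*
 * Final step of one writeback operation, run on the writeback workqueue:
 * free the bio's pages and, if the key is still dirty (no I/O error was
 * seen), insert a replacement key with the dirty bit cleared. A failed
 * insert means the data was overwritten while it was being written back
 * and is counted as a collision. Finally drop the key from the writeback
 * keybuf and release an in_flight slot.
 */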
 270 static void write_dirty_finish(struct closure *cl)
 271 {
 272         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 273         struct keybuf_key *w = io->bio.bi_private;
 274         struct cached_dev *dc = io->dc;
 275 
 276         bio_free_pages(&io->bio);
 277 
 278         /* This is kind of a dumb way of signalling errors. */
 279         if (KEY_DIRTY(&w->key)) {
 280                 int ret;
 281                 unsigned int i;
 282                 struct keylist keys;
 283 
 284                 bch_keylist_init(&keys);
 285 
 286                 bkey_copy(keys.top, &w->key);
 287                 SET_KEY_DIRTY(keys.top, false);
 288                 bch_keylist_push(&keys);
 289 
 290                 for (i = 0; i < KEY_PTRS(&w->key); i++)
 291                         atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 292 
 293                 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
 294 
 295                 if (ret)
 296                         trace_bcache_writeback_collision(&w->key);
 297 
 298                 atomic_long_inc(ret
 299                                 ? &dc->disk.c->writeback_keys_failed
 300                                 : &dc->disk.c->writeback_keys_done);
 301         }
 302 
 303         bch_keybuf_del(&dc->writeback_keys, w);
 304         up(&dc->in_flight);
 305 
 306         closure_return_with_destructor(cl, dirty_io_destructor);
 307 }
 308 
 309 static void dirty_endio(struct bio *bio)
 310 {
 311         struct keybuf_key *w = bio->bi_private;
 312         struct dirty_io *io = w->private;
 313 
 314         if (bio->bi_status) {
 315                 SET_KEY_DIRTY(&w->key, false);
 316                 bch_count_backing_io_errors(io->dc, bio);
 317         }
 318 
 319         closure_put(&io->cl);
 320 }
 321 
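/*
 * Issue the write to the backing device. Writes are issued in the order
 * the keys were collected by read_dirty(): each dirty_io carries a
 * sequence number, and if it is not this io's turn yet, it parks on
 * writeback_ordering_wait and is retried from the writeback workqueue.
 */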
 322 static void write_dirty(struct closure *cl)
 323 {
 324         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 325         struct keybuf_key *w = io->bio.bi_private;
 326         struct cached_dev *dc = io->dc;
 327 
 328         uint16_t next_sequence;
 329 
 330         if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
 331                 /* Not our turn to write; wait for a write to complete */
 332                 closure_wait(&dc->writeback_ordering_wait, cl);
 333 
 334                 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
 335                         /*
  336                          * Edge case: our turn arrived in indeterminate order
  337                          * relative to when we were added to the wait list.
 338                          */
 339                         closure_wake_up(&dc->writeback_ordering_wait);
 340                 }
 341 
 342                 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
 343                 return;
 344         }
 345 
 346         next_sequence = io->sequence + 1;
 347 
 348         /*
 349          * IO errors are signalled using the dirty bit on the key.
 350          * If we failed to read, we should not attempt to write to the
 351          * backing device.  Instead, immediately go to write_dirty_finish
 352          * to clean up.
 353          */
 354         if (KEY_DIRTY(&w->key)) {
 355                 dirty_init(w);
 356                 bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
 357                 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
 358                 bio_set_dev(&io->bio, io->dc->bdev);
 359                 io->bio.bi_end_io       = dirty_endio;
 360 
 361                 /* I/O request sent to backing device */
 362                 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
 363         }
 364 
 365         atomic_set(&dc->writeback_sequence_next, next_sequence);
 366         closure_wake_up(&dc->writeback_ordering_wait);
 367 
 368         continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
 369 }
 370 
 371 static void read_dirty_endio(struct bio *bio)
 372 {
 373         struct keybuf_key *w = bio->bi_private;
 374         struct dirty_io *io = w->private;
 375 
 376         /* is_read = 1 */
 377         bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
 378                             bio->bi_status, 1,
 379                             "reading dirty data from cache");
 380 
 381         dirty_endio(bio);
 382 }
 383 
 384 static void read_dirty_submit(struct closure *cl)
 385 {
 386         struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 387 
 388         closure_bio_submit(io->dc->disk.c, &io->bio, cl);
 389 
 390         continue_at(cl, write_dirty, io->dc->writeback_write_wq);
 391 }
 392 
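/*
 * One writeback pass over the refilled keybuf: gather up to
 * MAX_WRITEBACKS_IN_PASS contiguous dirty keys (at most
 * MAX_WRITESIZE_IN_PASS sectors), read their data from the cache device,
 * and hand each key off to write_dirty(); between batches, sleep according
 * to the current writeback rate.
 */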
 393 static void read_dirty(struct cached_dev *dc)
 394 {
 395         unsigned int delay = 0;
 396         struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
 397         size_t size;
 398         int nk, i;
 399         struct dirty_io *io;
 400         struct closure cl;
 401         uint16_t sequence = 0;
 402 
 403         BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
 404         atomic_set(&dc->writeback_sequence_next, sequence);
 405         closure_init_stack(&cl);
 406 
 407         /*
 408          * XXX: if we error, background writeback just spins. Should use some
 409          * mempools.
 410          */
 411 
 412         next = bch_keybuf_next(&dc->writeback_keys);
 413 
 414         while (!kthread_should_stop() &&
 415                !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
 416                next) {
 417                 size = 0;
 418                 nk = 0;
 419 
 420                 do {
 421                         BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
 422 
 423                         /*
 424                          * Don't combine too many operations, even if they
 425                          * are all small.
 426                          */
 427                         if (nk >= MAX_WRITEBACKS_IN_PASS)
 428                                 break;
 429 
 430                         /*
 431                          * If the current operation is very large, don't
 432                          * further combine operations.
 433                          */
 434                         if (size >= MAX_WRITESIZE_IN_PASS)
 435                                 break;
 436 
 437                         /*
 438                          * Operations are only eligible to be combined
 439                          * if they are contiguous.
 440                          *
 441                          * TODO: add a heuristic willing to fire a
 442                          * certain amount of non-contiguous IO per pass,
 443                          * so that we can benefit from backing device
 444                          * command queueing.
 445                          */
 446                         if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
 447                                                 &START_KEY(&next->key)))
 448                                 break;
 449 
 450                         size += KEY_SIZE(&next->key);
 451                         keys[nk++] = next;
 452                 } while ((next = bch_keybuf_next(&dc->writeback_keys)));
 453 
 454                 /* Now we have gathered a set of 1..5 keys to write back. */
 455                 for (i = 0; i < nk; i++) {
 456                         w = keys[i];
 457 
 458                         io = kzalloc(sizeof(struct dirty_io) +
 459                                      sizeof(struct bio_vec) *
 460                                      DIV_ROUND_UP(KEY_SIZE(&w->key),
 461                                                   PAGE_SECTORS),
 462                                      GFP_KERNEL);
 463                         if (!io)
 464                                 goto err;
 465 
 466                         w->private      = io;
 467                         io->dc          = dc;
 468                         io->sequence    = sequence++;
 469 
 470                         dirty_init(w);
 471                         bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
 472                         io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
 473                         bio_set_dev(&io->bio,
 474                                     PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
 475                         io->bio.bi_end_io       = read_dirty_endio;
 476 
 477                         if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
 478                                 goto err_free;
 479 
 480                         trace_bcache_writeback(&w->key);
 481 
 482                         down(&dc->in_flight);
 483 
 484                         /*
 485                          * We've acquired a semaphore for the maximum
 486                          * simultaneous number of writebacks; from here
 487                          * everything happens asynchronously.
 488                          */
 489                         closure_call(&io->cl, read_dirty_submit, NULL, &cl);
 490                 }
 491 
 492                 delay = writeback_delay(dc, size);
 493 
 494                 while (!kthread_should_stop() &&
 495                        !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
 496                        delay) {
 497                         schedule_timeout_interruptible(delay);
 498                         delay = writeback_delay(dc, 0);
 499                 }
 500         }
 501 
 502         if (0) {
 503 err_free:
 504                 kfree(w->private);
 505 err:
 506                 bch_keybuf_del(&dc->writeback_keys, w);
 507         }
 508 
 509         /*
 510          * Wait for outstanding writeback IOs to finish (and keybuf slots to be
 511          * freed) before refilling again
 512          */
 513         closure_sync(&cl);
 514 }
 515 
 516 /* Scan for dirty data */
 517 
 518 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
 519                                   uint64_t offset, int nr_sectors)
 520 {
 521         struct bcache_device *d = c->devices[inode];
 522         unsigned int stripe_offset, stripe, sectors_dirty;
 523 
 524         if (!d)
 525                 return;
 526 
 527         if (UUID_FLASH_ONLY(&c->uuids[inode]))
 528                 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
 529 
 530         stripe = offset_to_stripe(d, offset);
 531         stripe_offset = offset & (d->stripe_size - 1);
 532 
 533         while (nr_sectors) {
 534                 int s = min_t(unsigned int, abs(nr_sectors),
 535                               d->stripe_size - stripe_offset);
 536 
 537                 if (nr_sectors < 0)
 538                         s = -s;
 539 
 540                 if (stripe >= d->nr_stripes)
 541                         return;
 542 
 543                 sectors_dirty = atomic_add_return(s,
 544                                         d->stripe_sectors_dirty + stripe);
 545                 if (sectors_dirty == d->stripe_size)
 546                         set_bit(stripe, d->full_dirty_stripes);
 547                 else
 548                         clear_bit(stripe, d->full_dirty_stripes);
 549 
 550                 nr_sectors -= s;
 551                 stripe_offset = 0;
 552                 stripe++;
 553         }
 554 }
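
/*
 * Illustrative sketch, not part of the driver: the stripe-splitting
 * arithmetic above redone with plain integers, to show how a dirty range
 * is apportioned between stripes. The ex_ names and sizes are invented for
 * this example; the driver additionally keeps the per-stripe counters
 * atomic and maintains the full_dirty_stripes bitmap.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EX_STRIPE_SIZE 2048u    /* sectors per stripe (1 MiB of 512 B sectors) */
#define EX_NR_STRIPES  16u

static int ex_stripe_dirty[EX_NR_STRIPES];

/* Add (or, with negative nr_sectors, subtract) dirty sectors for a range,
 * splitting it at stripe boundaries like bcache_dev_sectors_dirty_add(). */
static void ex_dirty_add(uint64_t offset, int nr_sectors)
{
        unsigned int stripe = offset / EX_STRIPE_SIZE;
        unsigned int stripe_offset = offset % EX_STRIPE_SIZE;

        while (nr_sectors) {
                int s = abs(nr_sectors);

                if (s > (int)(EX_STRIPE_SIZE - stripe_offset))
                        s = EX_STRIPE_SIZE - stripe_offset;
                if (nr_sectors < 0)
                        s = -s;
                if (stripe >= EX_NR_STRIPES)
                        return;

                ex_stripe_dirty[stripe] += s;
                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}

int main(void)
{
        unsigned int i;

        /* 3000 dirty sectors starting at offset 5000 span stripes 2 and 3:
         * 1144 sectors land in stripe 2 and 1856 in stripe 3. */
        ex_dirty_add(5000, 3000);
        for (i = 2; i <= 3; i++)
                printf("stripe %u: %d dirty sectors\n", i, ex_stripe_dirty[i]);
        return 0;
}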
 555 
 556 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 557 {
 558         struct cached_dev *dc = container_of(buf,
 559                                              struct cached_dev,
 560                                              writeback_keys);
 561 
 562         BUG_ON(KEY_INODE(k) != dc->disk.id);
 563 
 564         return KEY_DIRTY(k);
 565 }
 566 
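/*
 * Used when partial stripe writes are expensive on the backing device:
 * prefer stripes that are completely dirty by scanning the
 * full_dirty_stripes bitmap (wrapping around once from where the last scan
 * stopped) and refilling the keybuf only from those stripe ranges.
 */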
 567 static void refill_full_stripes(struct cached_dev *dc)
 568 {
 569         struct keybuf *buf = &dc->writeback_keys;
 570         unsigned int start_stripe, stripe, next_stripe;
 571         bool wrapped = false;
 572 
 573         stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
 574 
 575         if (stripe >= dc->disk.nr_stripes)
 576                 stripe = 0;
 577 
 578         start_stripe = stripe;
 579 
 580         while (1) {
 581                 stripe = find_next_bit(dc->disk.full_dirty_stripes,
 582                                        dc->disk.nr_stripes, stripe);
 583 
 584                 if (stripe == dc->disk.nr_stripes)
 585                         goto next;
 586 
 587                 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
 588                                                  dc->disk.nr_stripes, stripe);
 589 
 590                 buf->last_scanned = KEY(dc->disk.id,
 591                                         stripe * dc->disk.stripe_size, 0);
 592 
 593                 bch_refill_keybuf(dc->disk.c, buf,
 594                                   &KEY(dc->disk.id,
 595                                        next_stripe * dc->disk.stripe_size, 0),
 596                                   dirty_pred);
 597 
 598                 if (array_freelist_empty(&buf->freelist))
 599                         return;
 600 
 601                 stripe = next_stripe;
 602 next:
 603                 if (wrapped && stripe > start_stripe)
 604                         return;
 605 
 606                 if (stripe == dc->disk.nr_stripes) {
 607                         stripe = 0;
 608                         wrapped = true;
 609                 }
 610         }
 611 }
 612 
 613 /*
 614  * Returns true if we scanned the entire disk
 615  */
 616 static bool refill_dirty(struct cached_dev *dc)
 617 {
 618         struct keybuf *buf = &dc->writeback_keys;
 619         struct bkey start = KEY(dc->disk.id, 0, 0);
 620         struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
 621         struct bkey start_pos;
 622 
 623         /*
  624          * Make sure the keybuf position is inside the range for this disk;
  625          * at bringup we might not be attached yet, so this disk's inode
  626          * number isn't initialized at that point.
 627          */
 628         if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
 629             bkey_cmp(&buf->last_scanned, &end) > 0)
 630                 buf->last_scanned = start;
 631 
 632         if (dc->partial_stripes_expensive) {
 633                 refill_full_stripes(dc);
 634                 if (array_freelist_empty(&buf->freelist))
 635                         return false;
 636         }
 637 
 638         start_pos = buf->last_scanned;
 639         bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
 640 
 641         if (bkey_cmp(&buf->last_scanned, &end) < 0)
 642                 return false;
 643 
 644         /*
  645          * If we get to the end, start scanning again from the beginning, and
 646          * only scan up to where we initially started scanning from:
 647          */
 648         buf->last_scanned = start;
 649         bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
 650 
 651         return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
 652 }
 653 
 654 static int bch_writeback_thread(void *arg)
 655 {
 656         struct cached_dev *dc = arg;
 657         struct cache_set *c = dc->disk.c;
 658         bool searched_full_index;
 659 
 660         bch_ratelimit_reset(&dc->writeback_rate);
 661 
 662         while (!kthread_should_stop() &&
 663                !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
 664                 down_write(&dc->writeback_lock);
 665                 set_current_state(TASK_INTERRUPTIBLE);
 666                 /*
  667                  * If the bcache device is detaching, skip here and continue
  668                  * to perform writeback. Otherwise, if there is no dirty data
  669                  * in the cache, or there is dirty data but writeback is
  670                  * disabled, the writeback thread should sleep here and wait
  671                  * for someone else to wake it up.
 672                  */
 673                 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
 674                     (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
 675                         up_write(&dc->writeback_lock);
 676 
 677                         if (kthread_should_stop() ||
 678                             test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
 679                                 set_current_state(TASK_RUNNING);
 680                                 break;
 681                         }
 682 
 683                         schedule();
 684                         continue;
 685                 }
 686                 set_current_state(TASK_RUNNING);
 687 
 688                 searched_full_index = refill_dirty(dc);
 689 
 690                 if (searched_full_index &&
 691                     RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
 692                         atomic_set(&dc->has_dirty, 0);
 693                         SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
 694                         bch_write_bdev_super(dc, NULL);
 695                         /*
  696                          * If the bcache device is detaching via the sysfs
  697                          * interface, the writeback thread should stop once the
  698                          * cache holds no more dirty data. BCACHE_DEV_DETACHING
  699                          * is set in bch_cached_dev_detach().
 700                          */
 701                         if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
 702                                 up_write(&dc->writeback_lock);
 703                                 break;
 704                         }
 705 
 706                         /*
  707                          * When the dirty data ratio is high (e.g. 50%+),
  708                          * there might be heavy bucket fragmentation after
  709                          * writeback finishes, which hurts subsequent write
  710                          * performance. If users really care about write
  711                          * performance they may set BCH_ENABLE_AUTO_GC via
  712                          * sysfs; then, when BCH_DO_AUTO_GC is set, the
  713                          * garbage collection thread is woken up here. After
  714                          * moving GC, the shrunk btree and the discarded free
  715                          * bucket space on the SSD may help subsequent writes.
 716                          */
 717                         if (c->gc_after_writeback ==
 718                             (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
 719                                 c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
 720                                 force_wake_up_gc(c);
 721                         }
 722                 }
 723 
 724                 up_write(&dc->writeback_lock);
 725 
 726                 read_dirty(dc);
 727 
 728                 if (searched_full_index) {
 729                         unsigned int delay = dc->writeback_delay * HZ;
 730 
 731                         while (delay &&
 732                                !kthread_should_stop() &&
 733                                !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
 734                                !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
 735                                 delay = schedule_timeout_interruptible(delay);
 736 
 737                         bch_ratelimit_reset(&dc->writeback_rate);
 738                 }
 739         }
 740 
 741         if (dc->writeback_write_wq) {
 742                 flush_workqueue(dc->writeback_write_wq);
 743                 destroy_workqueue(dc->writeback_write_wq);
 744         }
 745         cached_dev_put(dc);
 746         wait_for_kthread_stop();
 747 
 748         return 0;
 749 }
 750 
 751 /* Init */
 752 #define INIT_KEYS_EACH_TIME     500000
 753 #define INIT_KEYS_SLEEP_MS      100
 754 
 755 struct sectors_dirty_init {
 756         struct btree_op op;
 757         unsigned int    inode;
 758         size_t          count;
 759         struct bkey     start;
 760 };
 761 
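/*
 * Btree map callback used at registration time: for every dirty key
 * belonging to this device, add its sectors to the per-stripe dirty
 * counts. Every INIT_KEYS_EACH_TIME keys, return -EAGAIN if foreground
 * searches are in flight, so that bch_sectors_dirty_init() can back off
 * and resume from op->start.
 */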
 762 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
 763                                  struct bkey *k)
 764 {
 765         struct sectors_dirty_init *op = container_of(_op,
 766                                                 struct sectors_dirty_init, op);
 767         if (KEY_INODE(k) > op->inode)
 768                 return MAP_DONE;
 769 
 770         if (KEY_DIRTY(k))
 771                 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
 772                                              KEY_START(k), KEY_SIZE(k));
 773 
 774         op->count++;
 775         if (atomic_read(&b->c->search_inflight) &&
 776             !(op->count % INIT_KEYS_EACH_TIME)) {
 777                 bkey_copy_key(&op->start, k);
 778                 return -EAGAIN;
 779         }
 780 
 781         return MAP_CONTINUE;
 782 }
 783 
 784 void bch_sectors_dirty_init(struct bcache_device *d)
 785 {
 786         struct sectors_dirty_init op;
 787         int ret;
 788 
 789         bch_btree_op_init(&op.op, -1);
 790         op.inode = d->id;
 791         op.count = 0;
 792         op.start = KEY(op.inode, 0, 0);
 793 
 794         do {
 795                 ret = bch_btree_map_keys(&op.op, d->c, &op.start,
 796                                          sectors_dirty_init_fn, 0);
 797                 if (ret == -EAGAIN)
 798                         schedule_timeout_interruptible(
 799                                 msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
 800                 else if (ret < 0) {
 801                         pr_warn("sectors dirty init failed, ret=%d!", ret);
 802                         break;
 803                 }
 804         } while (ret == -EAGAIN);
 805 }
 806 
 807 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 808 {
 809         sema_init(&dc->in_flight, 64);
 810         init_rwsem(&dc->writeback_lock);
 811         bch_keybuf_init(&dc->writeback_keys);
 812 
 813         dc->writeback_metadata          = true;
 814         dc->writeback_running           = false;
 815         dc->writeback_percent           = 10;
 816         dc->writeback_delay             = 30;
 817         atomic_long_set(&dc->writeback_rate.rate, 1024);
 818         dc->writeback_rate_minimum      = 8;
 819 
 820         dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
 821         dc->writeback_rate_p_term_inverse = 40;
 822         dc->writeback_rate_i_term_inverse = 10000;
 823 
 824         WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
 825         INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 826 }
 827 
 828 int bch_cached_dev_writeback_start(struct cached_dev *dc)
 829 {
 830         dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
 831                                                 WQ_MEM_RECLAIM, 0);
 832         if (!dc->writeback_write_wq)
 833                 return -ENOMEM;
 834 
 835         cached_dev_get(dc);
 836         dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
 837                                               "bcache_writeback");
 838         if (IS_ERR(dc->writeback_thread)) {
 839                 cached_dev_put(dc);
 840                 destroy_workqueue(dc->writeback_write_wq);
 841                 return PTR_ERR(dc->writeback_thread);
 842         }
 843         dc->writeback_running = true;
 844 
 845         WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
 846         schedule_delayed_work(&dc->writeback_rate_update,
 847                               dc->writeback_rate_update_seconds * HZ);
 848 
 849         bch_writeback_queue(dc);
 850 
 851         return 0;
 852 }
