This source file includes the following definitions:
- update_gc_after_writeback
- __calc_target_rate
- __update_writeback_rate
- set_at_max_writeback_rate
- update_writeback_rate
- writeback_delay
- dirty_init
- dirty_io_destructor
- write_dirty_finish
- dirty_endio
- write_dirty
- read_dirty_endio
- read_dirty_submit
- read_dirty
- bcache_dev_sectors_dirty_add
- dirty_pred
- refill_full_stripes
- refill_dirty
- bch_writeback_thread
- sectors_dirty_init_fn
- bch_sectors_dirty_init
- bch_cached_dev_writeback_init
- bch_cached_dev_writeback_start
/*
 * background writeback - scan btree for dirty data and write it to the
 * backing device
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

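/*
 * update_gc_after_writeback() is called while the writeback rate is being
 * recomputed.  If automatic gc after writeback has been requested
 * (gc_after_writeback == BCH_ENABLE_AUTO_GC) and cache usage is at or above
 * BCH_AUTO_GC_DIRTY_THRESHOLD, it sets BCH_DO_AUTO_GC so the writeback
 * thread can wake the gc thread once the dirty data has been drained.
 */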
static void update_gc_after_writeback(struct cache_set *c)
{
        if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
            c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
                return;

        c->gc_after_writeback |= BCH_DO_AUTO_GC;
}

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;

        /*
         * This is the size of the cache, minus the amount used for
         * flash-only devices
         */
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
                                atomic_long_read(&c->flash_dev_dirty_sectors);

        /*
         * There is no global control of dirty data per backing device, so
         * each backing device is given a share of the cache's dirty target
         * proportional to its share of the total backing device size
         * (scaled by WRITEBACK_SHARE_SHIFT to keep precision in integer
         * arithmetic).
         */
        uint32_t bdev_share =
                div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
                                c->cached_dev_sectors);

        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        /* Ensure each backing dev gets at least one dirty share */
        if (bdev_share < 1)
                bdev_share = 1;

        return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
        /*
         * PI controller:
         * Figures out the amount that should be written per second.
         *
         * First, the error: the number of sectors that are dirty, minus the
         * target number of sectors.
         *
         * The error divided by writeback_rate_p_term_inverse gives the
         * proportional term.  The running total of errors (scaled by the
         * update interval) divided by writeback_rate_i_term_inverse gives
         * the integral term.  Their sum, clamped between the configured
         * minimum rate and NSEC_PER_SEC, becomes the new writeback rate.
         */
        int64_t target = __calc_target_rate(dc);
        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t error = dirty - target;
        int64_t proportional_scaled =
                div_s64(error, dc->writeback_rate_p_term_inverse);
        int64_t integral_scaled;
        uint32_t new_rate;

        if ((error < 0 && dc->writeback_rate_integral > 0) ||
            (error > 0 && time_before64(local_clock(),
                         dc->writeback_rate.next + NSEC_PER_MSEC))) {
                /*
                 * Only decrease the integral term if it's more than
                 * zero.  Only increase the integral term if the device
                 * is keeping up.  (Don't wind up the integral
                 * ineffectively in either case.)
                 *
                 * It's necessary to scale this by
                 * writeback_rate_update_seconds to keep the integral
                 * term dimensioned properly.
                 */
                dc->writeback_rate_integral += error *
                        dc->writeback_rate_update_seconds;
        }

        integral_scaled = div_s64(dc->writeback_rate_integral,
                        dc->writeback_rate_i_term_inverse);

        new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
                        dc->writeback_rate_minimum, NSEC_PER_SEC);

        dc->writeback_rate_proportional = proportional_scaled;
        dc->writeback_rate_integral_scaled = integral_scaled;
        dc->writeback_rate_change = new_rate -
                        atomic_long_read(&dc->writeback_rate.rate);
        atomic_long_set(&dc->writeback_rate.rate, new_rate);
        dc->writeback_rate_target = target;
}

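/*
 * set_at_max_writeback_rate() tries to switch a backing device to the
 * maximum writeback rate when the whole cache set appears idle.  It returns
 * true only if, after raising the rate, the idle counter and the
 * at_max_writeback_rate flag still indicate an idle cache set; otherwise
 * the caller falls back to the PI-controller based __update_writeback_rate().
 */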
static bool set_at_max_writeback_rate(struct cache_set *c,
                                       struct cached_dev *dc)
{
        /* Don't set max writeback rate if gc is running */
        if (!c->gc_mark_valid)
                return false;
        /*
         * idle_counter is increased each time this function is called via
         * update_writeback_rate().  A value of at least 6 * attached_dev_nr
         * is taken to mean the cache set has been idle long enough to
         * write back at full speed.
         */
        if (atomic_inc_return(&c->idle_counter) <
            atomic_read(&c->attached_dev_nr) * 6)
                return false;

        if (atomic_read(&c->at_max_writeback_rate) != 1)
                atomic_set(&c->at_max_writeback_rate, 1);

        atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

        /* keep writeback_rate_update_seconds unchanged */
        dc->writeback_rate_proportional = 0;
        dc->writeback_rate_integral_scaled = 0;
        dc->writeback_rate_change = 0;

        /*
         * Check c->idle_counter and c->at_max_writeback_rate again in case
         * new I/O arrived while the rate was being raised; if so, return
         * false so the caller falls back to the PI controller.
         */
        if ((atomic_read(&c->idle_counter) <
             atomic_read(&c->attached_dev_nr) * 6) ||
            !atomic_read(&c->at_max_writeback_rate))
                return false;

        return true;
}

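/*
 * update_writeback_rate() is the delayed-work callback that periodically
 * recomputes the writeback rate.  BCACHE_DEV_RATE_DW_RUNNING is set while it
 * runs so teardown code can tell whether this work item is still executing,
 * and the work rearms itself every writeback_rate_update_seconds as long as
 * writeback is running and the cache set is not disabled.
 */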
static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);
        struct cache_set *c = dc->disk.c;

        /*
         * BCACHE_DEV_RATE_DW_RUNNING should be checked before calling
         * cancel_delayed_work_sync() on this work item.
         */
        set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
        /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
        smp_mb();

        /*
         * CACHE_SET_IO_DISABLE might be set via sysfs interface,
         * check it here too.
         */
        if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
            test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
                /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
                smp_mb();
                return;
        }

        if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
                /*
                 * If the whole cache set is idle, set_at_max_writeback_rate()
                 * will set the writeback rate to the maximum; then it is
                 * unnecessary to run the PI controller for an idle cache set.
                 */
                if (!set_at_max_writeback_rate(c, dc)) {
                        down_read(&dc->writeback_lock);
                        __update_writeback_rate(dc);
                        update_gc_after_writeback(c);
                        up_read(&dc->writeback_lock);
                }
        }

        /*
         * CACHE_SET_IO_DISABLE might be set via sysfs interface,
         * check it here too.
         */
        if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
            !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
        }

        /*
         * BCACHE_DEV_RATE_DW_RUNNING should be checked before calling
         * cancel_delayed_work_sync() on this work item.
         */
        clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
        /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
        smp_mb();
}

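/*
 * writeback_delay() converts the number of sectors just submitted into the
 * number of jiffies the writeback loop should sleep, based on the current
 * rate limit.  No delay is applied while detaching or when writeback_percent
 * is zero (i.e. writeback is running flat out).
 */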
static unsigned int writeback_delay(struct cached_dev *dc,
                                    unsigned int sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}

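/*
 * A struct dirty_io tracks one key being written back: the closure that
 * sequences its read/write/finish phases, the owning cached_dev, the
 * sequence number used to keep writes to the backing device in order, and
 * the bio (with inline bio_vecs allocated right after the struct).
 */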
struct dirty_io {
        struct closure          cl;
        struct cached_dev       *dc;
        uint16_t                sequence;
        struct bio              bio;
};

static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio, bio->bi_inline_vecs,
                 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_private         = w;
        bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        kfree(io);
}

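/*
 * write_dirty_finish() runs after the write to the backing device has
 * completed.  It frees the bio pages, clears the dirty bit on the key in
 * the btree (unless the write failed), drops the key from the writeback
 * keybuf and releases the in_flight semaphore.
 */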
static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        bio_free_pages(&io->bio);

        /*
         * The key is still marked dirty only if the write to the backing
         * device succeeded (dirty_endio() clears the dirty bit on error);
         * in that case clear the dirty bit in the btree as well.
         */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned int i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_status) {
                SET_KEY_DIRTY(&w->key, false);
                bch_count_backing_io_errors(io->dc, bio);
        }

        closure_put(&io->cl);
}

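/*
 * write_dirty() takes data that was read from the cache device and writes
 * it to the backing device.  Writes are issued strictly in sequence order:
 * if it is not this io's turn yet, the closure parks itself on
 * writeback_ordering_wait and is re-queued when the sequence advances.
 */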
static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        uint16_t next_sequence;

        if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
                /* Not our turn to write; wait for a write to complete */
                closure_wait(&dc->writeback_ordering_wait, cl);

                if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
                        /*
                         * Edge case -- the sequence advanced in
                         * indeterminate order relative to when we were
                         * added to the wait list, so wake the waiters.
                         */
                        closure_wake_up(&dc->writeback_ordering_wait);
                }

                continue_at(cl, write_dirty, io->dc->writeback_write_wq);
                return;
        }

        next_sequence = io->sequence + 1;

        /*
         * IO errors are signalled using the dirty bit on the key.
         * If we failed to read, we should not attempt to write to the
         * backing device.  Instead, immediately go to write_dirty_finish
         * to clean things up.
         */
        if (KEY_DIRTY(&w->key)) {
                dirty_init(w);
                bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
                io->bio.bi_iter.bi_sector = KEY_START(&w->key);
                bio_set_dev(&io->bio, io->dc->bdev);
                io->bio.bi_end_io       = dirty_endio;

                /* I/O request sent to the backing device */
                closure_bio_submit(io->dc->disk.c, &io->bio, cl);
        }

        atomic_set(&dc->writeback_sequence_next, next_sequence);
        closure_wake_up(&dc->writeback_ordering_wait);

        continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        /* is_read = 1 */
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_status, 1,
                            "reading dirty data from cache");

        dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(io->dc->disk.c, &io->bio, cl);

        continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

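/*
 * read_dirty() is the main writeback pass: it pulls dirty keys out of the
 * writeback keybuf, batches up to MAX_WRITEBACKS_IN_PASS contiguous keys
 * (bounded by MAX_WRITESIZE_IN_PASS sectors), reads them from the cache
 * device and hands each one to write_dirty() via read_dirty_submit().
 * Between batches it sleeps according to writeback_delay().
 */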
static void read_dirty(struct cached_dev *dc)
{
        unsigned int delay = 0;
        struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
        size_t size;
        int nk, i;
        struct dirty_io *io;
        struct closure cl;
        uint16_t sequence = 0;

        BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
        atomic_set(&dc->writeback_sequence_next, sequence);
        closure_init_stack(&cl);

        /*
         * XXX: if an allocation fails below, this writeback pass simply
         * ends early; see the err/err_free labels at the bottom.
         */
        next = bch_keybuf_next(&dc->writeback_keys);

        while (!kthread_should_stop() &&
               !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
               next) {
                size = 0;
                nk = 0;

                do {
                        BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

                        /*
                         * Don't combine too many operations, even if they
                         * are all small.
                         */
                        if (nk >= MAX_WRITEBACKS_IN_PASS)
                                break;

                        /*
                         * If the current operation is very large, don't
                         * further combine operations.
                         */
                        if (size >= MAX_WRITESIZE_IN_PASS)
                                break;

                        /*
                         * Operations are only eligible to be combined
                         * if they are contiguous.
                         *
                         * TODO: add a heuristic willing to fire a
                         * certain amount of non-contiguous IO per pass,
                         * so that we can benefit from backing device
                         * command merging.
                         */
                        if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
                                                &START_KEY(&next->key)))
                                break;

                        size += KEY_SIZE(&next->key);
                        keys[nk++] = next;
                } while ((next = bch_keybuf_next(&dc->writeback_keys)));

                /* Now we have gathered a set of keys to write back. */
                for (i = 0; i < nk; i++) {
                        w = keys[i];

                        io = kzalloc(sizeof(struct dirty_io) +
                                     sizeof(struct bio_vec) *
                                     DIV_ROUND_UP(KEY_SIZE(&w->key),
                                                  PAGE_SECTORS),
                                     GFP_KERNEL);
                        if (!io)
                                goto err;

                        w->private      = io;
                        io->dc          = dc;
                        io->sequence    = sequence++;

                        dirty_init(w);
                        bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                        io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                        bio_set_dev(&io->bio,
                                    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                        io->bio.bi_end_io       = read_dirty_endio;

                        if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                                goto err_free;

                        trace_bcache_writeback(&w->key);

                        down(&dc->in_flight);

                        /*
                         * We've acquired a semaphore for the maximum
                         * simultaneous number of writebacks; from here
                         * everything happens asynchronously.
                         */
                        closure_call(&io->cl, read_dirty_submit, NULL, &cl);
                }

                delay = writeback_delay(dc, size);

                while (!kthread_should_stop() &&
                       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
                       delay) {
                        schedule_timeout_interruptible(delay);
                        delay = writeback_delay(dc, 0);
                }
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}

/* Scan for dirty data */

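/*
 * bcache_dev_sectors_dirty_add() adjusts the per-stripe dirty sector counts
 * for a device by nr_sectors (which may be negative), updating the
 * full_dirty_stripes bitmap as stripes become completely dirty or clean.
 * Dirty sectors on flash-only volumes are also accounted in
 * flash_dev_dirty_sectors, which __calc_target_rate() subtracts from the
 * usable cache size.
 */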
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned int stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        if (UUID_FLASH_ONLY(&c->uuids[inode]))
                atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

        while (nr_sectors) {
                int s = min_t(unsigned int, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}

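/*
 * dirty_pred() is the keybuf predicate used when refilling writeback_keys:
 * only dirty keys belonging to this backing device are picked up.
 */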
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf,
                                             struct cached_dev,
                                             writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}

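/*
 * refill_full_stripes() is used when partial stripe writes are expensive
 * (dc->partial_stripes_expensive): instead of scanning keys in order, it
 * walks the full_dirty_stripes bitmap and refills the keybuf only from
 * stripes that are completely dirty, so whole stripes get written back
 * at once.
 */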
static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned int start_stripe, stripe, next_stripe;
        bool wrapped = false;

        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * make sure keybuf pos is inside the range for this disk - at bringup
         * we might not be attached yet so this disk's inode nr isn't
         * initialized then
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end, start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

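/*
 * bch_writeback_thread() is the per-backing-device writeback kthread.  It
 * sleeps while there is nothing to do, refills the keybuf with dirty keys,
 * writes them back via read_dirty(), and once a full index scan finds no
 * more dirty data it marks the backing device clean (and optionally wakes
 * the gc thread, see update_gc_after_writeback()).
 */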
static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        struct cache_set *c = dc->disk.c;
        bool searched_full_index;

        bch_ratelimit_reset(&dc->writeback_rate);

        while (!kthread_should_stop() &&
               !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                down_write(&dc->writeback_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * If the bcache device is detaching, skip here and continue
                 * to perform writeback. Otherwise, if there is no dirty data
                 * on cache, or there is dirty data but writeback is disabled,
                 * the writeback thread should sleep here and wait for others
                 * to wake it up.
                 */
                if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);

                        if (kthread_should_stop() ||
                            test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }

                        schedule();
                        continue;
                }
                set_current_state(TASK_RUNNING);

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                        /*
                         * If the bcache device is detaching via the sysfs
                         * interface, the writeback thread should stop once
                         * there is no dirty data on cache. BCACHE_DEV_DETACHING
                         * is set in bch_cached_dev_detach().
                         */
                        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
                                up_write(&dc->writeback_lock);
                                break;
                        }

                        /*
                         * When the dirty data ratio is high, there can be
                         * heavy bucket fragmentation after writeback
                         * finishes, which hurts subsequent write performance.
                         * If users care about write performance they may set
                         * BCH_ENABLE_AUTO_GC via sysfs; then, when
                         * BCH_DO_AUTO_GC is also set, the garbage collection
                         * thread is woken up here. After gc, the shrunk btree
                         * and discarded free buckets can help subsequent
                         * write requests.
                         */
                        if (c->gc_after_writeback ==
                            (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
                                c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
                                force_wake_up_gc(c);
                        }
                }

                up_write(&dc->writeback_lock);

                read_dirty(dc);

                if (searched_full_index) {
                        unsigned int delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);

                        bch_ratelimit_reset(&dc->writeback_rate);
                }
        }

        if (dc->writeback_write_wq) {
                flush_workqueue(dc->writeback_write_wq);
                destroy_workqueue(dc->writeback_write_wq);
        }
        cached_dev_put(dc);
        wait_for_kthread_stop();

        return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME     500000
#define INIT_KEYS_SLEEP_MS      100

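/*
 * bch_sectors_dirty_init() walks the device's keys in the btree and rebuilds
 * the per-stripe dirty sector counts from the dirty keys found.
 * sectors_dirty_init_fn() processes the keys; if other searches are in
 * flight it returns -EAGAIN every INIT_KEYS_EACH_TIME keys so the walk can
 * yield for INIT_KEYS_SLEEP_MS and then resume from op->start.
 */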
struct sectors_dirty_init {
        struct btree_op op;
        unsigned int    inode;
        size_t          count;
        struct bkey     start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                                struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        op->count++;
        if (atomic_read(&b->c->search_inflight) &&
            !(op->count % INIT_KEYS_EACH_TIME)) {
                bkey_copy_key(&op->start, k);
                return -EAGAIN;
        }

        return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
        struct sectors_dirty_init op;
        int ret;

        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;
        op.count = 0;
        op.start = KEY(op.inode, 0, 0);

        do {
                ret = bch_btree_map_keys(&op.op, d->c, &op.start,
                                         sectors_dirty_init_fn, 0);
                if (ret == -EAGAIN)
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
                else if (ret < 0) {
                        pr_warn("sectors dirty init failed, ret=%d!", ret);
                        break;
                }
        } while (ret == -EAGAIN);
}

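/*
 * bch_cached_dev_writeback_init() sets the default writeback tunables for a
 * newly registered backing device (10% dirty target, PI controller terms,
 * initial rate) and prepares the rate-update delayed work.  It does not
 * start any threads; that happens in bch_cached_dev_writeback_start().
 */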
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

        dc->writeback_metadata          = true;
        dc->writeback_running           = false;
        dc->writeback_percent           = 10;
        dc->writeback_delay             = 30;
        atomic_long_set(&dc->writeback_rate.rate, 1024);
        dc->writeback_rate_minimum      = 8;

        dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
        dc->writeback_rate_p_term_inverse = 40;
        dc->writeback_rate_i_term_inverse = 10000;

        WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

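/*
 * bch_cached_dev_writeback_start() allocates the writeback workqueue,
 * creates the writeback kthread, arms the periodic rate-update work and
 * kicks off the first writeback pass via bch_writeback_queue().
 */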
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                WQ_MEM_RECLAIM, 0);
        if (!dc->writeback_write_wq)
                return -ENOMEM;

        cached_dev_get(dc);
        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread)) {
                cached_dev_put(dc);
                destroy_workqueue(dc->writeback_write_wq);
                return PTR_ERR(dc->writeback_thread);
        }
        dc->writeback_running = true;

        WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}