Lines matching refs:dc (bcache writeback code, drivers/md/bcache/writeback.c)
21 static void __update_writeback_rate(struct cached_dev *dc) in __update_writeback_rate() argument
23 struct cache_set *c = dc->disk.c; in __update_writeback_rate()
26 div_u64(cache_sectors * dc->writeback_percent, 100); in __update_writeback_rate()
28 int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev), in __update_writeback_rate()
33 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); in __update_writeback_rate()
34 int64_t derivative = dirty - dc->disk.sectors_dirty_last; in __update_writeback_rate()
38 dc->disk.sectors_dirty_last = dirty; in __update_writeback_rate()
42 proportional *= dc->writeback_rate_update_seconds; in __update_writeback_rate()
43 proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); in __update_writeback_rate()
45 derivative = div_s64(derivative, dc->writeback_rate_update_seconds); in __update_writeback_rate()
47 derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, in __update_writeback_rate()
48 (dc->writeback_rate_d_term / in __update_writeback_rate()
49 dc->writeback_rate_update_seconds) ?: 1, 0); in __update_writeback_rate()
51 derivative *= dc->writeback_rate_d_term; in __update_writeback_rate()
52 derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); in __update_writeback_rate()
59 dc->writeback_rate.next + NSEC_PER_MSEC)) in __update_writeback_rate()
62 dc->writeback_rate.rate = in __update_writeback_rate()
63 clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change, in __update_writeback_rate()
66 dc->writeback_rate_proportional = proportional; in __update_writeback_rate()
67 dc->writeback_rate_derivative = derivative; in __update_writeback_rate()
68 dc->writeback_rate_change = change; in __update_writeback_rate()
69 dc->writeback_rate_target = target; in __update_writeback_rate()
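Taken together, the fragments above form bcache's proportional-derivative writeback controller: the P term measures how far the dirty count sits from its target, the D term measures how fast it is moving, and their sum nudges the rate once per update period. Below is a userspace model of that calculation, a minimal sketch only: the struct layout, the clamp helper, the change = proportional + derivative step, and the clamp bounds (not visible in the listing) are assumptions, while the scaling by update_seconds, p_term_inverse, and d_term mirrors the fragments.

#include <stdint.h>

#define NSEC_PER_MSEC 1000000LL

struct pd_state {
        int64_t sectors_dirty_last;        /* dirty count at last update */
        int64_t sectors_dirty_derivative;  /* smoothed derivative (EWMA) */
        int64_t rate;                      /* output: sectors per second */
};

/* exponentially weighted moving average: pull ewma 1/weight toward val */
static int64_t ewma_add(int64_t ewma, int64_t val, int64_t weight)
{
        return ewma + (val - ewma) / weight;
}

static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

static void update_rate(struct pd_state *s, int64_t dirty, int64_t target,
                        int64_t update_seconds,  /* default 5    */
                        int64_t p_term_inverse,  /* default 6000 */
                        int64_t d_term)          /* default 30   */
{
        int64_t proportional = dirty - target;
        int64_t derivative = dirty - s->sectors_dirty_last;
        int64_t change;

        s->sectors_dirty_last = dirty;

        /* error over one update period, scaled down by 1/p_term_inverse */
        proportional *= update_seconds;
        proportional /= p_term_inverse;

        /* dirty growth per second, smoothed over roughly d_term seconds */
        derivative /= update_seconds;
        derivative = ewma_add(s->sectors_dirty_derivative, derivative,
                              (d_term / update_seconds) ?: 1);
        s->sectors_dirty_derivative = derivative;

        derivative *= d_term;
        derivative /= p_term_inverse;

        change = proportional + derivative;

        /* the rate never drops below 1, so writeback always makes progress */
        s->rate = clamp64(s->rate + change, 1, NSEC_PER_MSEC);
}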
74 struct cached_dev *dc = container_of(to_delayed_work(work), in update_writeback_rate() local
78 down_read(&dc->writeback_lock); in update_writeback_rate()
80 if (atomic_read(&dc->has_dirty) && in update_writeback_rate()
81 dc->writeback_percent) in update_writeback_rate()
82 __update_writeback_rate(dc); in update_writeback_rate()
84 up_read(&dc->writeback_lock); in update_writeback_rate()
86 schedule_delayed_work(&dc->writeback_rate_update, in update_writeback_rate()
87 dc->writeback_rate_update_seconds * HZ); in update_writeback_rate()
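update_writeback_rate() re-queues itself at the end of every run, the standard way to turn a delayed work item into a periodic job. Here is a self-contained module sketch of that self-rearming pattern, assuming a 5-second period to match writeback_rate_update_seconds; the work body is a placeholder.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work rate_update;

static void rate_update_fn(struct work_struct *work)
{
        /* placeholder for taking the lock and recomputing the rate */
        pr_info("periodic rate update\n");

        /* re-arm: this is what keeps the controller ticking */
        schedule_delayed_work(&rate_update, 5 * HZ);
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&rate_update, rate_update_fn);
        schedule_delayed_work(&rate_update, 5 * HZ);
        return 0;
}

static void __exit demo_exit(void)
{
        /* waits for a running instance and blocks the self re-queue */
        cancel_delayed_work_sync(&rate_update);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note how the real function recovers its cached_dev with container_of(to_delayed_work(work), ...) at line 74; the sketch sidesteps that by using a file-scope work item.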
90 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) in writeback_delay() argument
92 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in writeback_delay()
93 !dc->writeback_percent) in writeback_delay()
96 return bch_next_delay(&dc->writeback_rate, sectors); in writeback_delay()
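writeback_delay() turns the configured rate into a sleep: bch_next_delay() charges the sectors just processed against the rate and reports how long to wait before the next batch, while the early return of 0 (detaching, or writeback_percent unset) lets writeback run flat out. A userspace model of that style of limiter, assuming nanosecond bookkeeping around a "next due" timestamp; the real bch_ratelimit fields and its handling of idle credit may differ.

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
        uint64_t next;  /* ns timestamp at which the next unit is "due" */
        uint32_t rate;  /* units per second */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/* returns how many ns the caller should sleep after doing `done` units */
static uint64_t next_delay(struct ratelimit *d, uint64_t done)
{
        uint64_t now = now_ns();

        d->next += done * NSEC_PER_SEC / d->rate;

        /* assumption: don't bank unbounded credit while idle */
        if (d->next < now)
                d->next = now;

        return d->next - now;
}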
101 struct cached_dev *dc; member
111 if (!io->dc->writeback_percent) in dirty_init()
131 struct cached_dev *dc = io->dc; in write_dirty_finish() local
151 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); in write_dirty_finish()
153 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); in write_dirty_finish()
159 ? &dc->disk.c->writeback_keys_failed in write_dirty_finish()
160 : &dc->disk.c->writeback_keys_done); in write_dirty_finish()
163 bch_keybuf_del(&dc->writeback_keys, w); in write_dirty_finish()
164 up(&dc->in_flight); in write_dirty_finish()
188 io->bio.bi_bdev = io->dc->bdev; in write_dirty()
191 closure_bio_submit(&io->bio, cl, &io->dc->disk); in write_dirty()
201 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), in read_dirty_endio()
211 closure_bio_submit(&io->bio, cl, &io->dc->disk); in read_dirty_submit()
216 static void read_dirty(struct cached_dev *dc) in read_dirty() argument
233 w = bch_keybuf_next(&dc->writeback_keys); in read_dirty()
237 BUG_ON(ptr_stale(dc->disk.c, &w->key, 0)); in read_dirty()
239 if (KEY_START(&w->key) != dc->last_read || in read_dirty()
244 dc->last_read = KEY_OFFSET(&w->key); in read_dirty()
253 io->dc = dc; in read_dirty()
257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, in read_dirty()
267 down(&dc->in_flight); in read_dirty()
270 delay = writeback_delay(dc, KEY_SIZE(&w->key)); in read_dirty()
277 bch_keybuf_del(&dc->writeback_keys, w); in read_dirty()
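read_dirty() pipelines its I/O: each dirty extent is read from the cache and handed to an asynchronous write path, and the dc->in_flight semaphore caps how many of those writes are outstanding at once; write_dirty_finish() releases a slot at line 164, and line 506 below initializes the cap to 64. A standalone sketch of that throttle using POSIX threads and semaphores, with the thread body standing in for the real I/O:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t in_flight;

static void *do_io(void *arg)
{
        long nr = (long)arg;

        usleep(1000);                   /* stand-in for the actual I/O */
        printf("io %ld done\n", nr);
        sem_post(&in_flight);           /* like up(&dc->in_flight) in  */
        return NULL;                    /* write_dirty_finish()        */
}

int main(void)
{
        pthread_t tid;

        sem_init(&in_flight, 0, 64);    /* mirrors sema_init(..., 64)  */

        for (long i = 0; i < 256; i++) {
                sem_wait(&in_flight);   /* like down(&dc->in_flight):  */
                                        /* blocks once 64 are pending  */
                pthread_create(&tid, NULL, do_io, (void *)i);
                pthread_detach(tid);
        }

        for (int i = 0; i < 64; i++)    /* drain remaining completions */
                sem_wait(&in_flight);
        return 0;
}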
326 struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys); in dirty_pred() local
328 BUG_ON(KEY_INODE(k) != dc->disk.id); in dirty_pred()
333 static void refill_full_stripes(struct cached_dev *dc) in refill_full_stripes() argument
335 struct keybuf *buf = &dc->writeback_keys; in refill_full_stripes()
339 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); in refill_full_stripes()
341 if (stripe >= dc->disk.nr_stripes) in refill_full_stripes()
347 stripe = find_next_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
348 dc->disk.nr_stripes, stripe); in refill_full_stripes()
350 if (stripe == dc->disk.nr_stripes) in refill_full_stripes()
353 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
354 dc->disk.nr_stripes, stripe); in refill_full_stripes()
356 buf->last_scanned = KEY(dc->disk.id, in refill_full_stripes()
357 stripe * dc->disk.stripe_size, 0); in refill_full_stripes()
359 bch_refill_keybuf(dc->disk.c, buf, in refill_full_stripes()
360 &KEY(dc->disk.id, in refill_full_stripes()
361 next_stripe * dc->disk.stripe_size, 0), in refill_full_stripes()
372 if (stripe == dc->disk.nr_stripes) { in refill_full_stripes()
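refill_full_stripes() walks the full_dirty_stripes bitmap in runs: find_next_bit() locates the start of a stretch of completely dirty stripes, find_next_zero_bit() locates its end, and the whole run [stripe, next_stripe) is refilled as one contiguous chunk. That matters on devices where partial-stripe writes are expensive, since it keeps writeback aligned to full stripes. A userspace model; the naive bit-at-a-time scanners stand in for the kernel's word-at-a-time helpers of the same name.

#include <stdio.h>

static unsigned find_next_bit(const unsigned char *map, unsigned size,
                              unsigned off)
{
        for (; off < size; off++)
                if (map[off / 8] & (1u << (off % 8)))
                        break;
        return off;
}

static unsigned find_next_zero_bit(const unsigned char *map, unsigned size,
                                   unsigned off)
{
        for (; off < size; off++)
                if (!(map[off / 8] & (1u << (off % 8))))
                        break;
        return off;
}

int main(void)
{
        unsigned char full_dirty[2] = { 0x3c, 0x81 }; /* stripes 2-5, 8, 15 */
        unsigned nr_stripes = 16, stripe = 0;

        while ((stripe = find_next_bit(full_dirty, nr_stripes, stripe))
               < nr_stripes) {
                unsigned next = find_next_zero_bit(full_dirty, nr_stripes,
                                                   stripe);

                printf("refill stripes [%u, %u)\n", stripe, next);
                stripe = next;
        }
        return 0;
}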
382 static bool refill_dirty(struct cached_dev *dc) in refill_dirty() argument
384 struct keybuf *buf = &dc->writeback_keys; in refill_dirty()
385 struct bkey start = KEY(dc->disk.id, 0, 0); in refill_dirty()
386 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); in refill_dirty()
398 if (dc->partial_stripes_expensive) { in refill_dirty()
399 refill_full_stripes(dc); in refill_dirty()
405 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); in refill_dirty()
415 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); in refill_dirty()
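refill_dirty() resumes scanning from wherever the previous pass stopped; only if it reaches the end of the device does it wrap to offset 0 and scan back up to the old cursor, so one full circuit visits every key exactly once, and the return value tells the writeback thread whether it has now searched the whole index. A structural sketch over an integer keyspace, assuming a placeholder refill() for bch_refill_keybuf(); the real one stops early once the keybuf fills, which is what makes the early false return fire.

#include <stdbool.h>

struct buf {
        long last_scanned;      /* scan cursor, persists across calls */
};

/* placeholder: scans [buf->last_scanned, end); the real refill may stop
 * short of `end` when the buffer fills, this stub always reaches it */
static void refill(struct buf *buf, long end)
{
        buf->last_scanned = end;
}

static bool refill_dirty(struct buf *buf, long dev_end)
{
        long start_pos = buf->last_scanned;

        refill(buf, dev_end);
        if (buf->last_scanned < dev_end)
                return false;   /* buffer filled before the device end */

        buf->last_scanned = 0;  /* wrap to the start of the keyspace */
        refill(buf, start_pos);

        /* true iff the circuit closed back where this pass began */
        return buf->last_scanned >= start_pos;
}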
422 struct cached_dev *dc = arg; in bch_writeback_thread() local
426 down_write(&dc->writeback_lock); in bch_writeback_thread()
427 if (!atomic_read(&dc->has_dirty) || in bch_writeback_thread()
428 (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && in bch_writeback_thread()
429 !dc->writeback_running)) { in bch_writeback_thread()
430 up_write(&dc->writeback_lock); in bch_writeback_thread()
441 searched_full_index = refill_dirty(dc); in bch_writeback_thread()
444 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { in bch_writeback_thread()
445 atomic_set(&dc->has_dirty, 0); in bch_writeback_thread()
446 cached_dev_put(dc); in bch_writeback_thread()
447 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); in bch_writeback_thread()
448 bch_write_bdev_super(dc, NULL); in bch_writeback_thread()
451 up_write(&dc->writeback_lock); in bch_writeback_thread()
453 bch_ratelimit_reset(&dc->writeback_rate); in bch_writeback_thread()
454 read_dirty(dc); in bch_writeback_thread()
457 unsigned delay = dc->writeback_delay * HZ; in bch_writeback_thread()
461 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) in bch_writeback_thread()
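bch_writeback_thread() is a classic kthread loop: park when there is nothing to do (no dirty data, or writeback disabled and not detaching), otherwise refill the keybuf, write it out, and sleep off the inter-pass delay in interruptible chunks so kthread_stop() and detach requests are noticed promptly. A skeleton of that control flow, assuming have_work() and one_pass() stubs in place of the locked has_dirty/writeback_running checks and the refill_dirty()/read_dirty() pair.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static bool have_work(void)
{
        return false;   /* stub for the checks done under writeback_lock */
}

static void one_pass(void)
{
        /* stub for refill_dirty() + read_dirty() */
}

static int writeback_thread(void *arg)
{
        long delay;

        while (!kthread_should_stop()) {
                if (!have_work()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        /* re-check after setting state, so a stop that
                         * raced with the test above is not slept through */
                        if (kthread_should_stop()) {
                                __set_current_state(TASK_RUNNING);
                                break;
                        }
                        schedule();     /* woken by the queueing path */
                        continue;
                }

                one_pass();

                /* back off between passes, but stay responsive to stop */
                delay = 30 * HZ;
                while (delay && !kthread_should_stop())
                        delay = schedule_timeout_interruptible(delay);
        }
        return 0;
}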
491 void bch_sectors_dirty_init(struct cached_dev *dc) in bch_sectors_dirty_init() argument
496 op.inode = dc->disk.id; in bch_sectors_dirty_init()
498 bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), in bch_sectors_dirty_init()
501 dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); in bch_sectors_dirty_init()
504 void bch_cached_dev_writeback_init(struct cached_dev *dc) in bch_cached_dev_writeback_init() argument
506 sema_init(&dc->in_flight, 64); in bch_cached_dev_writeback_init()
507 init_rwsem(&dc->writeback_lock); in bch_cached_dev_writeback_init()
508 bch_keybuf_init(&dc->writeback_keys); in bch_cached_dev_writeback_init()
510 dc->writeback_metadata = true; in bch_cached_dev_writeback_init()
511 dc->writeback_running = true; in bch_cached_dev_writeback_init()
512 dc->writeback_percent = 10; in bch_cached_dev_writeback_init()
513 dc->writeback_delay = 30; in bch_cached_dev_writeback_init()
514 dc->writeback_rate.rate = 1024; in bch_cached_dev_writeback_init()
516 dc->writeback_rate_update_seconds = 5; in bch_cached_dev_writeback_init()
517 dc->writeback_rate_d_term = 30; in bch_cached_dev_writeback_init()
518 dc->writeback_rate_p_term_inverse = 6000; in bch_cached_dev_writeback_init()
520 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); in bch_cached_dev_writeback_init()
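For scale, the defaults above work out as follows, assuming 512-byte sectors: the proportional term moves the rate by error * update_seconds / p_term_inverse = error / 1200 sectors per second on each 5-second update, so sitting 1 GiB over target (2,097,152 sectors) adds about 1,747 sectors/s, roughly 0.85 MiB/s, per update, while the starting rate of 1024 sectors/s corresponds to 512 KiB/s.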
523 int bch_cached_dev_writeback_start(struct cached_dev *dc) in bch_cached_dev_writeback_start() argument
525 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, in bch_cached_dev_writeback_start()
527 if (IS_ERR(dc->writeback_thread)) in bch_cached_dev_writeback_start()
528 return PTR_ERR(dc->writeback_thread); in bch_cached_dev_writeback_start()
530 schedule_delayed_work(&dc->writeback_rate_update, in bch_cached_dev_writeback_start()
531 dc->writeback_rate_update_seconds * HZ); in bch_cached_dev_writeback_start()
533 bch_writeback_queue(dc); in bch_cached_dev_writeback_start()
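bch_cached_dev_writeback_start() uses kthread_create() rather than kthread_run(), so the thread exists but stays asleep until bch_writeback_queue() wakes it, after the rate-update work has already been armed; errors come back through the usual IS_ERR()/PTR_ERR() idiom. A minimal sketch of that handshake, with thread_fn standing in for bch_writeback_thread:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static int thread_fn(void *data)
{
        /* placeholder main loop: sleep until told to stop */
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static int start_worker(void *ctx, struct task_struct **out)
{
        struct task_struct *t = kthread_create(thread_fn, ctx, "demo_worker");

        if (IS_ERR(t))
                return PTR_ERR(t);      /* e.g. -ENOMEM as a plain int */

        *out = t;
        wake_up_process(t);             /* like bch_writeback_queue(dc) */
        return 0;
}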