root/drivers/md/bcache/writeback.h


DEFINITIONS

This source file includes the following definitions:
  1. bcache_dev_sectors_dirty
  2. offset_to_stripe
  3. bcache_dev_stripe_dirty
  4. should_writeback
  5. bch_writeback_queue
  6. bch_writeback_add

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

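/*
 * Writeback cutoffs, as a percentage of the cache in use: while usage
 * is below bch_cutoff_writeback, all cacheable writes are written back;
 * between the two cutoffs, only sync/metadata writes are; above
 * bch_cutoff_writeback_sync, writeback caching is skipped entirely
 * (see should_writeback() below).  The _MAX values cap how high the
 * corresponding tunables may be raised.
 */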
#define CUTOFF_WRITEBACK        40
#define CUTOFF_WRITEBACK_SYNC   70

#define CUTOFF_WRITEBACK_MAX            70
#define CUTOFF_WRITEBACK_SYNC_MAX       90

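/*
 * Per-pass limits for the writeback thread: at most MAX_WRITEBACKS_IN_PASS
 * keys and MAX_WRITESIZE_IN_PASS sectors (5000 * 512 B, about 2.4 MiB)
 * are written back in one pass before yielding.
 */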
#define MAX_WRITEBACKS_IN_PASS  5
#define MAX_WRITESIZE_IN_PASS   5000    /* *512b */

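/*
 * Bounds for the writeback_rate_update_seconds tunable: the interval,
 * in seconds, at which the writeback rate controller is re-run.
 */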
#define WRITEBACK_RATE_UPDATE_SECS_MAX          60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT      5

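/*
 * If the cache is more than this percent in use, the writeback thread
 * may wake garbage collection early to reclaim buckets sooner.
 */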
#define BCH_AUTO_GC_DIRTY_THRESHOLD     50

/*
 * A shift of 14 (units of 1/16384th) is chosen so that each backing
 * device gets a reasonable fraction of the share, and the arithmetic
 * does not overflow until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT   14

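/*
 * Total dirty sectors on a backing device: the sum of the per-stripe
 * dirty counters.  The result is approximate, since the counters can
 * change while they are being summed.
 */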
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
        uint64_t i, ret = 0;

        for (i = 0; i < d->nr_stripes; i++)
                ret += atomic_read(d->stripe_sectors_dirty + i);

        return ret;
}

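/*
 * Map a sector offset on the backing device to its stripe index.
 * do_div() divides in place and returns the remainder, so offset
 * holds the quotient afterwards.
 */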
static inline unsigned int offset_to_stripe(struct bcache_device *d,
                                        uint64_t offset)
{
        do_div(offset, d->stripe_size);
        return offset;
}

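/*
 * Check whether any stripe touched by the nr_sectors starting at
 * offset holds dirty data, walking one stripe at a time.
 */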
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
                                           uint64_t offset,
                                           unsigned int nr_sectors)
{
        unsigned int stripe = offset_to_stripe(&dc->disk, offset);

        while (1) {
                if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
                        return true;

                if (nr_sectors <= dc->disk.stripe_size)
                        return false;

                nr_sectors -= dc->disk.stripe_size;
                stripe++;
        }
}

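/*
 * Actual cutoff values; taken from module parameters and bounded by
 * the _MAX defines above.
 */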
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

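/*
 * Decide whether a write should go through writeback caching.  Refuse
 * when not in writeback mode, while detaching, or when the cache is
 * too full; never writeback discards.  On devices where partial-stripe
 * writes are expensive (e.g. RAID5/6), anything touching an already
 * dirty stripe is written back, so whole stripes can be flushed at once.
 */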
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                                    unsigned int cache_mode, bool would_skip)
{
        unsigned int in_use = dc->disk.c->gc_stats.in_use;

        if (cache_mode != CACHE_MODE_WRITEBACK ||
            test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            in_use > bch_cutoff_writeback_sync)
                return false;

        if (bio_op(bio) == REQ_OP_DISCARD)
                return false;

        if (dc->partial_stripes_expensive &&
            bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;

        if (would_skip)
                return false;

        return (op_is_sync(bio->bi_opf) ||
                bio->bi_opf & (REQ_META|REQ_PRIO) ||
                in_use <= bch_cutoff_writeback);
}

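/* Wake the writeback thread, if it has been started. */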
static inline void bch_writeback_queue(struct cached_dev *dc)
{
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                wake_up_process(dc->writeback_thread);
}

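/*
 * Note that the device now holds dirty data.  The plain atomic_read()
 * keeps the fast path from dirtying the cache line with an xchg on
 * every write; only the caller that first flips has_dirty 0 -> 1
 * marks the superblock dirty and wakes the writeback thread.
 */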
static inline void bch_writeback_add(struct cached_dev *dc)
{
        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
                if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
                        /* XXX: should do this synchronously */
                        bch_write_bdev_super(dc, NULL);
                }

                bch_writeback_queue(dc);
        }
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
                                  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

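/*
 * For context, a simplified sketch of how the write path uses these
 * helpers (based on cached_dev_write() in drivers/md/bcache/request.c;
 * details and error handling elided):
 *
 *      } else if (should_writeback(dc, s->orig_bio, cache_mode(dc),
 *                                  s->iop.bypass)) {
 *              s->iop.bypass = false;
 *              s->iop.writeback = true;
 *      }
 *
 * When writeback is chosen, the write path also calls
 * bch_writeback_add(dc) so the new dirty data is eventually flushed
 * to the backing device.
 */
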
#endif
