drivers/md/bcache/movinggc.c


DEFINITIONS

This source file includes the following definitions:
  1. moving_pred
  2. moving_io_destructor
  3. write_moving_finish
  4. read_moving_endio
  5. moving_init
  6. write_moving
  7. read_moving_submit
  8. read_moving
  9. bucket_cmp
  10. bucket_heap_top
  11. bch_moving_gc
  12. bch_moving_init_cache_set

// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

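/*
 * Overview: bch_moving_gc() scans each cache's buckets and flags the
 * emptiest live buckets with GC_MOVE; read_moving() then walks the
 * keyspace for keys pointing into those buckets and rewrites their data
 * via bch_data_insert(), so the old buckets can be reclaimed.
 */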
struct moving_io {
        struct closure          cl;
        struct keybuf_key       *w;     /* key being moved, from moving_gc_keys */
        struct data_insert_op   op;     /* rewrites the data at its new location */
        struct bbio             bio;    /* must stay last: inline bvecs follow */
};

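/*
 * Keybuf predicate: a key is a candidate for moving if any of its
 * pointers lands in a bucket that the last GC pass flagged with GC_MOVE.
 */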
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
        struct cache_set *c = container_of(buf, struct cache_set,
                                           moving_gc_keys);
        unsigned int i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
                    GC_MOVE(PTR_BUCKET(c, k, i)))
                        return true;

        return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);

        kfree(io);
}

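/*
 * Final stage of a move: release the data pages, drop the key from the
 * keybuf, and give back the in-flight slot taken in read_moving().
 */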
static void write_moving_finish(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;

        bio_free_pages(bio);

        if (io->op.replace_collision)
                trace_bcache_gc_copy_collision(&io->w->key);

        bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

        up(&io->op.c->moving_in_flight);

        closure_return_with_destructor(cl, moving_io_destructor);
}

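/*
 * Read completion: a failed read, or a clean pointer that went stale
 * while the read was in flight, marks the op as failed so write_moving()
 * skips the rewrite.  Dirty keys are rewritten regardless of staleness.
 */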
static void read_moving_endio(struct bio *bio)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);

        if (bio->bi_status) {
                io->op.status = bio->bi_status;
        } else if (!KEY_DIRTY(&b->key) &&
                   ptr_stale(io->op.c, &b->key, 0)) {
                io->op.status = BLK_STS_IOERR;
        }

        bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}

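/*
 * (Re)initialize the embedded bio to span the key being moved.  Called
 * once before the read and again from write_moving() before the write,
 * presumably because the completed read leaves the bio's iterator
 * consumed.
 */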
static void moving_init(struct moving_io *io)
{
        struct bio *bio = &io->bio.bio;

        bio_init(bio, bio->bi_inline_vecs,
                 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_private         = &io->cl;
        bch_bio_map(bio, NULL);
}

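/*
 * Rewrite stage: only runs the insert if the read succeeded.  Setting
 * op->replace makes the insert conditional: bch_data_insert() installs
 * the new pointer only if replace_key is still present, so a racing
 * write to the same extent turns the move into a no-op (traced as a
 * gc_copy_collision in write_moving_finish()).
 */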
static void write_moving(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;

        if (!op->status) {
                moving_init(io);

                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;

                op->writeback           = KEY_DIRTY(&io->w->key);
                op->csum                = KEY_CSUM(&io->w->key);

                bkey_copy(&op->replace_key, &io->w->key);
                op->replace             = true;

                closure_call(&op->cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, write_moving_finish, op->wq);
}

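/* Submit the read; the write stage then runs from the moving-GC workqueue. */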
static void read_moving_submit(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;

        bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

        continue_at(cl, write_moving, io->op.wq);
}

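/*
 * Main I/O loop: rescan the keyspace for keys matching moving_pred(),
 * allocate a moving_io (with inline bvecs for the data) per key, and
 * kick off the read.  moving_in_flight caps the number of concurrent
 * moves; closure_sync() waits for all of them before returning.
 */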
static void read_moving(struct cache_set *c)
{
        struct keybuf_key *w;
        struct moving_io *io;
        struct bio *bio;
        struct closure cl;

        closure_init_stack(&cl);

        /* XXX: if we error, background writeback could stall indefinitely */

        while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
                w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
                                           &MAX_KEY, moving_pred);
                if (!w)
                        break;

                if (ptr_stale(c, &w->key, 0)) {
                        bch_keybuf_del(&c->moving_gc_keys, w);
                        continue;
                }

                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private      = io;
                io->w           = w;
                io->op.inode    = KEY_INODE(&w->key);
                io->op.c        = c;
                io->op.wq       = c->moving_gc_wq;

                moving_init(io);
                bio = &io->bio.bio;

                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio->bi_end_io  = read_moving_endio;

                if (bch_bio_alloc_pages(bio, GFP_KERNEL))
                        goto err;

                trace_bcache_gc_copy(&w->key);

                down(&c->moving_in_flight);
                closure_call(&io->cl, read_moving_submit, NULL, &cl);
        }

        /* Error path: free the half-built moving_io and drop the key. */
        if (0) {
err:            if (!IS_ERR_OR_NULL(w->private))
                        kfree(w->private);

                bch_keybuf_del(&c->moving_gc_keys, w);
        }

        closure_sync(&cl);
}

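/*
 * Heap ordering: bucket_cmp() orders buckets by live sectors, so with
 * this comparator ca->heap keeps the *fullest* candidate at the top and
 * heap_pop() removes it first.  bucket_heap_top() reads that top
 * bucket's live-sector count (0 if the heap is empty).
 */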
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
        return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned int bucket_heap_top(struct cache *ca)
{
        struct bucket *b;

        return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

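/*
 * Pick the set of buckets to evacuate.  Skip metadata, empty, full and
 * pinned buckets, then keep the emptiest candidates whose total live
 * data fits in the RESERVE_MOVINGGC allocation reserve: the fullest
 * candidates are popped off the heap until sectors_to_move <=
 * reserve_sectors, and whatever remains is flagged GC_MOVE.
 *
 * Worked example (illustrative numbers): with bucket_size = 1024 sectors
 * and 2 reserve buckets, reserve_sectors = 2048; candidates with 300,
 * 500 and 900 live sectors total 1700 <= 2048, so all three are flagged
 * and their live data is copied into the reserve.
 */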
void bch_moving_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *b;
        unsigned int i;

        if (!c->copy_gc_enabled)
                return;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i) {
                unsigned int sectors_to_move = 0;
                unsigned int reserve_sectors = ca->sb.bucket_size *
                             fifo_used(&ca->free[RESERVE_MOVINGGC]);

                ca->heap.used = 0;

                for_each_bucket(b, ca) {
                        if (GC_MARK(b) == GC_MARK_METADATA ||
                            !GC_SECTORS_USED(b) ||
                            GC_SECTORS_USED(b) == ca->sb.bucket_size ||
                            atomic_read(&b->pin))
                                continue;

                        if (!heap_full(&ca->heap)) {
                                sectors_to_move += GC_SECTORS_USED(b);
                                heap_add(&ca->heap, b, bucket_cmp);
                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
                                sectors_to_move -= bucket_heap_top(ca);
                                sectors_to_move += GC_SECTORS_USED(b);

                                ca->heap.data[0] = b;
                                heap_sift(&ca->heap, 0, bucket_cmp);
                        }
                }

                while (sectors_to_move > reserve_sectors) {
                        heap_pop(&ca->heap, b, bucket_cmp);
                        sectors_to_move -= GC_SECTORS_USED(b);
                }

                while (heap_pop(&ca->heap, b, bucket_cmp))
                        SET_GC_MOVE(b, 1);
        }

        mutex_unlock(&c->bucket_lock);

        c->moving_gc_keys.last_scanned = ZERO_KEY;

        read_moving(c);
}

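/* Called at cache-set creation: 64 is the cap on in-flight moves. */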
void bch_moving_init_cache_set(struct cache_set *c)
{
        bch_keybuf_init(&c->moving_gc_keys);
        sema_init(&c->moving_in_flight, 64);
}
