root/drivers/md/bcache/alloc.c


DEFINITIONS

This source file includes the following definitions:
  1. bch_inc_gen
  2. bch_rescale_priorities
  3. can_inc_bucket_gen
  4. bch_can_invalidate_bucket
  5. __bch_invalidate_one_bucket
  6. bch_invalidate_one_bucket
  7. invalidate_buckets_lru
  8. invalidate_buckets_fifo
  9. invalidate_buckets_random
  10. invalidate_buckets
  11. bch_allocator_push
  12. bch_allocator_thread
  13. bch_bucket_alloc
  14. __bch_bucket_free
  15. bch_bucket_free
  16. __bch_bucket_alloc_set
  17. bch_bucket_alloc_set
  18. pick_data_bucket
  19. bch_alloc_sectors
  20. bch_open_buckets_free
  21. bch_open_buckets_alloc
  22. bch_cache_allocator_start

// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * The allocator thread (bch_allocator_thread()) drives all the processes
 * described above. It's woken from bch_bucket_alloc() and a few other places
 * that need to make sure free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
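
/*
 * Roughly, a bucket's life cycle as described above:
 *
 *   invalidate_buckets() -> free_inc -> bch_prio_write()
 *       -> (optional discard) -> free[reserve] -> bch_bucket_alloc()
 */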

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

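/*
 * Bump a bucket's gen, and track in need_gc the largest distance any bucket's
 * gen has moved since gc last saw it (bucket_gc_gen()), so gc can be kicked
 * before that distance can reach BUCKET_GC_GEN_MAX and wrap.
 */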
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

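/*
 * Bucket priorities decay as IO happens: once roughly
 * nbuckets * bucket_size / 1024 sectors have gone by, drop every unpinned,
 * non-btree bucket's prio by one so recently used buckets stay hot relative
 * to the rest. The cmpxchg loop rearms the counter; bucket_lock protects
 * the sweep itself.
 */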
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned int i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

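/*
 * A bucket's gen may only be bumped while it stays within BUCKET_GC_GEN_MAX
 * of the gen gc last saw; past that we must skip the bucket until gc runs
 * (see the top-of-file comment about gen wraparound).
 */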
static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

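/*
 * Two-phase heap trick: first fill a bounded heap with the lowest-prio
 * candidates (bucket_max_cmp keeps the worst kept candidate at the root so
 * it can be displaced by anything better), then re-sift with bucket_min_cmp
 * so heap_pop() hands back candidates smallest bucket_prio() first.
 */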
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

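/*
 * FIFO replacement: a cursor (fifo_last_bucket) sweeps the device in order,
 * wrapping back to first_bucket; if an entire sweep can't fill free_inc,
 * wake gc and retry later.
 */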
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

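/*
 * Random replacement: sample buckets uniformly at random; give up and wake
 * gc after checking half the buckets without filling free_inc.
 */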
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

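/* Dispatch on the replacement policy recorded in the cache's superblock: */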
static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

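/*
 * Sleep until cond becomes true. cond is evaluated with bucket_lock held and
 * the lock is dropped while we sleep; bails out to the caller's "out" label
 * (so this is only usable inside bch_allocator_thread()) if the kthread is
 * being stopped or the cache set's IO has been disabled.
 */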
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

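/*
 * Allocate a single bucket index from @ca: try the freelists directly
 * (RESERVE_NONE first, then the requested reserve), and if both are empty
 * either fail (!wait) or sleep on bucket_wait until the allocator thread
 * refills them. Returns -1 on failure.
 */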
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

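/*
 * Clear the bucket's gc mark and live-sector count so gc treats it as unused
 * again, and update the available-buckets statistics.
 */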
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they are mixed into the same buckets as dirty sectors from a cached device,
 * those buckets stay marked dirty and won't be reclaimed, even after the
 * cached device's dirty data has been written back to the backing device.
 *
 * Or say you've started Firefox at the same time you're copying a bunch of
 * files. Firefox will likely end up being fairly hot and stay in the cache
 * awhile, but the data you copied might not be; if you wrote all that data to
 * the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
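/*
 * Search from most to least recently used open bucket, skipping buckets
 * whose flash-only flag doesn't match the search key: an exact key match
 * wins, else the bucket last used by this write_point, else fall back to
 * the least recently used open bucket, refilling it from @alloc if needed.
 */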
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * pick_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}
