Lines matching refs:c — uses of the struct cache_set pointer c in the bcache allocator; each hit shows its source line number, the matching code line, and the enclosing function.

84 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
88 unsigned next = c->nbuckets * c->sb.bucket_size / 1024; in bch_rescale_priorities()
92 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
95 r = atomic_read(&c->rescale); in bch_rescale_priorities()
99 } while (atomic_cmpxchg(&c->rescale, r, r + next) != r); in bch_rescale_priorities()
101 mutex_lock(&c->bucket_lock); in bch_rescale_priorities()
103 c->min_prio = USHRT_MAX; in bch_rescale_priorities()
105 for_each_cache(ca, c, i) in bch_rescale_priorities()
111 c->min_prio = min(c->min_prio, b->prio); in bch_rescale_priorities()
114 mutex_unlock(&c->bucket_lock); in bch_rescale_priorities()
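
The lines above show the pattern bch_rescale_priorities() uses to keep the expensive priority rescan rare: every write subtracts its sectors from an atomic budget, and whichever writer drives the budget negative tops it back up with a cmpxchg loop before taking bucket_lock for the full walk. The user-space sketch below illustrates only that pattern; rescale_priorities(), the C11 atomics, and the fixed starting budget are stand-ins, not the kernel code.

#include <pthread.h>
#include <stdatomic.h>

static atomic_int rescale = 1024;       /* sectors left before the next rescale */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static void rescale_priorities(int sectors, int next)
{
        int r;

        /* Charge this write against the budget. */
        atomic_fetch_sub(&rescale, sectors);

        do {
                r = atomic_load(&rescale);
                if (r >= 0)
                        return;         /* budget not exhausted yet */
                /* Budget went negative: top it back up, retrying if
                 * another writer raced us to the same decision. */
        } while (!atomic_compare_exchange_strong(&rescale, &r, r + next));

        /* Only the writer that won the cmpxchg pays for the walk. */
        pthread_mutex_lock(&bucket_lock);
        /* ... visit every bucket, decay its prio, track the new minimum ... */
        pthread_mutex_unlock(&bucket_lock);
}
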
453 void bch_bucket_free(struct cache_set *c, struct bkey *k) in bch_bucket_free() argument
458 __bch_bucket_free(PTR_CACHE(c, k, i), in bch_bucket_free()
459 PTR_BUCKET(c, k, i)); in bch_bucket_free()
462 int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, in __bch_bucket_alloc_set() argument
467 lockdep_assert_held(&c->bucket_lock); in __bch_bucket_alloc_set()
468 BUG_ON(!n || n > c->caches_loaded || n > 8); in __bch_bucket_alloc_set()
475 struct cache *ca = c->cache_by_alloc[i]; in __bch_bucket_alloc_set()
482 bucket_to_sector(c, b), in __bch_bucket_alloc_set()
490 bch_bucket_free(c, k); in __bch_bucket_alloc_set()
491 bkey_put(c, k); in __bch_bucket_alloc_set()
495 int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, in bch_bucket_alloc_set() argument
499 mutex_lock(&c->bucket_lock); in bch_bucket_alloc_set()
500 ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); in bch_bucket_alloc_set()
501 mutex_unlock(&c->bucket_lock); in bch_bucket_alloc_set()
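
The __bch_bucket_alloc_set() and bch_bucket_alloc_set() excerpts above show the usual split between a locked helper and its wrapper: the double-underscore variant runs with bucket_lock held, takes one bucket from each cache in turn, and on failure gives back everything it already collected (the role bch_bucket_free()/bkey_put() play above), while the plain variant only takes and drops the lock around it. Below is a rough user-space sketch of that shape under assumed names (struct pool, struct key, __alloc_set(), alloc_set()); it is not the kernel API.

#include <pthread.h>

#define MAX_PTRS 8

struct pool {                           /* stands in for struct cache */
        long next_free;
        long nr_free;
};

struct key {                            /* stands in for struct bkey */
        long ptr[MAX_PTRS];
        unsigned nr;
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static long pool_alloc_bucket(struct pool *p)
{
        return p->nr_free ? (p->nr_free--, p->next_free++) : -1;
}

static void pool_free_bucket(struct pool *p, long b)
{
        (void)b;
        p->nr_free++;                   /* toy accounting only */
}

/* Caller must hold bucket_lock. */
static int __alloc_set(struct pool *pools, struct key *k, unsigned n)
{
        unsigned i;

        k->nr = 0;
        for (i = 0; i < n && i < MAX_PTRS; i++) {
                long b = pool_alloc_bucket(&pools[i]);

                if (b < 0)
                        goto err;
                k->ptr[k->nr++] = b;
        }
        return 0;
err:
        /* Unwind: return every bucket allocated so far. */
        while (k->nr) {
                k->nr--;
                pool_free_bucket(&pools[k->nr], k->ptr[k->nr]);
        }
        return -1;
}

static int alloc_set(struct pool *pools, struct key *k, unsigned n)
{
        int ret;

        pthread_mutex_lock(&bucket_lock);
        ret = __alloc_set(pools, k, n);
        pthread_mutex_unlock(&bucket_lock);
        return ret;
}
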
533 static struct open_bucket *pick_data_bucket(struct cache_set *c, in pick_data_bucket() argument
540 list_for_each_entry_reverse(ret, &c->data_buckets, list) in pick_data_bucket()
546 ret = ret_task ?: list_first_entry(&c->data_buckets, in pick_data_bucket()
550 ret->sectors_free = c->sb.bucket_size; in pick_data_bucket()
571 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, in bch_alloc_sectors() argument
586 spin_lock(&c->data_bucket_lock); in bch_alloc_sectors()
588 while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { in bch_alloc_sectors()
593 spin_unlock(&c->data_bucket_lock); in bch_alloc_sectors()
595 if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait)) in bch_alloc_sectors()
598 spin_lock(&c->data_bucket_lock); in bch_alloc_sectors()
607 bkey_put(c, &alloc.key); in bch_alloc_sectors()
610 EBUG_ON(ptr_stale(c, &b->key, i)); in bch_alloc_sectors()
627 list_move_tail(&b->list, &c->data_buckets); in bch_alloc_sectors()
637 &PTR_CACHE(c, &b->key, i)->sectors_written); in bch_alloc_sectors()
640 if (b->sectors_free < c->sb.block_size) in bch_alloc_sectors()
650 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); in bch_alloc_sectors()
652 spin_unlock(&c->data_bucket_lock); in bch_alloc_sectors()
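
The pick_data_bucket() and bch_alloc_sectors() excerpts above share one locking shape: the open-bucket list is only touched under data_bucket_lock, but allocating a replacement bucket can sleep, so bch_alloc_sectors() drops the lock, allocates, retakes the lock, and retries the pick, since another writer may have installed a usable bucket in the meantime (the unused allocation is dropped with bkey_put()). The sketch below reproduces only that shape with invented names (open_list, pick_bucket(), get_open_bucket()); it is not the bcache implementation, and it simply publishes the fresh bucket under the lock rather than keeping the key on the stack as the kernel does.

#include <pthread.h>
#include <stdlib.h>

#define BUCKET_SECTORS 1024u

struct open_bucket {
        struct open_bucket *next;
        unsigned long write_point;      /* stream/task this bucket serves */
        unsigned sectors_free;
};

static pthread_mutex_t data_bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct open_bucket *open_list;   /* all currently open buckets */

/* Must be called with data_bucket_lock held. */
static struct open_bucket *pick_bucket(unsigned long write_point)
{
        struct open_bucket *b;

        for (b = open_list; b; b = b->next)
                if (b->write_point == write_point && b->sectors_free)
                        return b;
        return NULL;
}

static struct open_bucket *get_open_bucket(unsigned long write_point)
{
        struct open_bucket *b;

        pthread_mutex_lock(&data_bucket_lock);

        while (!(b = pick_bucket(write_point))) {
                struct open_bucket *fresh;

                /* Nothing usable: drop the lock, allocation may block. */
                pthread_mutex_unlock(&data_bucket_lock);

                fresh = calloc(1, sizeof(*fresh));
                if (!fresh)
                        return NULL;
                fresh->write_point = write_point;
                fresh->sectors_free = BUCKET_SECTORS;

                /* Retake the lock, publish, and retry the pick; another
                 * writer may already have added a usable bucket. */
                pthread_mutex_lock(&data_bucket_lock);
                fresh->next = open_list;
                open_list = fresh;
        }

        /* Real code would carve the requested sectors off b, update
         * per-device write statistics, and move b to the MRU end of
         * the list before unlocking. */
        pthread_mutex_unlock(&data_bucket_lock);
        return b;
}
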
658 void bch_open_buckets_free(struct cache_set *c) in bch_open_buckets_free() argument
662 while (!list_empty(&c->data_buckets)) { in bch_open_buckets_free()
663 b = list_first_entry(&c->data_buckets, in bch_open_buckets_free()
670 int bch_open_buckets_alloc(struct cache_set *c) in bch_open_buckets_alloc() argument
674 spin_lock_init(&c->data_bucket_lock); in bch_open_buckets_alloc()
681 list_add(&b->list, &c->data_buckets); in bch_open_buckets_alloc()
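
Finally, bch_open_buckets_alloc() and bch_open_buckets_free() are just the lifecycle around that list: a small set of open buckets is preallocated at cache-set setup (after initializing data_bucket_lock), and at shutdown the list is drained by popping its first entry until it is empty. Continuing the stand-ins from the previous sketch (struct open_bucket, open_list), a minimal version might look like this; the helper names are invented.

static int init_open_buckets(unsigned count)
{
        while (count--) {
                struct open_bucket *b = calloc(1, sizeof(*b));

                if (!b)
                        return -1;      /* buckets already added stay on the
                                         * list and are freed at teardown */
                b->next = open_list;
                open_list = b;
        }
        return 0;
}

static void free_open_buckets(void)
{
        /* Pop the first entry until the list is empty. */
        while (open_list) {
                struct open_bucket *b = open_list;

                open_list = b->next;
                free(b);
        }
}
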