Lines matching refs: ca (the struct cache pointer used throughout the bcache allocator code)

74 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()  argument
78 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); in bch_inc_gen()
79 WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); in bch_inc_gen()
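The bch_inc_gen() lines above show a bucket's 8-bit generation being bumped while the cache set tracks the largest gap since the last GC pass (bucket_gc_gen), so garbage collection can be scheduled before that gap overflows. Below is a minimal userspace sketch of that pattern, assuming toy struct definitions; the field names and the BUCKET_GC_GEN_MAX value here are illustrative, not taken verbatim from bcache.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_GC_GEN_MAX 96                    /* illustrative limit */

struct bucket   { uint8_t gen, last_gc; };
struct cacheset { uint8_t need_gc; };

/* how far this bucket's gen has moved since the last GC pass (wraps mod 256) */
static uint8_t bucket_gc_gen(const struct bucket *b)
{
        return b->gen - b->last_gc;
}

static uint8_t inc_gen(struct cacheset *set, struct bucket *b)
{
        uint8_t ret = ++b->gen;                 /* 8-bit gen wraps naturally */

        if (bucket_gc_gen(b) > set->need_gc)
                set->need_gc = bucket_gc_gen(b);
        assert(set->need_gc <= BUCKET_GC_GEN_MAX);
        return ret;
}

int main(void)
{
        struct cacheset set = { 0 };
        struct bucket b = { .gen = 250, .last_gc = 250 };

        for (int i = 0; i < 10; i++)
                inc_gen(&set, &b);
        printf("gen=%u need_gc=%u\n", b.gen, set.need_gc);
        return 0;
}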
86 struct cache *ca; in bch_rescale_priorities() local
105 for_each_cache(ca, c, i) in bch_rescale_priorities()
106 for_each_bucket(b, ca) in bch_rescale_priorities()
129 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket() argument
131 BUG_ON(!ca->set->gc_mark_valid); in bch_can_invalidate_bucket()
139 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket() argument
141 lockdep_assert_held(&ca->set->bucket_lock); in __bch_invalidate_one_bucket()
145 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
147 bch_inc_gen(ca, b); in __bch_invalidate_one_bucket()
152 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket() argument
154 __bch_invalidate_one_bucket(ca, b); in bch_invalidate_one_bucket()
156 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
170 unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
172 (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
178 static void invalidate_buckets_lru(struct cache *ca) in invalidate_buckets_lru() argument
183 ca->heap.used = 0; in invalidate_buckets_lru()
185 for_each_bucket(b, ca) { in invalidate_buckets_lru()
186 if (!bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_lru()
189 if (!heap_full(&ca->heap)) in invalidate_buckets_lru()
190 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
191 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
192 ca->heap.data[0] = b; in invalidate_buckets_lru()
193 heap_sift(&ca->heap, 0, bucket_max_cmp); in invalidate_buckets_lru()
197 for (i = ca->heap.used / 2 - 1; i >= 0; --i) in invalidate_buckets_lru()
198 heap_sift(&ca->heap, i, bucket_min_cmp); in invalidate_buckets_lru()
200 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_lru()
201 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
206 ca->invalidate_needs_gc = 1; in invalidate_buckets_lru()
207 wake_up_gc(ca->set); in invalidate_buckets_lru()
211 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_lru()
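Taken together, the bucket_prio() macro fragments and the invalidate_buckets_lru() lines outline a two-phase selection: every bucket is scanned, the cheapest candidates (old and holding few sectors) are kept in a bounded heap ordered so the most expensive surviving candidate sits at the root, and the survivors are then re-heapified the other way and consumed cheapest-first. The userspace sketch below shows that bounded top-K selection under those assumptions; the heap helpers, the key (simplified here to prio * sectors_used) and the capacity are illustrative rather than bcache's own.

#include <stdio.h>
#include <stdlib.h>

struct bucket { unsigned prio, sectors_used; };

/* stand-in for bucket_prio(): a low key means old and nearly empty */
static unsigned long bucket_key(const struct bucket *b)
{
        return (unsigned long)b->prio * b->sectors_used;
}

struct heap { const struct bucket **data; size_t used, size; };

/* before(a, b) returns nonzero when a belongs above b in the heap */
static void sift_down(struct heap *h, size_t i,
                      int (*before)(const struct bucket *, const struct bucket *))
{
        for (;;) {
                size_t l = 2 * i + 1, r = l + 1, top = i;

                if (l < h->used && before(h->data[l], h->data[top]))
                        top = l;
                if (r < h->used && before(h->data[r], h->data[top]))
                        top = r;
                if (top == i)
                        return;
                const struct bucket *tmp = h->data[i];
                h->data[i] = h->data[top];
                h->data[top] = tmp;
                i = top;
        }
}

static int max_first(const struct bucket *l, const struct bucket *r)
{
        return bucket_key(l) > bucket_key(r);   /* collection phase */
}

static int min_first(const struct bucket *l, const struct bucket *r)
{
        return bucket_key(l) < bucket_key(r);   /* draining phase */
}

int main(void)
{
        struct bucket buckets[16];
        const struct bucket *slots[4];
        struct heap h = { slots, 0, 4 };

        for (size_t i = 0; i < 16; i++)
                buckets[i] = (struct bucket){ .prio = rand() % 100,
                                              .sectors_used = 1 + rand() % 128 };

        /* phase 1: keep the h.size cheapest buckets, evicting the priciest */
        for (size_t i = 0; i < 16; i++) {
                if (h.used < h.size) {
                        h.data[h.used++] = &buckets[i];
                        for (size_t j = h.used / 2; j-- > 0;)
                                sift_down(&h, j, max_first);
                } else if (bucket_key(&buckets[i]) < bucket_key(h.data[0])) {
                        h.data[0] = &buckets[i];
                        sift_down(&h, 0, max_first);
                }
        }

        /* phase 2: re-heapify so the cheapest candidate pops first */
        for (size_t j = h.used / 2; j-- > 0;)
                sift_down(&h, j, min_first);

        while (h.used) {
                const struct bucket *b = h.data[0];

                h.data[0] = h.data[--h.used];
                sift_down(&h, 0, min_first);
                printf("invalidate bucket with key %lu\n", bucket_key(b));
        }
        return 0;
}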
215 static void invalidate_buckets_fifo(struct cache *ca) in invalidate_buckets_fifo() argument
220 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_fifo()
221 if (ca->fifo_last_bucket < ca->sb.first_bucket || in invalidate_buckets_fifo()
222 ca->fifo_last_bucket >= ca->sb.nbuckets) in invalidate_buckets_fifo()
223 ca->fifo_last_bucket = ca->sb.first_bucket; in invalidate_buckets_fifo()
225 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
227 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_fifo()
228 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_fifo()
230 if (++checked >= ca->sb.nbuckets) { in invalidate_buckets_fifo()
231 ca->invalidate_needs_gc = 1; in invalidate_buckets_fifo()
232 wake_up_gc(ca->set); in invalidate_buckets_fifo()
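The invalidate_buckets_fifo() lines suggest a round-robin alternative: a persistent cursor (ca->fifo_last_bucket) walks the bucket array, wrapping back to the first usable bucket, and the scan wakes the garbage collector and gives up once it has examined every bucket without filling free_inc. A hedged userspace sketch of that cursor logic follows; the limits and the can_invalidate()/invalidate()/free_inc_full() helpers are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define FIRST_BUCKET 1
#define NBUCKETS     16

static size_t fifo_cursor = FIRST_BUCKET;       /* stand-in for ca->fifo_last_bucket */

static bool free_inc_full(void)        { return false; }        /* toy: never fills */
static bool can_invalidate(size_t b)   { return b % 3 == 0; }   /* toy rule */
static void invalidate(size_t b)       { printf("invalidate %zu\n", b); }
static void wake_up_gc(void)           { printf("wake GC\n"); }

static void fill_free_inc_fifo(void)
{
        size_t checked = 0;

        while (!free_inc_full()) {
                /* wrap the cursor back into [FIRST_BUCKET, NBUCKETS) */
                if (fifo_cursor < FIRST_BUCKET || fifo_cursor >= NBUCKETS)
                        fifo_cursor = FIRST_BUCKET;

                size_t b = fifo_cursor++;

                if (can_invalidate(b))
                        invalidate(b);

                /* a full lap without filling free_inc: ask GC for help */
                if (++checked >= NBUCKETS) {
                        wake_up_gc();
                        return;
                }
        }
}

int main(void)
{
        fill_free_inc_fifo();
        return 0;
}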
238 static void invalidate_buckets_random(struct cache *ca) in invalidate_buckets_random() argument
243 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_random()
247 n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); in invalidate_buckets_random()
248 n += ca->sb.first_bucket; in invalidate_buckets_random()
250 b = ca->buckets + n; in invalidate_buckets_random()
252 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_random()
253 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_random()
255 if (++checked >= ca->sb.nbuckets / 2) { in invalidate_buckets_random()
256 ca->invalidate_needs_gc = 1; in invalidate_buckets_random()
257 wake_up_gc(ca->set); in invalidate_buckets_random()
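invalidate_buckets_random() appears to do the same job by probing random bucket indices within [first_bucket, nbuckets) and falling back to GC after roughly half the buckets' worth of attempts. The sketch below mirrors that shape; rand() merely stands in for whatever random source the kernel code uses, and the helpers are again placeholders.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define FIRST_BUCKET 1
#define NBUCKETS     16

static bool free_inc_full(void)        { return false; }        /* toy: never fills */
static bool can_invalidate(size_t b)   { return b % 3 == 0; }   /* toy rule */
static void invalidate(size_t b)       { printf("invalidate %zu\n", b); }
static void wake_up_gc(void)           { printf("wake GC\n"); }

static void fill_free_inc_random(void)
{
        size_t checked = 0;

        while (!free_inc_full()) {
                /* rand() stands in for the kernel's random source */
                size_t b = FIRST_BUCKET +
                           (size_t)rand() % (NBUCKETS - FIRST_BUCKET);

                if (can_invalidate(b))
                        invalidate(b);

                /* half the buckets' worth of tries: fall back to GC */
                if (++checked >= NBUCKETS / 2) {
                        wake_up_gc();
                        return;
                }
        }
}

int main(void)
{
        fill_free_inc_random();
        return 0;
}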
263 static void invalidate_buckets(struct cache *ca) in invalidate_buckets() argument
265 BUG_ON(ca->invalidate_needs_gc); in invalidate_buckets()
267 switch (CACHE_REPLACEMENT(&ca->sb)) { in invalidate_buckets()
269 invalidate_buckets_lru(ca); in invalidate_buckets()
272 invalidate_buckets_fifo(ca); in invalidate_buckets()
275 invalidate_buckets_random(ca); in invalidate_buckets()
280 #define allocator_wait(ca, cond) \ argument
287 mutex_unlock(&(ca)->set->bucket_lock); \
293 mutex_lock(&(ca)->set->bucket_lock); \
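The allocator_wait() macro fragments show the classic kernel-thread wait pattern: re-test a condition while holding bucket_lock, and if it is false, drop the lock, sleep until woken, reacquire the lock and test again. The pthread analogue below reproduces that unlock-sleep-relock behaviour with a condition variable, which is only an approximation; the real macro uses set_current_state()/schedule() and explicit wakeups, and also returns early when the kthread is asked to stop.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  bucket_wait = PTHREAD_COND_INITIALIZER;

/* caller must hold bucket_lock; the lock is dropped while we sleep */
#define allocator_wait(cond)                                    \
do {                                                            \
        while (!(cond))                                         \
                pthread_cond_wait(&bucket_wait, &bucket_lock);  \
} while (0)

static bool work_queued;

static void *allocator_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&bucket_lock);
        allocator_wait(work_queued);    /* blocks until queue_work() runs */
        pthread_mutex_unlock(&bucket_lock);
        return NULL;
}

static void queue_work(void)
{
        pthread_mutex_lock(&bucket_lock);
        work_queued = true;
        pthread_cond_broadcast(&bucket_wait);
        pthread_mutex_unlock(&bucket_lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, allocator_thread, NULL);
        queue_work();
        pthread_join(t, NULL);
        return 0;
}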
298 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
303 if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) in bch_allocator_push()
307 if (fifo_push(&ca->free[i], bucket)) in bch_allocator_push()
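bch_allocator_push() evidently offers a freed bucket to the RESERVE_PRIO freelist first and then to the remaining reserves in order, stopping at the first FIFO with room. Here is a compact sketch of that ordering with a toy ring buffer; the reserve names other than RESERVE_PRIO and RESERVE_NONE, and the FIFO sizes, are assumptions made for the example.

#include <stdbool.h>
#include <stddef.h>

enum reserve { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

struct fifo { long data[8]; size_t in, out; };

static bool fifo_push(struct fifo *f, long v)
{
        if (f->in - f->out == 8)
                return false;                   /* this reserve is full */
        f->data[f->in++ % 8] = v;
        return true;
}

static struct fifo free_lists[RESERVE_NR];

/* returns true once some reserve has accepted the bucket */
static bool allocator_push(long bucket)
{
        /* the RESERVE_PRIO freelist is tried first, as in the listing above */
        if (fifo_push(&free_lists[RESERVE_PRIO], bucket))
                return true;

        for (unsigned i = 0; i < RESERVE_NR; i++)
                if (fifo_push(&free_lists[i], bucket))
                        return true;

        return false;
}

int main(void)
{
        /* early buckets land in RESERVE_PRIO, later ones spill into the rest */
        for (long b = 0; b < 20; b++)
                allocator_push(b);
        return 0;
}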
315 struct cache *ca = arg; in bch_allocator_thread() local
317 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
325 while (!fifo_empty(&ca->free_inc)) { in bch_allocator_thread()
328 fifo_pop(&ca->free_inc, bucket); in bch_allocator_thread()
330 if (ca->discard) { in bch_allocator_thread()
331 mutex_unlock(&ca->set->bucket_lock); in bch_allocator_thread()
332 blkdev_issue_discard(ca->bdev, in bch_allocator_thread()
333 bucket_to_sector(ca->set, bucket), in bch_allocator_thread()
334 ca->sb.bucket_size, GFP_KERNEL, 0); in bch_allocator_thread()
335 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
338 allocator_wait(ca, bch_allocator_push(ca, bucket)); in bch_allocator_thread()
339 wake_up(&ca->set->btree_cache_wait); in bch_allocator_thread()
340 wake_up(&ca->set->bucket_wait); in bch_allocator_thread()
350 allocator_wait(ca, ca->set->gc_mark_valid && in bch_allocator_thread()
351 !ca->invalidate_needs_gc); in bch_allocator_thread()
352 invalidate_buckets(ca); in bch_allocator_thread()
358 allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); in bch_allocator_thread()
359 if (CACHE_SYNC(&ca->set->sb)) { in bch_allocator_thread()
371 if (!fifo_full(&ca->free_inc)) in bch_allocator_thread()
374 bch_prio_write(ca); in bch_allocator_thread()
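Read top to bottom, the bch_allocator_thread() lines outline a pipeline: drain free_inc, optionally issuing a discard for each bucket with bucket_lock dropped around the call, push each bucket onto a freelist and wake waiters, then refill free_inc via invalidate_buckets(), and finally write out priorities/gens when the cache is synchronous. The toy, single-threaded sketch below walks one pass of that pipeline; every helper is a placeholder, and the real thread also sleeps in allocator_wait() between steps and handles stop requests.

#include <stdbool.h>
#include <stdio.h>

static long pending = 3;                        /* pretend free_inc holds 3 buckets */
static bool discard_enabled = true;
static bool cache_is_sync = true;

static bool free_inc_pop(long *bucket)
{
        if (pending <= 0)
                return false;
        *bucket = --pending;
        return true;
}

static void discard_bucket(long b)      { printf("discard bucket %ld\n", b); }
static void push_to_free_lists(long b)  { printf("push bucket %ld\n", b); }
static void wake_waiters(void)          { }
static void invalidate_buckets(void)    { printf("refill free_inc\n"); }
static void write_prios(void)           { printf("write prios/gens\n"); }

static void allocator_pass(void)
{
        long bucket;

        /* 1) hand already-invalidated buckets over to the freelists */
        while (free_inc_pop(&bucket)) {
                if (discard_enabled)
                        discard_bucket(bucket); /* real code drops bucket_lock here */
                push_to_free_lists(bucket);     /* real code waits for a list with room */
                wake_waiters();
        }

        /* 2) pick more buckets to invalidate (needs valid GC marks) */
        invalidate_buckets();

        /* 3) on a synchronous cache, persist prios/gens before reuse */
        if (cache_is_sync)
                write_prios();
}

int main(void)
{
        allocator_pass();
        return 0;
}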
381 long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) in bch_bucket_alloc() argument
388 if (fifo_pop(&ca->free[RESERVE_NONE], r) || in bch_bucket_alloc()
389 fifo_pop(&ca->free[reserve], r)) in bch_bucket_alloc()
393 trace_bcache_alloc_fail(ca, reserve); in bch_bucket_alloc()
398 prepare_to_wait(&ca->set->bucket_wait, &w, in bch_bucket_alloc()
401 mutex_unlock(&ca->set->bucket_lock); in bch_bucket_alloc()
403 mutex_lock(&ca->set->bucket_lock); in bch_bucket_alloc()
404 } while (!fifo_pop(&ca->free[RESERVE_NONE], r) && in bch_bucket_alloc()
405 !fifo_pop(&ca->free[reserve], r)); in bch_bucket_alloc()
407 finish_wait(&ca->set->bucket_wait, &w); in bch_bucket_alloc()
409 wake_up_process(ca->alloc_thread); in bch_bucket_alloc()
411 trace_bcache_alloc(ca, reserve); in bch_bucket_alloc()
413 if (expensive_debug_checks(ca->set)) { in bch_bucket_alloc()
418 for (iter = 0; iter < prio_buckets(ca) * 2; iter++) in bch_bucket_alloc()
419 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); in bch_bucket_alloc()
422 fifo_for_each(i, &ca->free[j], iter) in bch_bucket_alloc()
424 fifo_for_each(i, &ca->free_inc, iter) in bch_bucket_alloc()
428 b = ca->buckets + r; in bch_bucket_alloc()
432 SET_GC_SECTORS_USED(b, ca->sb.bucket_size); in bch_bucket_alloc()
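bch_bucket_alloc() first tries to pop from the general-purpose freelist and then from the caller's reserve; if both are empty and the caller is willing to wait, it sleeps on bucket_wait (releasing bucket_lock around the sleep, as the mutex_unlock/mutex_lock pair shows) and wakes the allocator thread so the lists get refilled. The pthread sketch below follows that flow under those assumptions; the real function is entered with bucket_lock already held and also performs the expensive debug checks and bucket initialization shown above, which the sketch omits.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

enum reserve { RESERVE_PRIO, RESERVE_BTREE, RESERVE_NONE, RESERVE_NR };

struct fifo { long data[8]; size_t in, out; };

static struct fifo free_lists[RESERVE_NR];
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  bucket_wait = PTHREAD_COND_INITIALIZER;

static bool fifo_push(struct fifo *f, long v)
{
        if (f->in - f->out == 8)
                return false;
        f->data[f->in++ % 8] = v;
        return true;
}

static bool fifo_pop(struct fifo *f, long *v)
{
        if (f->in == f->out)
                return false;
        *v = f->data[f->out++ % 8];
        return true;
}

static void wake_allocator(void) { /* real code wakes ca->alloc_thread */ }

/* returns a bucket index, or -1 when empty and wait == false */
static long bucket_alloc(enum reserve reserve, bool wait)
{
        long r;

        pthread_mutex_lock(&bucket_lock);

        /* try the general pool first, then the caller's reserve */
        while (!fifo_pop(&free_lists[RESERVE_NONE], &r) &&
               !fifo_pop(&free_lists[reserve], &r)) {
                if (!wait) {
                        pthread_mutex_unlock(&bucket_lock);
                        return -1;
                }
                wake_allocator();
                /* bucket_lock is released while we sleep, retaken on wakeup */
                pthread_cond_wait(&bucket_wait, &bucket_lock);
        }

        wake_allocator();               /* keep the freelists topped up */
        pthread_mutex_unlock(&bucket_lock);
        return r;
}

int main(void)
{
        fifo_push(&free_lists[RESERVE_NONE], 42);
        return bucket_alloc(RESERVE_BTREE, false) == 42 ? 0 : 1;
}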
447 void __bch_bucket_free(struct cache *ca, struct bucket *b) in __bch_bucket_free() argument
475 struct cache *ca = c->cache_by_alloc[i]; in __bch_bucket_alloc_set() local
476 long b = bch_bucket_alloc(ca, reserve, wait); in __bch_bucket_alloc_set()
481 k->ptr[i] = PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
483 ca->sb.nr_this_dev); in __bch_bucket_alloc_set()
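In __bch_bucket_alloc_set(), each freshly allocated bucket is turned into a key pointer with PTR(gen, sector, nr_this_dev): a generation, a sector offset and a device index packed into one 64-bit word. The sketch below shows that kind of bit-packing; the field widths are chosen for the example and should not be read as bcache's on-disk layout.

#include <stdint.h>
#include <stdio.h>

/* field widths chosen for the example only */
#define GEN_BITS    8
#define OFFSET_BITS 43
#define DEV_BITS    12

static uint64_t make_ptr(uint8_t gen, uint64_t offset, unsigned dev)
{
        return ((uint64_t)dev << (OFFSET_BITS + GEN_BITS)) |
               ((offset & ((1ULL << OFFSET_BITS) - 1)) << GEN_BITS) |
               gen;
}

static uint8_t  ptr_gen(uint64_t p)    { return p & ((1ULL << GEN_BITS) - 1); }
static uint64_t ptr_offset(uint64_t p) { return (p >> GEN_BITS) & ((1ULL << OFFSET_BITS) - 1); }
static unsigned ptr_dev(uint64_t p)    { return p >> (OFFSET_BITS + GEN_BITS); }

int main(void)
{
        uint64_t p = make_ptr(7, 123456, 3);

        printf("gen=%u offset=%llu dev=%u\n",
               ptr_gen(p), (unsigned long long)ptr_offset(p), ptr_dev(p));
        return 0;
}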
687 int bch_cache_allocator_start(struct cache *ca) in bch_cache_allocator_start() argument
690 ca, "bcache_allocator"); in bch_cache_allocator_start()
694 ca->alloc_thread = k; in bch_cache_allocator_start()