Lines matching references to c
93 #define PTR_HASH(c, k) \ argument
94 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
120 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
136 #define btree_root(fn, c, op, ...) \ argument
140 struct btree *_b = (c)->root; \
143 if (_b == (c)->root && \
148 bch_cannibalize_unlock(c); \
153 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
159 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); in write_block()
166 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
168 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
172 bset_magic(&b->c->sb)); in bch_btree_init_next()
178 void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put() argument
183 if (ptr_available(c, k, i)) in bkey_put()
184 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; in bch_btree_node_read_done()
223 if (b->written + set_blocks(i, block_bytes(b->c)) > in bch_btree_node_read_done()
228 if (i->magic != bset_magic(&b->c->sb)) in bch_btree_node_read_done()
249 b->written += set_blocks(i, block_bytes(b->c)); in bch_btree_node_read_done()
255 i = ((void *) i) + block_bytes(b->c)) in bch_btree_node_read_done()
259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
269 bset_magic(&b->c->sb)); in bch_btree_node_read_done()
271 mempool_free(iter, b->c->fill_iter); in bch_btree_node_read_done()
275 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
276 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
297 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
305 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
311 bch_bbio_free(bio, b->c); in bch_btree_node_read()
317 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
321 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
322 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
328 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
329 wake_up_allocators(b->c); in btree_complete_write()
333 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
352 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
382 bch_bbio_count_io_errors(b->c, bio, error, "writing btree"); in btree_node_write_endio()
396 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); in do_btree_node_write()
432 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
439 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
464 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, in __bch_btree_node_write()
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); in __bch_btree_node_write()
474 b->written += set_blocks(i, block_bytes(b->c)); in __bch_btree_node_write()
535 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
557 #define mca_reserve(c) (((c->root && c->root->level) \ argument
558 ? c->root->level : 1) * 8 + 16)
559 #define mca_can_free(c) \ argument
560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
568 b->c->btree_cache_used--; in mca_data_free()
569 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
578 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
590 ilog2(b->c->btree_pages), in mca_data_alloc()
593 b->c->btree_cache_used++; in mca_data_alloc()
594 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
596 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
600 static struct btree *mca_bucket_alloc(struct cache_set *c, in mca_bucket_alloc() argument
613 b->c = c; in mca_bucket_alloc()
625 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_scan() local
669 if (c->shrinker_disabled) in bch_mca_scan()
672 if (c->btree_cache_alloc_lock) in bch_mca_scan()
677 mutex_lock(&c->bucket_lock); in bch_mca_scan()
678 else if (!mutex_trylock(&c->bucket_lock)) in bch_mca_scan()
688 nr /= c->btree_pages; in bch_mca_scan()
689 nr = min_t(unsigned long, nr, mca_can_free(c)); in bch_mca_scan()
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
704 for (i = 0; (nr--) && i < c->btree_cache_used; i++) { in bch_mca_scan()
705 if (list_empty(&c->btree_cache)) in bch_mca_scan()
708 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_mca_scan()
709 list_rotate_left(&c->btree_cache); in bch_mca_scan()
721 mutex_unlock(&c->bucket_lock); in bch_mca_scan()
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_count() local
730 if (c->shrinker_disabled) in bch_mca_count()
733 if (c->btree_cache_alloc_lock) in bch_mca_count()
736 return mca_can_free(c) * c->btree_pages; in bch_mca_count()
739 void bch_btree_cache_free(struct cache_set *c) in bch_btree_cache_free() argument
745 if (c->shrink.list.next) in bch_btree_cache_free()
746 unregister_shrinker(&c->shrink); in bch_btree_cache_free()
748 mutex_lock(&c->bucket_lock); in bch_btree_cache_free()
751 if (c->verify_data) in bch_btree_cache_free()
752 list_move(&c->verify_data->list, &c->btree_cache); in bch_btree_cache_free()
754 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c))); in bch_btree_cache_free()
757 list_splice(&c->btree_cache_freeable, in bch_btree_cache_free()
758 &c->btree_cache); in bch_btree_cache_free()
760 while (!list_empty(&c->btree_cache)) { in bch_btree_cache_free()
761 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
770 while (!list_empty(&c->btree_cache_freed)) { in bch_btree_cache_free()
771 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
778 mutex_unlock(&c->bucket_lock); in bch_btree_cache_free()
781 int bch_btree_cache_alloc(struct cache_set *c) in bch_btree_cache_alloc() argument
785 for (i = 0; i < mca_reserve(c); i++) in bch_btree_cache_alloc()
786 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) in bch_btree_cache_alloc()
789 list_splice_init(&c->btree_cache, in bch_btree_cache_alloc()
790 &c->btree_cache_freeable); in bch_btree_cache_alloc()
793 mutex_init(&c->verify_lock); in bch_btree_cache_alloc()
795 c->verify_ondisk = (void *) in bch_btree_cache_alloc()
796 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); in bch_btree_cache_alloc()
798 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); in bch_btree_cache_alloc()
800 if (c->verify_data && in bch_btree_cache_alloc()
801 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
802 list_del_init(&c->verify_data->list); in bch_btree_cache_alloc()
804 c->verify_data = NULL; in bch_btree_cache_alloc()
807 c->shrink.count_objects = bch_mca_count; in bch_btree_cache_alloc()
808 c->shrink.scan_objects = bch_mca_scan; in bch_btree_cache_alloc()
809 c->shrink.seeks = 4; in bch_btree_cache_alloc()
810 c->shrink.batch = c->btree_pages * 2; in bch_btree_cache_alloc()
811 register_shrinker(&c->shrink); in bch_btree_cache_alloc()
818 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash() argument
820 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
823 static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find() argument
828 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
829 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
837 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock() argument
841 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); in mca_cannibalize_lock()
844 prepare_to_wait(&c->btree_cache_wait, &op->wait, in mca_cannibalize_lock()
852 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize() argument
857 trace_bcache_btree_cache_cannibalize(c); in mca_cannibalize()
859 if (mca_cannibalize_lock(c, op)) in mca_cannibalize()
862 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
866 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
880 static void bch_cannibalize_unlock(struct cache_set *c) in bch_cannibalize_unlock() argument
882 if (c->btree_cache_alloc_lock == current) { in bch_cannibalize_unlock()
883 c->btree_cache_alloc_lock = NULL; in bch_cannibalize_unlock()
884 wake_up(&c->btree_cache_wait); in bch_cannibalize_unlock()
888 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, in mca_alloc() argument
895 lockdep_assert_held(&c->bucket_lock); in mca_alloc()
897 if (mca_find(c, k)) in mca_alloc()
903 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
910 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
919 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
930 list_move(&b->list, &c->btree_cache); in mca_alloc()
932 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
942 &b->c->expensive_debug_checks); in mca_alloc()
945 &b->c->expensive_debug_checks); in mca_alloc()
952 b = mca_cannibalize(c, op, k); in mca_alloc()
968 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, in bch_btree_node_get() argument
977 b = mca_find(c, k); in bch_btree_node_get()
983 mutex_lock(&c->bucket_lock); in bch_btree_node_get()
984 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
985 mutex_unlock(&c->bucket_lock); in bch_btree_node_get()
998 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1030 mutex_lock(&parent->c->bucket_lock); in btree_node_prefetch()
1031 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1032 mutex_unlock(&parent->c->bucket_lock); in btree_node_prefetch()
1047 BUG_ON(b == b->c->root); in btree_node_free()
1059 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1060 bch_bucket_free(b->c, &b->key); in btree_node_free()
1062 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1065 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, in __bch_btree_node_alloc() argument
1072 mutex_lock(&c->bucket_lock); in __bch_btree_node_alloc()
1074 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) in __bch_btree_node_alloc()
1077 bkey_put(c, &k.key); in __bch_btree_node_alloc()
1078 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1080 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1085 cache_bug(c, in __bch_btree_node_alloc()
1092 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); in __bch_btree_node_alloc()
1094 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1099 bch_bucket_free(c, &k.key); in __bch_btree_node_alloc()
1101 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1103 trace_bcache_btree_node_alloc_fail(c); in __bch_btree_node_alloc()
1107 static struct btree *bch_btree_node_alloc(struct cache_set *c, in bch_btree_node_alloc() argument
1111 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); in bch_btree_node_alloc()
1117 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1120 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1132 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1134 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1141 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), in make_btree_freeing_key()
1142 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1144 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1149 struct cache_set *c = b->c; in btree_check_reserve() local
1151 unsigned i, reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1153 mutex_lock(&c->bucket_lock); in btree_check_reserve()
1155 for_each_cache(ca, c, i) in btree_check_reserve()
1158 prepare_to_wait(&c->btree_cache_wait, &op->wait, in btree_check_reserve()
1160 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1164 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1166 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1171 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, in __bch_btree_mark_key() argument
1187 if (!ptr_available(c, k, i)) in __bch_btree_mark_key()
1190 g = PTR_BUCKET(c, k, i); in __bch_btree_mark_key()
1195 if (ptr_stale(c, k, i)) { in __bch_btree_mark_key()
1196 stale = max(stale, ptr_stale(c, k, i)); in __bch_btree_mark_key()
1202 c, "inconsistent ptrs: mark = %llu, level = %i", in __bch_btree_mark_key()
1223 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1225 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) in bch_initial_mark_key() argument
1230 if (ptr_available(c, k, i) && in bch_initial_mark_key()
1231 !ptr_stale(c, k, i)) { in bch_initial_mark_key()
1232 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key()
1242 __bch_btree_mark_key(c, level, k); in bch_initial_mark_key()
1275 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1317 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1321 block_bytes(b->c)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1355 block_bytes(b->c)) > blocks) in btree_gc_coalesce()
1371 block_bytes(b->c)) > in btree_gc_coalesce()
1380 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > in btree_gc_coalesce()
1454 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1522 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1530 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1561 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1617 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1625 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1630 static void btree_gc_start(struct cache_set *c) in btree_gc_start() argument
1636 if (!c->gc_mark_valid) in btree_gc_start()
1639 mutex_lock(&c->bucket_lock); in btree_gc_start()
1641 c->gc_mark_valid = 0; in btree_gc_start()
1642 c->gc_done = ZERO_KEY; in btree_gc_start()
1644 for_each_cache(ca, c, i) in btree_gc_start()
1653 mutex_unlock(&c->bucket_lock); in btree_gc_start()
1656 static size_t bch_btree_gc_finish(struct cache_set *c) in bch_btree_gc_finish() argument
1663 mutex_lock(&c->bucket_lock); in bch_btree_gc_finish()
1665 set_gc_sectors(c); in bch_btree_gc_finish()
1666 c->gc_mark_valid = 1; in bch_btree_gc_finish()
1667 c->need_gc = 0; in bch_btree_gc_finish()
1669 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) in bch_btree_gc_finish()
1670 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), in bch_btree_gc_finish()
1675 for (i = 0; i < c->nr_uuids; i++) { in bch_btree_gc_finish()
1676 struct bcache_device *d = c->devices[i]; in bch_btree_gc_finish()
1681 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) in bch_btree_gc_finish()
1689 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), in bch_btree_gc_finish()
1695 for_each_cache(ca, c, i) { in bch_btree_gc_finish()
1708 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1720 mutex_unlock(&c->bucket_lock); in bch_btree_gc_finish()
1724 static void bch_btree_gc(struct cache_set *c) in bch_btree_gc() argument
1733 trace_bcache_gc_start(c); in bch_btree_gc()
1739 btree_gc_start(c); in bch_btree_gc()
1742 ret = btree_root(gc_root, c, &op, &writes, &stats); in bch_btree_gc()
1750 available = bch_btree_gc_finish(c); in bch_btree_gc()
1751 wake_up_allocators(c); in bch_btree_gc()
1753 bch_time_stats_update(&c->btree_gc_time, start_time); in bch_btree_gc()
1757 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; in bch_btree_gc()
1758 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); in bch_btree_gc()
1760 trace_bcache_gc_end(c); in bch_btree_gc()
1762 bch_moving_gc(c); in bch_btree_gc()
1767 struct cache_set *c = arg; in bch_gc_thread() local
1773 bch_btree_gc(c); in bch_gc_thread()
1779 mutex_lock(&c->bucket_lock); in bch_gc_thread()
1781 for_each_cache(ca, c, i) in bch_gc_thread()
1783 mutex_unlock(&c->bucket_lock); in bch_gc_thread()
1788 mutex_unlock(&c->bucket_lock); in bch_gc_thread()
1797 int bch_gc_thread_start(struct cache_set *c) in bch_gc_thread_start() argument
1799 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); in bch_gc_thread_start()
1800 if (IS_ERR(c->gc_thread)) in bch_gc_thread_start()
1801 return PTR_ERR(c->gc_thread); in bch_gc_thread_start()
1803 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); in bch_gc_thread_start()
1816 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1818 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1839 int bch_btree_check(struct cache_set *c) in bch_btree_check() argument
1845 return btree_root(check_recurse, c, &op); in bch_btree_check()
1848 void bch_initial_gc_finish(struct cache_set *c) in bch_initial_gc_finish() argument
1854 bch_btree_gc_finish(c); in bch_initial_gc_finish()
1856 mutex_lock(&c->bucket_lock); in bch_initial_gc_finish()
1867 for_each_cache(ca, c, i) { in bch_initial_gc_finish()
1881 mutex_unlock(&c->bucket_lock); in bch_initial_gc_finish()
1933 bkey_put(b->c, k); in bch_btree_insert_keys()
1985 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; in btree_split()
1992 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
1997 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2072 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2076 bkey_put(b->c, &n2->key); in btree_split()
2080 bkey_put(b->c, &n1->key); in btree_split()
2133 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2135 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2136 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2208 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2224 ret = bch_btree_map_leaf_nodes(&op.op, c, in bch_btree_insert()
2235 bkey_put(c, k); in bch_btree_insert()
2254 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2256 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2258 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2260 b->c->root = b; in bch_btree_set_root()
2262 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2297 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, in __bch_btree_map_nodes() argument
2300 return btree_root(map_nodes_recurse, c, op, from, fn, flags); in __bch_btree_map_nodes()
2330 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, in bch_btree_map_keys() argument
2333 return btree_root(map_keys_recurse, c, op, from, fn, flags); in bch_btree_map_keys()
2406 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, in bch_refill_keybuf() argument
2420 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, in bch_refill_keybuf()
2503 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, in bch_keybuf_next_rescan() argument
2520 bch_refill_keybuf(c, buf, end, pred); in bch_keybuf_next_rescan()
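
For orientation only: the PTR_HASH(), mca_hash() and mca_find() entries above (source lines 93-94 and 818-829) are how a btree node is located in the in-memory cache. The first pointer of the node's key is shifted down to a bucket number, OR-ed with that pointer's generation, hashed into c->bucket_hash[], and the resulting chain is walked comparing PTR_HASH values. The following is a minimal user-space sketch of that scheme, not the kernel code itself; the struct layouts, the multiplicative constant and the BUCKET_HASH_BITS value are simplified assumptions.

#include <stdint.h>
#include <stdio.h>

#define BUCKET_HASH_BITS 10                        /* assumed: 1024 hash chains */

/* Simplified stand-ins for the kernel structures in the listing above. */
struct bkey      { uint64_t ptr0; uint8_t gen; };  /* first pointer + its generation */
struct cache_set { unsigned bucket_bits; };        /* log2 of the bucket size in sectors */

/* Mirrors PTR_HASH(c, k): bucket index of ptr[0] combined with PTR_GEN(k, 0). */
static uint32_t ptr_hash(const struct cache_set *c, const struct bkey *k)
{
	return (uint32_t)((k->ptr0 >> c->bucket_bits) | k->gen);
}

/* Multiplicative hash into BUCKET_HASH_BITS bits, in the spirit of hash_32(). */
static uint32_t hash_chain(uint32_t val)
{
	return (val * 0x61C88647u) >> (32 - BUCKET_HASH_BITS);
}

int main(void)
{
	struct cache_set c = { .bucket_bits = 13 };    /* e.g. 8192-sector (4 MiB) buckets */
	struct bkey k = { .ptr0 = (uint64_t)123456 << 13, .gen = 3 };

	uint32_t h = ptr_hash(&c, &k);                 /* the value mca_find() compares */
	printf("PTR_HASH = %u -> bucket_hash chain %u\n",
	       (unsigned)h, (unsigned)hash_chain(h));
	return 0;
}

In the real code the table lookup is the RCU hlist walk in mca_find(); a miss falls through to mca_alloc(), and ultimately to mca_cannibalize() under the btree_cache_alloc_lock shown at lines 837-884.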