Lines matching refs: b (drivers/md/bcache/btree.c)

96 #define insert_lock(s, b)	((b)->level <= (s)->lock)  argument
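
Note on insert_lock() (line 96): traversal write-locks a node when the node's level is at or below the level the operation asked to have locked. A minimal userspace sketch of the predicate, with a stripped-down struct btree_op (only the lock field is real; everything else is omitted):

#include <stdbool.h>
#include <stdio.h>

struct btree_op { short lock; };

/* Mirrors: #define insert_lock(s, b) ((b)->level <= (s)->lock) */
static bool insert_lock(const struct btree_op *op, int node_level)
{
        return node_level <= op->lock;
}

int main(void)
{
        struct btree_op op = { .lock = 0 };     /* leaf-only insert */

        printf("level 1: %d, level 0: %d\n",
               insert_lock(&op, 1), insert_lock(&op, 0));  /* 0, 1 */
        return 0;
}
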
116 #define btree(fn, key, b, op, ...) \ argument
118 int _r, l = (b)->level - 1; \
120 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
121 _w, b); \
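
Note on the btree() macro (lines 116-121): it is the recursion helper for the whole file; it looks up the child node for `key` one level down via bch_btree_node_get(), invokes btree_<fn>() on it, and propagates the status. A loosely modeled userspace sketch of the same token-pasting dispatch (node, child_of and visit_* are hypothetical stand-ins; the real macro also handles locking and -EINTR retries):

#include <stdio.h>

struct node { int level; struct node *child; };

static struct node *child_of(struct node *n, int key)
{
        (void)key;      /* real code: bch_btree_node_get(..., key, ...) */
        return n->child;
}

/* GNU C statement expression, as in the kernel macro: fetch the child
 * for `key`, then call visit_<fn> on it. */
#define btree(fn, key, n, ...) ({                               \
        struct node *_child = child_of((n), (key));             \
        _child ? visit_##fn(_child, ##__VA_ARGS__) : -1;        \
})

static int visit_leaf(struct node *n, int key)
{
        printf("visiting level %d for key %d\n", n->level, key);
        return 0;
}

int main(void)
{
        struct node leaf = { 0, NULL }, root = { 1, &leaf };

        return btree(leaf, 42, &root, 42);
}
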
157 static inline struct bset *write_block(struct btree *b) in write_block() argument
159 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); in write_block()
162 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next() argument
165 if (b->level && b->keys.nsets) in bch_btree_init_next()
166 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
168 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
170 if (b->written < btree_blocks(b)) in bch_btree_init_next()
171 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
172 bset_magic(&b->c->sb)); in bch_btree_init_next()
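
Note on write_block() and bch_btree_init_next() (lines 157-172): on-disk bsets are appended back to back in block-size units, so the next writable bset starts b->written blocks past the first one, and a fresh set is initialized there while space remains. A sketch of the pointer arithmetic (block_bytes stands in for block_bytes(b->c)):

#include <stddef.h>
#include <stdio.h>

static void *write_block(void *first_bset, unsigned written, unsigned block_bytes)
{
        return (char *)first_bset + (size_t)written * block_bytes;
}

int main(void)
{
        char node_buf[4 * 4096];        /* pretend: a 4-block btree node */
        void *next = write_block(node_buf, 3, 4096);

        printf("next bset at offset %td\n", (char *)next - node_buf);  /* 12288 */
        return 0;
}
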
189 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set() argument
191 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
198 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done() argument
201 struct bset *i = btree_bset_first(b); in bch_btree_node_read_done()
204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; in bch_btree_node_read_done()
209 iter->b = &b->keys; in bch_btree_node_read_done()
216 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
217 i = write_block(b)) { in bch_btree_node_read_done()
223 if (b->written + set_blocks(i, block_bytes(b->c)) > in bch_btree_node_read_done()
224 btree_blocks(b)) in bch_btree_node_read_done()
228 if (i->magic != bset_magic(&b->c->sb)) in bch_btree_node_read_done()
238 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
244 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
249 b->written += set_blocks(i, block_bytes(b->c)); in bch_btree_node_read_done()
253 for (i = write_block(b); in bch_btree_node_read_done()
254 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
255 i = ((void *) i) + block_bytes(b->c)) in bch_btree_node_read_done()
256 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
261 i = b->keys.set[0].data; in bch_btree_node_read_done()
263 if (b->keys.set[0].size && in bch_btree_node_read_done()
264 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
267 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
268 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
269 bset_magic(&b->c->sb)); in bch_btree_node_read_done()
271 mempool_free(iter, b->c->fill_iter); in bch_btree_node_read_done()
274 set_btree_node_io_error(b); in bch_btree_node_read_done()
275 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
276 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
277 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
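
Note on bch_btree_node_read_done() (lines 198-277): the loop accepts consecutive on-disk sets only while each one's magic, checksum and sequence number agree with the first set; a set with a different seq belongs to an earlier generation of the node and ends the walk. A userspace sketch of that validation, with simplified struct set and sum() standing in for struct bset and btree_csum_set():

#include <stdint.h>
#include <stdio.h>

#define SET_MAGIC 0xb7e30000u

struct set {
        uint32_t magic;
        uint32_t seq;
        uint32_t csum;
        uint32_t nkeys;
        uint64_t keys[4];
};

static uint32_t sum(const struct set *s)
{
        uint32_t c = 0;

        for (uint32_t i = 0; i < s->nkeys; i++)
                c ^= (uint32_t)s->keys[i] * 2654435761u;
        return c;
}

/* Returns how many leading sets validated, mirroring the b->written
 * accounting in the real function. */
static unsigned validate(struct set **sets, unsigned n)
{
        unsigned ok = 0;

        for (unsigned i = 0; i < n; i++) {
                struct set *s = sets[i];

                if (s->magic != SET_MAGIC)
                        break;                  /* "bad magic"        */
                if (s->seq != sets[0]->seq)
                        break;                  /* earlier generation */
                if (s->csum != sum(s))
                        break;                  /* "bad checksum"     */
                ok++;
        }
        return ok;
}

int main(void)
{
        struct set a = { SET_MAGIC, 7, 0, 2, { 10, 20 } };
        struct set b = { SET_MAGIC, 6, 0, 1, { 30 } };      /* stale seq */
        struct set *sets[] = { &a, &b };

        a.csum = sum(&a);
        b.csum = sum(&b);
        printf("%u of 2 sets valid\n", validate(sets, 2));  /* 1 of 2 */
        return 0;
}
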
287 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read() argument
293 trace_bcache_btree_read(b); in bch_btree_node_read()
297 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
303 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
305 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
309 set_btree_node_io_error(b); in bch_btree_node_read()
311 bch_bbio_free(bio, b->c); in bch_btree_node_read()
313 if (btree_node_io_error(b)) in bch_btree_node_read()
316 bch_btree_node_read_done(b); in bch_btree_node_read()
317 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
321 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
322 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
325 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write() argument
328 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
329 wake_up_allocators(b->c); in btree_complete_write()
333 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
342 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_unlock() local
344 up(&b->io_mutex); in btree_node_write_unlock()
349 struct btree *b = container_of(cl, struct btree, io); in __btree_node_write_done() local
350 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
352 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
353 b->bio = NULL; in __btree_node_write_done()
354 btree_complete_write(b, w); in __btree_node_write_done()
356 if (btree_node_dirty(b)) in __btree_node_write_done()
357 schedule_delayed_work(&b->work, 30 * HZ); in __btree_node_write_done()
364 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_done() local
368 bio_for_each_segment_all(bv, b->bio, n) in btree_node_write_done()
377 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio() local
380 set_btree_node_io_error(b); in btree_node_write_endio()
382 bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); in btree_node_write_endio()
386 static void do_btree_node_write(struct btree *b) in do_btree_node_write() argument
388 struct closure *cl = &b->io; in do_btree_node_write()
389 struct bset *i = btree_bset_last(b); in do_btree_node_write()
393 i->csum = btree_csum_set(b, i); in do_btree_node_write()
395 BUG_ON(b->bio); in do_btree_node_write()
396 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
398 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
399 b->bio->bi_private = cl; in do_btree_node_write()
400 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; in do_btree_node_write()
401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); in do_btree_node_write()
402 bch_bio_map(b->bio, i); in do_btree_node_write()
419 bkey_copy(&k.key, &b->key); in do_btree_node_write()
421 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
423 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
428 bio_for_each_segment_all(bv, b->bio, j) in do_btree_node_write()
432 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
436 b->bio->bi_vcnt = 0; in do_btree_node_write()
437 bch_bio_map(b->bio, i); in do_btree_node_write()
439 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
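
Note on do_btree_node_write() (lines 386-439): the write path prefers a private copy of the keys (bio_alloc_pages() with GFP_NOWAIT at line 423, then a copy loop), so inserts can keep modifying the node while the write is in flight; if that nonblocking allocation fails, it resets the bio and submits the live bset memory directly. A sketch of the try-copy-else-in-place fallback (submit() is a hypothetical stand-in for bch_submit_bbio()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void submit(const void *buf, size_t len, const char *how)
{
        (void)buf;
        printf("writing %zu bytes (%s)\n", len, how);
}

static void write_node(const void *live, size_t len)
{
        void *copy = malloc(len);       /* ~ bio_alloc_pages(GFP_NOWAIT) */

        if (copy) {
                memcpy(copy, live, len);        /* stable snapshot */
                submit(copy, len, "bounced copy");
                free(copy);
        } else {
                submit(live, len, "in place; buffer must stay untouched");
        }
}

int main(void)
{
        char bset[512] = "keys...";

        write_node(bset, sizeof(bset));
        return 0;
}
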
446 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write() argument
448 struct bset *i = btree_bset_last(b); in __bch_btree_node_write()
450 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
452 trace_bcache_btree_write(b); in __bch_btree_node_write()
455 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
456 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
457 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
458 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
460 cancel_delayed_work(&b->work); in __bch_btree_node_write()
463 down(&b->io_mutex); in __bch_btree_node_write()
464 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
466 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
467 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
469 do_btree_node_write(b); in __bch_btree_node_write()
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, in __bch_btree_node_write()
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); in __bch_btree_node_write()
474 b->written += set_blocks(i, block_bytes(b->c)); in __bch_btree_node_write()
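
Note on __bch_btree_node_write() (lines 446-474): clearing BTREE_NODE_dirty and flipping BTREE_NODE_write_idx switches the node between its two struct btree_write slots, so journal pins for new inserts accumulate in one slot while the other slot's write is still in flight (paired with btree_prev_write()/btree_current_write() above). A sketch of that one-bit double buffering:

#include <stdio.h>

struct write_state {
        int      pending[2];    /* ~ two struct btree_write slots */
        unsigned idx;           /* ~ BTREE_NODE_write_idx         */
};

static int *current_write(struct write_state *w)
{
        return &w->pending[w->idx & 1];
}

static int *prev_write(struct write_state *w)
{
        return &w->pending[(w->idx + 1) & 1];
}

int main(void)
{
        struct write_state w = { { 0, 0 }, 0 };

        *current_write(&w) += 3;        /* pins gathered while dirty      */
        w.idx ^= 1;                     /* start the write: flip the slot */
        *current_write(&w) += 1;        /* new pins land in the other one */
        printf("in flight: %d, accumulating: %d\n",
               *prev_write(&w), *current_write(&w));    /* 3, 1 */
        return 0;
}
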
477 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write() argument
479 unsigned nsets = b->keys.nsets; in bch_btree_node_write()
481 lockdep_assert_held(&b->lock); in bch_btree_node_write()
483 __bch_btree_node_write(b, parent); in bch_btree_node_write()
489 if (nsets && !b->keys.nsets) in bch_btree_node_write()
490 bch_btree_verify(b); in bch_btree_node_write()
492 bch_btree_init_next(b); in bch_btree_node_write()
495 static void bch_btree_node_write_sync(struct btree *b) in bch_btree_node_write_sync() argument
501 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
502 bch_btree_node_write(b, &cl); in bch_btree_node_write_sync()
503 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work); in btree_node_write_work() local
512 mutex_lock(&b->write_lock); in btree_node_write_work()
513 if (btree_node_dirty(b)) in btree_node_write_work()
514 __bch_btree_node_write(b, NULL); in btree_node_write_work()
515 mutex_unlock(&b->write_lock); in btree_node_write_work()
518 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) in bch_btree_leaf_dirty() argument
520 struct bset *i = btree_bset_last(b); in bch_btree_leaf_dirty()
521 struct btree_write *w = btree_current_write(b); in bch_btree_leaf_dirty()
523 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
525 BUG_ON(!b->written); in bch_btree_leaf_dirty()
528 if (!btree_node_dirty(b)) in bch_btree_leaf_dirty()
529 schedule_delayed_work(&b->work, 30 * HZ); in bch_btree_leaf_dirty()
531 set_btree_node_dirty(b); in bch_btree_leaf_dirty()
535 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
549 bch_btree_node_write(b, NULL); in bch_btree_leaf_dirty()
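
Note on bch_btree_leaf_dirty() (lines 518-549): only the first insert into a clean leaf arms the delayed flush (30 * HZ, i.e. 30 seconds), so a burst of inserts amortizes into one write; the node is also written immediately when it would otherwise pin an older journal entry (line 549). A sketch of the clean-to-dirty transition (schedule_flush() is hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct node { bool dirty; };

static void schedule_flush(struct node *n, int seconds)
{
        (void)n;
        printf("flush scheduled in %ds\n", seconds);
}

static void leaf_dirty(struct node *n)
{
        if (!n->dirty)          /* clean -> dirty: arm the timer once */
                schedule_flush(n, 30);
        n->dirty = true;
}

int main(void)
{
        struct node n = { false };

        leaf_dirty(&n);         /* schedules the flush  */
        leaf_dirty(&n);         /* no-op: already dirty */
        return 0;
}
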
562 static void mca_data_free(struct btree *b) in mca_data_free() argument
564 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
566 bch_btree_keys_free(&b->keys); in mca_data_free()
568 b->c->btree_cache_used--; in mca_data_free()
569 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
572 static void mca_bucket_free(struct btree *b) in mca_bucket_free() argument
574 BUG_ON(btree_node_dirty(b)); in mca_bucket_free()
576 b->key.ptr[0] = 0; in mca_bucket_free()
577 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
578 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
586 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
588 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
590 ilog2(b->c->btree_pages), in mca_data_alloc()
593 b->c->btree_cache_used++; in mca_data_alloc()
594 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
596 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
603 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc() local
604 if (!b) in mca_bucket_alloc()
607 init_rwsem(&b->lock); in mca_bucket_alloc()
608 lockdep_set_novalidate_class(&b->lock); in mca_bucket_alloc()
609 mutex_init(&b->write_lock); in mca_bucket_alloc()
610 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
611 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
612 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
613 b->c = c; in mca_bucket_alloc()
614 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
616 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
617 return b; in mca_bucket_alloc()
620 static int mca_reap(struct btree *b, unsigned min_order, bool flush) in mca_reap() argument
625 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
627 if (!down_write_trylock(&b->lock)) in mca_reap()
630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
632 if (b->keys.page_order < min_order) in mca_reap()
636 if (btree_node_dirty(b)) in mca_reap()
639 if (down_trylock(&b->io_mutex)) in mca_reap()
641 up(&b->io_mutex); in mca_reap()
644 mutex_lock(&b->write_lock); in mca_reap()
645 if (btree_node_dirty(b)) in mca_reap()
646 __bch_btree_node_write(b, &cl); in mca_reap()
647 mutex_unlock(&b->write_lock); in mca_reap()
652 down(&b->io_mutex); in mca_reap()
653 up(&b->io_mutex); in mca_reap()
657 rw_unlock(true, b); in mca_reap()
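
Note on mca_reap() (lines 620-657): every lock acquisition here is a trylock (down_write_trylock(), down_trylock()), so the cache shrinker never blocks behind foreground btree traffic; a busy, dirty, or too-small node is simply skipped. A pthreads sketch of the discipline, simplified to one rwlock (the kernel also trylocks io_mutex and optionally flushes dirty nodes):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_rwlock_t lock;
        int order;              /* ~ b->keys.page_order */
};

static int reap(struct node *n, int min_order)
{
        if (pthread_rwlock_trywrlock(&n->lock))
                return -EBUSY;          /* in use: skip, never wait  */

        if (n->order < min_order) {
                pthread_rwlock_unlock(&n->lock);
                return -ENOMEM;         /* buffer too small to reuse */
        }
        return 0;                       /* success: lock stays held  */
}

int main(void)
{
        struct node n = { PTHREAD_RWLOCK_INITIALIZER, 2 };

        printf("reap: %d\n", reap(&n, 1));      /* 0: reclaimed */
        return 0;
}
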
665 struct btree *b, *t; in bch_mca_scan() local
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
697 !mca_reap(b, 0, false)) { in bch_mca_scan()
698 mca_data_free(b); in bch_mca_scan()
699 rw_unlock(true, b); in bch_mca_scan()
708 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_mca_scan()
711 if (!b->accessed && in bch_mca_scan()
712 !mca_reap(b, 0, false)) { in bch_mca_scan()
713 mca_bucket_free(b); in bch_mca_scan()
714 mca_data_free(b); in bch_mca_scan()
715 rw_unlock(true, b); in bch_mca_scan()
718 b->accessed = 0; in bch_mca_scan()
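
Note on bch_mca_scan() (lines 665-718): after draining the freeable list, the scan walks the main LRU, where b->accessed acts as a second-chance bit; a node touched since the last pass is spared once and has the bit cleared. A sketch of second-chance eviction over a flat array (the kernel rotates a list instead):

#include <stdbool.h>
#include <stdio.h>

struct node { const char *name; bool accessed; };

static unsigned scan(struct node *nodes, unsigned n, unsigned nr_to_free)
{
        unsigned freed = 0;

        for (unsigned i = 0; i < n && freed < nr_to_free; i++) {
                if (nodes[i].accessed) {
                        nodes[i].accessed = false;      /* spared this pass */
                } else {
                        printf("evict %s\n", nodes[i].name);
                        freed++;
                }
        }
        return freed;
}

int main(void)
{
        struct node nodes[] = { {"a", true}, {"b", false}, {"c", true} };

        scan(nodes, 3, 2);      /* evicts b; a and c get a second chance */
        return 0;
}
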
741 struct btree *b; in bch_btree_cache_free() local
761 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
763 if (btree_node_dirty(b)) in bch_btree_cache_free()
764 btree_complete_write(b, btree_current_write(b)); in bch_btree_cache_free()
765 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
767 mca_data_free(b); in bch_btree_cache_free()
771 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
773 list_del(&b->list); in bch_btree_cache_free()
774 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
775 kfree(b); in bch_btree_cache_free()
825 struct btree *b; in mca_find() local
828 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
829 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
831 b = NULL; in mca_find()
834 return b; in mca_find()
855 struct btree *b; in mca_cannibalize() local
862 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
863 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
864 return b; in mca_cannibalize()
866 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
867 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
868 return b; in mca_cannibalize()
891 struct btree *b; in mca_alloc() local
903 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
904 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
910 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
911 if (!mca_reap(b, 0, false)) { in mca_alloc()
912 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
913 if (!b->keys.set[0].data) in mca_alloc()
919 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
920 if (!b) in mca_alloc()
923 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
924 if (!b->keys.set->data) in mca_alloc()
927 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
929 bkey_copy(&b->key, k); in mca_alloc()
930 list_move(&b->list, &c->btree_cache); in mca_alloc()
931 hlist_del_init_rcu(&b->hash); in mca_alloc()
932 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
934 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
935 b->parent = (void *) ~0UL; in mca_alloc()
936 b->flags = 0; in mca_alloc()
937 b->written = 0; in mca_alloc()
938 b->level = level; in mca_alloc()
940 if (!b->level) in mca_alloc()
941 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
942 &b->c->expensive_debug_checks); in mca_alloc()
944 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
945 &b->c->expensive_debug_checks); in mca_alloc()
947 return b; in mca_alloc()
949 if (b) in mca_alloc()
950 rw_unlock(true, b); in mca_alloc()
952 b = mca_cannibalize(c, op, k); in mca_alloc()
953 if (!IS_ERR(b)) in mca_alloc()
956 return b; in mca_alloc()
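
Note on mca_alloc() (lines 891-956): allocation falls through a ladder of sources, cheapest first: a freeable node of sufficient order, a freed shell whose data is reallocated, a fresh kzalloc via mca_bucket_alloc(), and as a last resort mca_cannibalize(), which evicts a live node. A sketch of the fallback chain (all four helpers are hypothetical stand-ins for the list scans in the real code):

#include <stddef.h>
#include <stdio.h>

struct node { int dummy; };

static struct node *from_freeable(void) { return NULL; }    /* miss */
static struct node *from_freed(void)    { return NULL; }    /* miss */
static struct node *fresh_alloc(void)   { return NULL; }    /* OOM  */

static struct node *cannibalize(void)
{
        static struct node victim;

        return &victim;         /* steal a live node as a last resort */
}

static struct node *alloc_node(void)
{
        struct node *n;

        if ((n = from_freeable()))
                return n;
        if ((n = from_freed()))
                return n;
        if ((n = fresh_alloc()))
                return n;
        return cannibalize();
}

int main(void)
{
        printf("got node at %p\n", (void *)alloc_node());
        return 0;
}
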
973 struct btree *b; in bch_btree_node_get() local
977 b = mca_find(c, k); in bch_btree_node_get()
979 if (!b) { in bch_btree_node_get()
984 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
987 if (!b) in bch_btree_node_get()
989 if (IS_ERR(b)) in bch_btree_node_get()
990 return b; in bch_btree_node_get()
992 bch_btree_node_read(b); in bch_btree_node_get()
995 downgrade_write(&b->lock); in bch_btree_node_get()
997 rw_lock(write, b, level); in bch_btree_node_get()
998 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
999 rw_unlock(write, b); in bch_btree_node_get()
1002 BUG_ON(b->level != level); in bch_btree_node_get()
1005 b->parent = parent; in bch_btree_node_get()
1006 b->accessed = 1; in bch_btree_node_get()
1008 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1009 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1010 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1013 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1014 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1016 if (btree_node_io_error(b)) { in bch_btree_node_get()
1017 rw_unlock(write, b); in bch_btree_node_get()
1021 BUG_ON(!b->written); in bch_btree_node_get()
1023 return b; in bch_btree_node_get()
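
Note on bch_btree_node_get() (lines 973-1023): mca_find() looks the node up in the hash table under RCU with no lock held, so after taking the node's rwlock the code re-checks PTR_HASH() (line 998); if the node was recycled for a different bucket in that window, it drops the lock and retries. A pthreads sketch of the revalidate-after-lock pattern (lookup() is a hypothetical single-entry stand-in for the hash table):

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
        unsigned long bucket;   /* which on-disk bucket this node caches */
};

static struct node cache = { PTHREAD_MUTEX_INITIALIZER, 7 };

static struct node *lookup(unsigned long bucket)
{
        return cache.bucket == bucket ? &cache : NULL;
}

static struct node *get_node(unsigned long bucket)
{
        struct node *n;
retry:
        n = lookup(bucket);     /* unlocked lookup, as with mca_find() */
        if (!n)
                return NULL;    /* caller would read it from disk      */

        pthread_mutex_lock(&n->lock);
        if (n->bucket != bucket) {              /* recycled underneath us */
                pthread_mutex_unlock(&n->lock);
                goto retry;
        }
        return n;               /* locked and validated */
}

int main(void)
{
        struct node *n = get_node(7);

        printf("%s\n", n ? "hit" : "miss");
        if (n)
                pthread_mutex_unlock(&n->lock);
        return 0;
}
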
1028 struct btree *b; in btree_node_prefetch() local
1031 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1034 if (!IS_ERR_OR_NULL(b)) { in btree_node_prefetch()
1035 b->parent = parent; in btree_node_prefetch()
1036 bch_btree_node_read(b); in btree_node_prefetch()
1037 rw_unlock(true, b); in btree_node_prefetch()
1043 static void btree_node_free(struct btree *b) in btree_node_free() argument
1045 trace_bcache_btree_node_free(b); in btree_node_free()
1047 BUG_ON(b == b->c->root); in btree_node_free()
1049 mutex_lock(&b->write_lock); in btree_node_free()
1051 if (btree_node_dirty(b)) in btree_node_free()
1052 btree_complete_write(b, btree_current_write(b)); in btree_node_free()
1053 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1055 mutex_unlock(&b->write_lock); in btree_node_free()
1057 cancel_delayed_work(&b->work); in btree_node_free()
1059 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1060 bch_bucket_free(b->c, &b->key); in btree_node_free()
1061 mca_bucket_free(b); in btree_node_free()
1062 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1070 struct btree *b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc() local
1080 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1081 if (IS_ERR(b)) in __bch_btree_node_alloc()
1084 if (!b) { in __bch_btree_node_alloc()
1090 b->accessed = 1; in __bch_btree_node_alloc()
1091 b->parent = parent; in __bch_btree_node_alloc()
1092 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); in __bch_btree_node_alloc()
1096 trace_bcache_btree_node_alloc(b); in __bch_btree_node_alloc()
1097 return b; in __bch_btree_node_alloc()
1104 return b; in __bch_btree_node_alloc()
1114 static struct btree *btree_node_alloc_replacement(struct btree *b, in btree_node_alloc_replacement() argument
1117 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1120 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1121 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1128 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1132 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1134 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1136 bkey_copy(k, &b->key); in make_btree_freeing_key()
1141 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), in make_btree_freeing_key()
1142 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1144 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
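
Note on make_btree_freeing_key() (lines 1128-1144): rather than zeroing old pointers, bch_inc_gen() bumps the generation of each bucket the node lives in, which instantly makes every existing pointer to that bucket stale. A sketch of generation-based invalidation:

#include <stdbool.h>
#include <stdio.h>

struct bucket { unsigned char gen; };
struct ptr    { struct bucket *b; unsigned char gen; };

/* A pointer is stale once its embedded generation no longer matches
 * the bucket's current generation. */
static bool ptr_stale(const struct ptr *p)
{
        return p->gen != p->b->gen;
}

int main(void)
{
        struct bucket bkt = { 3 };
        struct ptr p = { &bkt, 3 };

        printf("stale before: %d\n", ptr_stale(&p));    /* 0 */
        bkt.gen++;              /* ~ bch_inc_gen() on node free */
        printf("stale after:  %d\n", ptr_stale(&p));    /* 1 */
        return 0;
}
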
1147 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1149 struct cache_set *c = b->c; in btree_check_reserve()
1151 unsigned i, reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1166 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1223 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1232 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key() local
1234 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1237 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1238 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1239 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1245 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) in btree_gc_mark_node() argument
1255 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1256 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1259 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1269 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1271 bset_written(&b->keys, t) && in btree_gc_mark_node()
1272 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1273 b, "found short btree key in gc"); in btree_gc_mark_node()
1275 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1290 struct btree *b; member
1297 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1308 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1314 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) in btree_gc_coalesce()
1317 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1320 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1321 block_bytes(b->c)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1325 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); in btree_gc_coalesce()
1336 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1355 block_bytes(b->c)) > blocks) in btree_gc_coalesce()
1371 block_bytes(b->c)) > in btree_gc_coalesce()
1377 last = &r->b->key; in btree_gc_coalesce()
1380 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > in btree_gc_coalesce()
1420 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1423 make_btree_freeing_key(r[i].b, keylist.top); in btree_gc_coalesce()
1427 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1431 btree_node_free(r[i].b); in btree_gc_coalesce()
1432 rw_unlock(true, r[i].b); in btree_gc_coalesce()
1434 r[i].b = new_nodes[i]; in btree_gc_coalesce()
1438 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1454 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
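
Note on btree_gc_coalesce() (lines 1297-1454): merging up to GC_MERGE_NODES adjacent nodes is only attempted when all their keys fit in one fewer node filled to about two thirds (blocks = btree_default_blocks(b->c) * 2 / 3, line 1317), so the merge frees a node and still leaves insert slack. A sketch of that admission test:

#include <stdbool.h>
#include <stdio.h>

static bool worth_coalescing(unsigned key_blocks, unsigned node_blocks,
                             unsigned nodes)
{
        unsigned target = node_blocks * 2 / 3;  /* per-node fill target */

        return nodes > 1 && key_blocks <= target * (nodes - 1);
}

int main(void)
{
        /* 3 nodes, 100 blocks each, 120 blocks of live keys:
         * 120 <= 66 * 2, so coalesce 3 nodes into 2. */
        printf("coalesce: %s\n",
               worth_coalescing(120, 100, 3) ? "yes" : "no");
        return 0;
}
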
1464 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1470 if (btree_check_reserve(b, NULL)) in btree_gc_rewrite_node()
1476 if (btree_check_reserve(b, NULL)) { in btree_gc_rewrite_node()
1490 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1500 static unsigned btree_gc_count_keys(struct btree *b) in btree_gc_count_keys() argument
1506 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1512 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1522 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1525 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1528 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1530 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1531 true, b); in btree_gc_recurse()
1532 if (IS_ERR(r->b)) { in btree_gc_recurse()
1533 ret = PTR_ERR(r->b); in btree_gc_recurse()
1537 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1539 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1544 if (!last->b) in btree_gc_recurse()
1547 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1548 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1550 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1555 if (last->b->level) { in btree_gc_recurse()
1556 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1561 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1567 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1568 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1569 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1570 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1571 rw_unlock(true, last->b); in btree_gc_recurse()
1575 r->b = NULL; in btree_gc_recurse()
1584 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1585 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1586 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1587 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1588 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1589 rw_unlock(true, i->b); in btree_gc_recurse()
1595 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1602 should_rewrite = btree_gc_mark_node(b, gc); in bch_btree_gc_root()
1604 n = btree_node_alloc_replacement(b, NULL); in bch_btree_gc_root()
1610 btree_node_free(b); in bch_btree_gc_root()
1617 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1619 if (b->level) { in bch_btree_gc_root()
1620 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1625 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1633 struct bucket *b; in btree_gc_start() local
1645 for_each_bucket(b, ca) { in btree_gc_start()
1646 b->last_gc = b->gen; in btree_gc_start()
1647 if (!atomic_read(&b->pin)) { in btree_gc_start()
1648 SET_GC_MARK(b, 0); in btree_gc_start()
1649 SET_GC_SECTORS_USED(b, 0); in btree_gc_start()
1659 struct bucket *b; in bch_btree_gc_finish() local
1707 for_each_bucket(b, ca) { in bch_btree_gc_finish()
1708 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1710 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1713 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); in bch_btree_gc_finish()
1715 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) in bch_btree_gc_finish()
1809 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1815 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1816 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1818 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1820 if (b->level) { in bch_btree_check_recurse()
1821 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1824 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1827 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1830 ret = btree(check_recurse, p, b, op); in bch_btree_check_recurse()
1851 struct bucket *b; in bch_initial_gc_finish() local
1868 for_each_bucket(b, ca) { in bch_initial_gc_finish()
1872 if (bch_can_invalidate_bucket(ca, b) && in bch_initial_gc_finish()
1873 !GC_MARK(b)) { in bch_initial_gc_finish()
1874 __bch_invalidate_one_bucket(ca, b); in bch_initial_gc_finish()
1876 b - ca->buckets); in bch_initial_gc_finish()
1886 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
1891 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
1893 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
1895 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
1898 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
1905 static size_t insert_u64s_remaining(struct btree *b) in insert_u64s_remaining() argument
1907 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
1912 if (b->keys.ops->is_extents) in insert_u64s_remaining()
1918 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
1923 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
1928 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
1931 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
1932 if (!b->level) in bch_btree_insert_keys()
1933 bkey_put(b->c, k); in bch_btree_insert_keys()
1935 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
1937 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
1941 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
1942 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
1944 ret |= btree_insert_key(b, &temp.key, replace_key); in bch_btree_insert_keys()
1954 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
1956 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
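
Note on bch_btree_insert_keys() (lines 1918-1956): a key wholly at or before the node's end key is inserted as-is, but an extent straddling the boundary is split: bch_cut_back() trims a copy to fit this node and bch_cut_front() leaves the remainder on the keylist for the next node. A sketch with extents reduced to half-open offset ranges:

#include <stdio.h>

struct extent { unsigned long start, end; };

/* Insert into a node covering offsets below `bound`; returns the
 * leftover extent (start == end means nothing remains). */
static struct extent insert(struct extent e, unsigned long bound)
{
        if (e.end <= bound) {
                printf("insert [%lu,%lu) whole\n", e.start, e.end);
                return (struct extent){ e.end, e.end };
        }
        if (e.start < bound) {          /* straddles the boundary */
                printf("insert [%lu,%lu) front half\n", e.start, bound);
                return (struct extent){ bound, e.end };  /* ~ cut_front */
        }
        return e;       /* entirely past this node: untouched */
}

int main(void)
{
        struct extent rest = insert((struct extent){ 90, 130 }, 100);

        printf("leftover [%lu,%lu)\n", rest.start, rest.end);  /* [100,130) */
        return 0;
}
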
1960 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
1973 if (btree_check_reserve(b, op)) { in btree_split()
1974 if (!b->level) in btree_split()
1980 n1 = btree_node_alloc_replacement(b, op); in btree_split()
1985 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; in btree_split()
1990 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
1992 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
1996 if (!b->parent) { in btree_split()
1997 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2027 bkey_copy_key(&n2->key, &b->key); in btree_split()
2034 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2055 } else if (!b->parent) { in btree_split()
2062 make_btree_freeing_key(b, parent_keys.top); in btree_split()
2065 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2069 btree_node_free(b); in btree_split()
2072 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2076 bkey_put(b->c, &n2->key); in btree_split()
2080 bkey_put(b->c, &n1->key); in btree_split()
2084 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
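
Note on btree_split() (lines 1960-2084): the replacement node n1 is actually split only when its merged keys would fill more than 4/5 of a node (line 1985); otherwise the node is just compacted in place. On a split, roughly half the keys move to n2, and if the node was the root, a new root n3 is allocated one level up. A sketch of the decision:

#include <stdbool.h>
#include <stdio.h>

static bool should_split(unsigned set_blocks, unsigned node_blocks)
{
        return set_blocks > node_blocks * 4 / 5;    /* > 80% full */
}

int main(void)
{
        unsigned keys = 1000;

        if (should_split(90, 100)) {
                unsigned mid = keys / 2;    /* n1 keeps [0,mid), n2 the rest */

                printf("split: n1=%u keys, n2=%u keys\n", mid, keys - mid);
        } else {
                printf("compact in place\n");
        }
        return 0;
}
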
2094 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2101 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2105 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2107 if (write_block(b) != btree_bset_last(b) && in bch_btree_insert_node()
2108 b->keys.last_set_unwritten) in bch_btree_insert_node()
2109 bch_btree_init_next(b); /* just wrote a set */ in bch_btree_insert_node()
2111 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { in bch_btree_insert_node()
2112 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2116 BUG_ON(write_block(b) != btree_bset_last(b)); in bch_btree_insert_node()
2118 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2119 if (!b->level) in bch_btree_insert_node()
2120 bch_btree_leaf_dirty(b, journal_ref); in bch_btree_insert_node()
2122 bch_btree_node_write(b, &cl); in bch_btree_insert_node()
2125 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2133 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2135 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2136 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2140 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2150 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2154 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2155 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2162 rw_unlock(false, b); in bch_btree_insert_check_key()
2163 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2165 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2166 b->seq != seq + 1) { in bch_btree_insert_check_key()
2167 op->lock = b->level; in bch_btree_insert_check_key()
2179 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2184 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2195 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) in btree_insert_fn() argument
2200 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2242 void bch_btree_set_root(struct btree *b) in bch_btree_set_root() argument
2249 trace_bcache_btree_set_root(b); in bch_btree_set_root()
2251 BUG_ON(!b->written); in bch_btree_set_root()
2253 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2254 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2256 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2257 list_del_init(&b->list); in bch_btree_set_root()
2258 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2260 b->c->root = b; in bch_btree_set_root()
2262 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2268 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2274 if (b->level) { in bch_btree_map_nodes_recurse()
2278 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2280 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2282 ret = btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2291 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2292 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2303 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2311 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2313 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2314 ret = !b->level in bch_btree_map_keys_recurse()
2315 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2316 : btree(map_keys_recurse, k, b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2323 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2324 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2325 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
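
Note on bch_btree_map_keys_recurse() (lines 2303-2325): this is the generic traversal the rest of bcache builds on; leaves feed each key to the callback, interior nodes recurse through btree(), and a nonzero return (MAP_DONE in the kernel code) short-circuits the walk. A sketch of the pattern over a toy tree:

#include <stdio.h>

struct node {
        int level;
        int nr;
        union {
                long key[4];
                struct node *child[4];
        } u;
};

static int map_keys(struct node *n, int (*fn)(long key))
{
        for (int i = 0; i < n->nr; i++) {
                int ret = n->level ? map_keys(n->u.child[i], fn)
                                   : fn(n->u.key[i]);

                if (ret)
                        return ret;     /* propagate early stop */
        }
        return 0;
}

static int print_key(long k)
{
        printf("key %ld\n", k);
        return 0;
}

int main(void)
{
        struct node leaf = { 0, 2, { .key = { 10, 20 } } };
        struct node root = { 1, 1, { .child = { &leaf } } };

        return map_keys(&root, print_key);
}
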
2362 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument