Lines matching references to the identifier c (the struct cache_set pointer) in drivers/md/bcache/super.c; each entry shows the source line number, the matching line, and the enclosing function.
303 struct cache_set *c = container_of(cl, struct cache_set, sb_write); in bcache_write_super_unlock() local
305 up(&c->sb_write_mutex); in bcache_write_super_unlock()
308 void bcache_write_super(struct cache_set *c) in bcache_write_super() argument
310 struct closure *cl = &c->sb_write; in bcache_write_super()
314 down(&c->sb_write_mutex); in bcache_write_super()
315 closure_init(cl, &c->cl); in bcache_write_super()
317 c->sb.seq++; in bcache_write_super()
319 for_each_cache(ca, c, i) { in bcache_write_super()
323 ca->sb.seq = c->sb.seq; in bcache_write_super()
324 ca->sb.last_mount = c->sb.last_mount; in bcache_write_super()
326 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); in bcache_write_super()
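The bcache_write_super() lines above show the serialization pattern for superblock writes: take c->sb_write_mutex (a semaphore used as a mutex), bump c->sb.seq, copy the set's superblock fields into each cache's sb, and release the semaphore only in bcache_write_super_unlock(), i.e. from the closure's completion path once the per-cache writes finish. A minimal userspace sketch of that release-from-completion idiom follows; fake_cache_set, write_super() and write_super_done() are invented names, and the completion runs synchronously here, unlike the asynchronous closure in bcache.

#include <semaphore.h>
#include <stdio.h>

struct fake_cache_set {
    sem_t sb_write_mutex;       /* serializes superblock writers */
    unsigned long long seq;     /* stands in for c->sb.seq */
};

static void write_super_done(struct fake_cache_set *c)
{
    /* completion path: the writer itself never releases the mutex */
    sem_post(&c->sb_write_mutex);
}

static void write_super(struct fake_cache_set *c)
{
    sem_wait(&c->sb_write_mutex);   /* down(&c->sb_write_mutex) */
    c->seq++;                       /* c->sb.seq++ */
    printf("writing superblock, seq=%llu\n", c->seq);
    write_super_done(c);            /* in bcache this runs asynchronously */
}

int main(void)
{
    struct fake_cache_set c;

    sem_init(&c.sb_write_mutex, 0, 1);
    write_super(&c);
    write_super(&c);
    sem_destroy(&c.sb_write_mutex);
    return 0;
}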
345 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio() local
347 cache_set_err_on(error, c, "accessing uuids"); in uuid_endio()
348 bch_bbio_free(bio, c); in uuid_endio()
354 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_io_unlock() local
356 up(&c->uuid_write_mutex); in uuid_io_unlock()
359 static void uuid_io(struct cache_set *c, unsigned long rw, in uuid_io() argument
362 struct closure *cl = &c->uuid_write; in uuid_io()
368 down(&c->uuid_write_mutex); in uuid_io()
372 struct bio *bio = bch_bbio_alloc(c); in uuid_io()
379 bch_bio_map(bio, c->uuids); in uuid_io()
381 bch_submit_bbio(bio, c, k, i); in uuid_io()
390 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) in uuid_io()
393 u - c->uuids, u->uuid, u->label, in uuid_io()
399 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read() argument
403 if (__bch_btree_ptr_invalid(c, k)) in uuid_read()
406 bkey_copy(&c->uuid_bucket, k); in uuid_read()
407 uuid_io(c, READ_SYNC, k, cl); in uuid_read()
410 struct uuid_entry_v0 *u0 = (void *) c->uuids; in uuid_read()
411 struct uuid_entry *u1 = (void *) c->uuids; in uuid_read()
422 for (i = c->nr_uuids - 1; in uuid_read()
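The u0/u1 aliases and the loop starting at c->nr_uuids - 1 above are an in-place format upgrade in uuid_read(): the same buffer holds old uuid_entry_v0 records that must be widened into the current uuid_entry layout, and walking from the last entry down ensures each old record is read before the wider records, which grow toward higher offsets, can overwrite it. A self-contained sketch of that backwards widening pass, with invented record types (old_rec/new_rec) rather than bcache's actual structures:

#include <stdio.h>
#include <string.h>

struct old_rec { unsigned char uuid[16]; unsigned long long sectors; };
struct new_rec { unsigned char uuid[16]; unsigned long long sectors;
                 unsigned long long flags; };

static void upgrade_in_place(void *buf, int nr)
{
    struct old_rec *u0 = buf;
    struct new_rec *u1 = buf;
    int i;

    /* last-to-first: old entry i is copied out before the wider new
     * entries, which occupy higher offsets, can clobber it */
    for (i = nr - 1; i >= 0; --i) {
        struct old_rec tmp = u0[i];

        memcpy(u1[i].uuid, tmp.uuid, 16);
        u1[i].sectors = tmp.sectors;
        u1[i].flags = 0;
    }
}

int main(void)
{
    struct new_rec buf[4];              /* sized for the new, wider layout */
    struct old_rec *old = (void *) buf;
    struct new_rec *out = (void *) buf;
    int i;

    memset(buf, 0, sizeof(buf));
    for (i = 0; i < 4; i++)
        old[i].sectors = i + 1;

    upgrade_in_place(buf, 4);
    printf("rec 3: sectors=%llu flags=%llu\n", out[3].sectors, out[3].flags);
    return 0;
}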
440 static int __uuid_write(struct cache_set *c) in __uuid_write() argument
448 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) in __uuid_write()
451 SET_KEY_SIZE(&k.key, c->sb.bucket_size); in __uuid_write()
452 uuid_io(c, REQ_WRITE, &k.key, &cl); in __uuid_write()
455 bkey_copy(&c->uuid_bucket, &k.key); in __uuid_write()
456 bkey_put(c, &k.key); in __uuid_write()
460 int bch_uuid_write(struct cache_set *c) in bch_uuid_write() argument
462 int ret = __uuid_write(c); in bch_uuid_write()
465 bch_journal_meta(c, NULL); in bch_uuid_write()
470 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find() argument
474 for (u = c->uuids; in uuid_find()
475 u < c->uuids + c->nr_uuids; u++) in uuid_find()
482 static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty() argument
485 return uuid_find(c, zero_uuid); in uuid_find_empty()
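uuid_find() and uuid_find_empty() above are a linear scan of the c->uuids array comparing 16-byte UUIDs, with the all-zero UUID standing for a free slot. A userspace sketch of the same lookup; fake_uuid_entry and the array layout are illustrative stand-ins for bcache's struct uuid_entry:

#include <stdio.h>
#include <string.h>

struct fake_uuid_entry {
    unsigned char uuid[16];
};

static struct fake_uuid_entry *find_uuid(struct fake_uuid_entry *uuids,
                                         int nr, const unsigned char *uuid)
{
    for (int i = 0; i < nr; i++)
        if (!memcmp(uuids[i].uuid, uuid, 16))
            return &uuids[i];
    return NULL;
}

static struct fake_uuid_entry *find_empty(struct fake_uuid_entry *uuids, int nr)
{
    static const unsigned char zero_uuid[16];   /* all zeroes = free slot */

    return find_uuid(uuids, nr, zero_uuid);
}

int main(void)
{
    struct fake_uuid_entry uuids[4] = { { { 0xde, 0xad } } };
    struct fake_uuid_entry *u = find_empty(uuids, 4);

    if (u)
        printf("first empty slot at index %td\n", u - uuids);
    return 0;
}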
684 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { in bcache_device_unlink()
688 sysfs_remove_link(&d->c->kobj, d->name); in bcache_device_unlink()
691 for_each_cache(ca, d->c, i) in bcache_device_unlink()
696 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, in bcache_device_link() argument
702 for_each_cache(ca, d->c, i) in bcache_device_link()
708 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || in bcache_device_link()
709 sysfs_create_link(&c->kobj, &d->kobj, d->name), in bcache_device_link()
720 struct uuid_entry *u = d->c->uuids + d->id; in bcache_device_detach()
725 bch_uuid_write(d->c); in bcache_device_detach()
730 d->c->devices[d->id] = NULL; in bcache_device_detach()
731 closure_put(&d->c->caching); in bcache_device_detach()
732 d->c = NULL; in bcache_device_detach()
735 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, in bcache_device_attach() argument
739 d->c = c; in bcache_device_attach()
740 c->devices[id] = d; in bcache_device_attach()
742 closure_get(&c->caching); in bcache_device_attach()
751 if (d->c) in bcache_device_free()
857 static void calc_cached_dev_sectors(struct cache_set *c) in calc_cached_dev_sectors() argument
862 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
865 c->cached_dev_sectors = sectors; in calc_cached_dev_sectors()
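calc_cached_dev_sectors() just walks c->cached_devs and totals the size, in sectors, of every attached backing device into c->cached_dev_sectors. A tiny sketch of the same accumulation over a hand-rolled singly linked list; the kernel iterates with list_for_each_entry(), and fake_cached_dev and its fields are invented:

#include <stdio.h>

struct fake_cached_dev {
    unsigned long long sectors;
    struct fake_cached_dev *next;
};

static unsigned long long calc_sectors(struct fake_cached_dev *head)
{
    unsigned long long total = 0;

    for (struct fake_cached_dev *dc = head; dc; dc = dc->next)
        total += dc->sectors;
    return total;       /* stored into c->cached_dev_sectors in the kernel */
}

int main(void)
{
    struct fake_cached_dev b = { 1 << 21, NULL };   /* 1 GiB of 512B sectors */
    struct fake_cached_dev a = { 1 << 20, &b };     /* 512 MiB */

    printf("cached_dev_sectors = %llu\n", calc_sectors(&a));
    return 0;
}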
889 if (!d->c && in bch_cached_dev_run()
964 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) in bch_cached_dev_attach() argument
972 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) in bch_cached_dev_attach()
975 if (dc->disk.c) { in bch_cached_dev_attach()
980 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { in bch_cached_dev_attach()
985 if (dc->sb.block_size < c->sb.block_size) { in bch_cached_dev_attach()
992 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
1008 u = uuid_find_empty(c); in bch_cached_dev_attach()
1026 bch_uuid_write(c); in bch_cached_dev_attach()
1028 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); in bch_cached_dev_attach()
1035 bch_uuid_write(c); in bch_cached_dev_attach()
1038 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1039 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1040 calc_cached_dev_sectors(c); in bch_cached_dev_attach()
1064 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1071 dc->disk.c->sb.set_uuid); in bch_cached_dev_attach()
1178 struct cache_set *c; in register_bdev() local
1203 list_for_each_entry(c, &bch_cache_sets, list) in register_bdev()
1204 bch_cached_dev_attach(dc, c); in register_bdev()
1245 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) in flash_dev_run() argument
1257 if (bcache_device_init(d, block_bytes(c), u->sectors)) in flash_dev_run()
1260 bcache_device_attach(d, c, u - c->uuids); in flash_dev_run()
1267 bcache_device_link(d, c, "volume"); in flash_dev_run()
1275 static int flash_devs_run(struct cache_set *c) in flash_devs_run() argument
1280 for (u = c->uuids; in flash_devs_run()
1281 u < c->uuids + c->nr_uuids && !ret; in flash_devs_run()
1284 ret = flash_dev_run(c, u); in flash_devs_run()
1289 int bch_flash_dev_create(struct cache_set *c, uint64_t size) in bch_flash_dev_create() argument
1293 if (test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_flash_dev_create()
1296 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) in bch_flash_dev_create()
1299 u = uuid_find_empty(c); in bch_flash_dev_create()
1312 bch_uuid_write(c); in bch_flash_dev_create()
1314 return flash_dev_run(c, u); in bch_flash_dev_create()
1320 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) in bch_cache_set_error() argument
1324 if (c->on_error != ON_ERROR_PANIC && in bch_cache_set_error()
1325 test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_error()
1332 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); in bch_cache_set_error()
1340 if (c->on_error == ON_ERROR_PANIC) in bch_cache_set_error()
1343 bch_cache_set_unregister(c); in bch_cache_set_error()
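The bch_cache_set_error() lines encode the error policy: an error on a set that is already stopping is ignored unless on_error is ON_ERROR_PANIC; otherwise the error is logged against the set UUID and the set is either panicked or unregistered. A hedged userspace rendering of that decision follows; the non-panic enum name and the early-return body are assumptions, and abort() stands in for the kernel's panic():

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* ON_ERROR_PANIC appears in the listing; the name of the non-panic
 * alternative is assumed here. */
enum on_error { ON_ERROR_UNREGISTER, ON_ERROR_PANIC };

struct fake_cache_set {
    enum on_error on_error;
    bool stopping;              /* stands in for CACHE_SET_STOPPING */
};

static bool cache_set_error(struct fake_cache_set *c, const char *msg)
{
    /* an error on a set that is already stopping is ignored, unless the
     * policy is to panic */
    if (c->on_error != ON_ERROR_PANIC && c->stopping)
        return false;

    fprintf(stderr, "bcache: error on <set uuid>: %s\n", msg);

    if (c->on_error == ON_ERROR_PANIC)
        abort();                /* panic() in the kernel */

    c->stopping = true;         /* bch_cache_set_unregister() in the kernel */
    return true;
}

int main(void)
{
    struct fake_cache_set c = { ON_ERROR_UNREGISTER, false };

    cache_set_error(&c, "accessing uuids");
    return 0;
}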
1349 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in bch_cache_set_release() local
1350 kfree(c); in bch_cache_set_release()
1356 struct cache_set *c = container_of(cl, struct cache_set, cl); in cache_set_free() local
1360 if (!IS_ERR_OR_NULL(c->debug)) in cache_set_free()
1361 debugfs_remove(c->debug); in cache_set_free()
1363 bch_open_buckets_free(c); in cache_set_free()
1364 bch_btree_cache_free(c); in cache_set_free()
1365 bch_journal_free(c); in cache_set_free()
1367 for_each_cache(ca, c, i) in cache_set_free()
1370 c->cache[ca->sb.nr_this_dev] = NULL; in cache_set_free()
1374 bch_bset_sort_state_free(&c->sort); in cache_set_free()
1375 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); in cache_set_free()
1377 if (c->moving_gc_wq) in cache_set_free()
1378 destroy_workqueue(c->moving_gc_wq); in cache_set_free()
1379 if (c->bio_split) in cache_set_free()
1380 bioset_free(c->bio_split); in cache_set_free()
1381 if (c->fill_iter) in cache_set_free()
1382 mempool_destroy(c->fill_iter); in cache_set_free()
1383 if (c->bio_meta) in cache_set_free()
1384 mempool_destroy(c->bio_meta); in cache_set_free()
1385 if (c->search) in cache_set_free()
1386 mempool_destroy(c->search); in cache_set_free()
1387 kfree(c->devices); in cache_set_free()
1390 list_del(&c->list); in cache_set_free()
1393 pr_info("Cache set %pU unregistered", c->sb.set_uuid); in cache_set_free()
1396 closure_debug_destroy(&c->cl); in cache_set_free()
1397 kobject_put(&c->kobj); in cache_set_free()
1402 struct cache_set *c = container_of(cl, struct cache_set, caching); in cache_set_flush() local
1407 if (!c) in cache_set_flush()
1410 bch_cache_accounting_destroy(&c->accounting); in cache_set_flush()
1412 kobject_put(&c->internal); in cache_set_flush()
1413 kobject_del(&c->kobj); in cache_set_flush()
1415 if (c->gc_thread) in cache_set_flush()
1416 kthread_stop(c->gc_thread); in cache_set_flush()
1418 if (!IS_ERR_OR_NULL(c->root)) in cache_set_flush()
1419 list_add(&c->root->list, &c->btree_cache); in cache_set_flush()
1422 list_for_each_entry(b, &c->btree_cache, list) { in cache_set_flush()
1429 for_each_cache(ca, c, i) in cache_set_flush()
1433 if (c->journal.cur) { in cache_set_flush()
1434 cancel_delayed_work_sync(&c->journal.work); in cache_set_flush()
1436 c->journal.work.work.func(&c->journal.work.work); in cache_set_flush()
1444 struct cache_set *c = container_of(cl, struct cache_set, caching); in __cache_set_unregister() local
1450 for (i = 0; i < c->nr_uuids; i++) in __cache_set_unregister()
1451 if (c->devices[i]) { in __cache_set_unregister()
1452 if (!UUID_FLASH_ONLY(&c->uuids[i]) && in __cache_set_unregister()
1453 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { in __cache_set_unregister()
1454 dc = container_of(c->devices[i], in __cache_set_unregister()
1458 bcache_device_stop(c->devices[i]); in __cache_set_unregister()
1467 void bch_cache_set_stop(struct cache_set *c) in bch_cache_set_stop() argument
1469 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_stop()
1470 closure_queue(&c->caching); in bch_cache_set_stop()
1473 void bch_cache_set_unregister(struct cache_set *c) in bch_cache_set_unregister() argument
1475 set_bit(CACHE_SET_UNREGISTERING, &c->flags); in bch_cache_set_unregister()
1476 bch_cache_set_stop(c); in bch_cache_set_unregister()
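bch_cache_set_stop() and bch_cache_set_unregister() differ only in that unregister additionally sets CACHE_SET_UNREGISTERING before stopping, and stop uses test_and_set_bit() so the teardown closure is queued at most once no matter how many callers race. A small C11 sketch of that idempotent-stop idiom with atomic flags; the struct and queue_teardown() are invented, only the flag roles mirror the listing:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cache_set {
    atomic_bool stopping;       /* CACHE_SET_STOPPING */
    atomic_bool unregistering;  /* CACHE_SET_UNREGISTERING */
};

static void queue_teardown(struct fake_cache_set *c)
{
    (void) c;
    printf("teardown queued\n");    /* closure_queue(&c->caching) in bcache */
}

static void cache_set_stop(struct fake_cache_set *c)
{
    /* like test_and_set_bit(): only the first caller queues the teardown */
    if (!atomic_exchange(&c->stopping, true))
        queue_teardown(c);
}

static void cache_set_unregister(struct fake_cache_set *c)
{
    /* __cache_set_unregister() also detaches cached devices when this is set */
    atomic_store(&c->unregistering, true);
    cache_set_stop(c);
}

int main(void)
{
    struct fake_cache_set c = { false, false };

    cache_set_unregister(&c);
    cache_set_stop(&c);             /* second stop is a no-op */
    return 0;
}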
1479 #define alloc_bucket_pages(gfp, c) \ argument
1480 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
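alloc_bucket_pages() allocates one bucket's worth of zeroed pages: ilog2(bucket_pages(c)) converts the page count into the power-of-two order that __get_free_pages() expects. A userspace sketch of that order arithmetic, assuming bucket_pages is the bucket size in bytes divided by the page size and using example sizes; calloc() stands in for __get_free_pages(__GFP_ZERO|gfp, order):

#include <stdio.h>
#include <stdlib.h>

/* integer log2 for power-of-two inputs, like the kernel's ilog2() */
static unsigned ilog2_u(unsigned long v)
{
    unsigned r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned long page_size   = 4096;           /* example */
    unsigned long bucket_size = 1024;           /* in 512-byte sectors, example */
    unsigned long bucket_bytes = bucket_size << 9;
    unsigned long bucket_pages = bucket_bytes / page_size;
    unsigned order = ilog2_u(bucket_pages);     /* what alloc_bucket_pages() passes */

    /* __get_free_pages(__GFP_ZERO|gfp, order) returns 2^order contiguous pages */
    void *buf = calloc(1UL << order, page_size);

    printf("bucket_pages=%lu -> order=%u -> %lu bytes\n",
           bucket_pages, order, (1UL << order) * page_size);
    free(buf);
    return 0;
}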
1485 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); in bch_cache_set_alloc() local
1486 if (!c) in bch_cache_set_alloc()
1490 closure_init(&c->cl, NULL); in bch_cache_set_alloc()
1491 set_closure_fn(&c->cl, cache_set_free, system_wq); in bch_cache_set_alloc()
1493 closure_init(&c->caching, &c->cl); in bch_cache_set_alloc()
1494 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); in bch_cache_set_alloc()
1497 closure_set_stopped(&c->cl); in bch_cache_set_alloc()
1498 closure_put(&c->cl); in bch_cache_set_alloc()
1500 kobject_init(&c->kobj, &bch_cache_set_ktype); in bch_cache_set_alloc()
1501 kobject_init(&c->internal, &bch_cache_set_internal_ktype); in bch_cache_set_alloc()
1503 bch_cache_accounting_init(&c->accounting, &c->cl); in bch_cache_set_alloc()
1505 memcpy(c->sb.set_uuid, sb->set_uuid, 16); in bch_cache_set_alloc()
1506 c->sb.block_size = sb->block_size; in bch_cache_set_alloc()
1507 c->sb.bucket_size = sb->bucket_size; in bch_cache_set_alloc()
1508 c->sb.nr_in_set = sb->nr_in_set; in bch_cache_set_alloc()
1509 c->sb.last_mount = sb->last_mount; in bch_cache_set_alloc()
1510 c->bucket_bits = ilog2(sb->bucket_size); in bch_cache_set_alloc()
1511 c->block_bits = ilog2(sb->block_size); in bch_cache_set_alloc()
1512 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); in bch_cache_set_alloc()
1514 c->btree_pages = bucket_pages(c); in bch_cache_set_alloc()
1515 if (c->btree_pages > BTREE_MAX_PAGES) in bch_cache_set_alloc()
1516 c->btree_pages = max_t(int, c->btree_pages / 4, in bch_cache_set_alloc()
1519 sema_init(&c->sb_write_mutex, 1); in bch_cache_set_alloc()
1520 mutex_init(&c->bucket_lock); in bch_cache_set_alloc()
1521 init_waitqueue_head(&c->btree_cache_wait); in bch_cache_set_alloc()
1522 init_waitqueue_head(&c->bucket_wait); in bch_cache_set_alloc()
1523 sema_init(&c->uuid_write_mutex, 1); in bch_cache_set_alloc()
1525 spin_lock_init(&c->btree_gc_time.lock); in bch_cache_set_alloc()
1526 spin_lock_init(&c->btree_split_time.lock); in bch_cache_set_alloc()
1527 spin_lock_init(&c->btree_read_time.lock); in bch_cache_set_alloc()
1529 bch_moving_init_cache_set(c); in bch_cache_set_alloc()
1531 INIT_LIST_HEAD(&c->list); in bch_cache_set_alloc()
1532 INIT_LIST_HEAD(&c->cached_devs); in bch_cache_set_alloc()
1533 INIT_LIST_HEAD(&c->btree_cache); in bch_cache_set_alloc()
1534 INIT_LIST_HEAD(&c->btree_cache_freeable); in bch_cache_set_alloc()
1535 INIT_LIST_HEAD(&c->btree_cache_freed); in bch_cache_set_alloc()
1536 INIT_LIST_HEAD(&c->data_buckets); in bch_cache_set_alloc()
1538 c->search = mempool_create_slab_pool(32, bch_search_cache); in bch_cache_set_alloc()
1539 if (!c->search) in bch_cache_set_alloc()
1545 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || in bch_cache_set_alloc()
1546 !(c->bio_meta = mempool_create_kmalloc_pool(2, in bch_cache_set_alloc()
1548 bucket_pages(c))) || in bch_cache_set_alloc()
1549 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || in bch_cache_set_alloc()
1550 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || in bch_cache_set_alloc()
1551 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || in bch_cache_set_alloc()
1552 !(c->moving_gc_wq = create_workqueue("bcache_gc")) || in bch_cache_set_alloc()
1553 bch_journal_alloc(c) || in bch_cache_set_alloc()
1554 bch_btree_cache_alloc(c) || in bch_cache_set_alloc()
1555 bch_open_buckets_alloc(c) || in bch_cache_set_alloc()
1556 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) in bch_cache_set_alloc()
1559 c->congested_read_threshold_us = 2000; in bch_cache_set_alloc()
1560 c->congested_write_threshold_us = 20000; in bch_cache_set_alloc()
1561 c->error_limit = 8 << IO_ERROR_SHIFT; in bch_cache_set_alloc()
1563 return c; in bch_cache_set_alloc()
1565 bch_cache_set_unregister(c); in bch_cache_set_alloc()
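bch_cache_set_alloc() chains its allocations with || so the first failure falls through to a single error path, and that path simply calls bch_cache_set_unregister(c): because the object was kzalloc()ed up front, every not-yet-allocated field is NULL and the normal teardown can free a partially built cache_set unconditionally. A stripped-down sketch of the idiom with an explicit free function instead of the closure-driven teardown; fake_cache_set and the sizes are invented:

#include <stdlib.h>

struct fake_cache_set {
    void **devices;
    void  *uuids;
    void  *bio_meta;
};

static void fake_cache_set_free(struct fake_cache_set *c)
{
    /* every field is either NULL or valid, so freeing is unconditional */
    free(c->bio_meta);
    free(c->uuids);
    free(c->devices);
    free(c);
}

static struct fake_cache_set *fake_cache_set_alloc(size_t nr_uuids,
                                                   size_t bucket_bytes)
{
    struct fake_cache_set *c = calloc(1, sizeof(*c));   /* kzalloc() */

    if (!c)
        return NULL;

    if (!(c->devices  = calloc(nr_uuids, sizeof(void *))) ||
        !(c->uuids    = calloc(1, bucket_bytes)) ||
        !(c->bio_meta = malloc(256)))
        goto err;

    return c;
err:
    fake_cache_set_free(c);     /* bch_cache_set_unregister(c) in the kernel */
    return NULL;
}

int main(void)
{
    struct fake_cache_set *c = fake_cache_set_alloc(256, 1 << 16);

    if (c)
        fake_cache_set_free(c);
    return 0;
}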
1569 static void run_cache_set(struct cache_set *c) in run_cache_set() argument
1579 for_each_cache(ca, c, i) in run_cache_set()
1580 c->nbuckets += ca->sb.nbuckets; in run_cache_set()
1582 if (CACHE_SYNC(&c->sb)) { in run_cache_set()
1588 if (bch_journal_read(c, &journal)) in run_cache_set()
1600 for_each_cache(ca, c, i) in run_cache_set()
1612 if (__bch_btree_ptr_invalid(c, k)) in run_cache_set()
1616 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); in run_cache_set()
1617 if (IS_ERR_OR_NULL(c->root)) in run_cache_set()
1620 list_del_init(&c->root->list); in run_cache_set()
1621 rw_unlock(true, c->root); in run_cache_set()
1623 err = uuid_read(c, j, &cl); in run_cache_set()
1628 if (bch_btree_check(c)) in run_cache_set()
1631 bch_journal_mark(c, &journal); in run_cache_set()
1632 bch_initial_gc_finish(c); in run_cache_set()
1640 bch_journal_next(&c->journal); in run_cache_set()
1643 for_each_cache(ca, c, i) in run_cache_set()
1658 __uuid_write(c); in run_cache_set()
1660 bch_journal_replay(c, &journal); in run_cache_set()
1664 for_each_cache(ca, c, i) { in run_cache_set()
1674 bch_initial_gc_finish(c); in run_cache_set()
1677 for_each_cache(ca, c, i) in run_cache_set()
1681 mutex_lock(&c->bucket_lock); in run_cache_set()
1682 for_each_cache(ca, c, i) in run_cache_set()
1684 mutex_unlock(&c->bucket_lock); in run_cache_set()
1687 if (__uuid_write(c)) in run_cache_set()
1691 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); in run_cache_set()
1692 if (IS_ERR_OR_NULL(c->root)) in run_cache_set()
1695 mutex_lock(&c->root->write_lock); in run_cache_set()
1696 bkey_copy_key(&c->root->key, &MAX_KEY); in run_cache_set()
1697 bch_btree_node_write(c->root, &cl); in run_cache_set()
1698 mutex_unlock(&c->root->write_lock); in run_cache_set()
1700 bch_btree_set_root(c->root); in run_cache_set()
1701 rw_unlock(true, c->root); in run_cache_set()
1708 SET_CACHE_SYNC(&c->sb, true); in run_cache_set()
1710 bch_journal_next(&c->journal); in run_cache_set()
1711 bch_journal_meta(c, &cl); in run_cache_set()
1715 if (bch_gc_thread_start(c)) in run_cache_set()
1719 c->sb.last_mount = get_seconds(); in run_cache_set()
1720 bcache_write_super(c); in run_cache_set()
1723 bch_cached_dev_attach(dc, c); in run_cache_set()
1725 flash_devs_run(c); in run_cache_set()
1727 set_bit(CACHE_SET_RUNNING, &c->flags); in run_cache_set()
1732 bch_cache_set_error(c, "%s", err); in run_cache_set()
1735 static bool can_attach_cache(struct cache *ca, struct cache_set *c) in can_attach_cache() argument
1737 return ca->sb.block_size == c->sb.block_size && in can_attach_cache()
1738 ca->sb.bucket_size == c->sb.bucket_size && in can_attach_cache()
1739 ca->sb.nr_in_set == c->sb.nr_in_set; in can_attach_cache()
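can_attach_cache() is visible in full above: a cache may join a set only if its block size, bucket size, and nr_in_set match the set's superblock. A direct restatement with stand-in structs; the field names follow the listing, the surrounding types are invented:

#include <stdbool.h>

struct fake_sb {
    unsigned block_size;    /* in sectors */
    unsigned bucket_size;   /* in sectors */
    unsigned nr_in_set;
};

static bool can_attach_cache(const struct fake_sb *ca, const struct fake_sb *set)
{
    return ca->block_size  == set->block_size &&
           ca->bucket_size == set->bucket_size &&
           ca->nr_in_set   == set->nr_in_set;
}

int main(void)
{
    struct fake_sb set = { 8, 1024, 2 }, ca = { 8, 1024, 2 };

    return can_attach_cache(&ca, &set) ? 0 : 1;
}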
1746 struct cache_set *c; in register_cache_set() local
1748 list_for_each_entry(c, &bch_cache_sets, list) in register_cache_set()
1749 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
1750 if (c->cache[ca->sb.nr_this_dev]) in register_cache_set()
1753 if (!can_attach_cache(ca, c)) in register_cache_set()
1757 SET_CACHE_SYNC(&c->sb, false); in register_cache_set()
1762 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
1763 if (!c) in register_cache_set()
1767 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || in register_cache_set()
1768 kobject_add(&c->internal, &c->kobj, "internal")) in register_cache_set()
1771 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) in register_cache_set()
1774 bch_debug_init_cache_set(c); in register_cache_set()
1776 list_add(&c->list, &bch_cache_sets); in register_cache_set()
1779 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
1780 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
1783 if (ca->sb.seq > c->sb.seq) { in register_cache_set()
1784 c->sb.version = ca->sb.version; in register_cache_set()
1785 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); in register_cache_set()
1786 c->sb.flags = ca->sb.flags; in register_cache_set()
1787 c->sb.seq = ca->sb.seq; in register_cache_set()
1788 pr_debug("set version = %llu", c->sb.version); in register_cache_set()
1792 ca->set = c; in register_cache_set()
1794 c->cache_by_alloc[c->caches_loaded++] = ca; in register_cache_set()
1796 if (c->caches_loaded == c->sb.nr_in_set) in register_cache_set()
1797 run_cache_set(c); in register_cache_set()
1801 bch_cache_set_unregister(c); in register_cache_set()
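register_cache_set() reconciles member superblocks: if an arriving cache's sb.seq is newer than the set's, the set adopts that cache's version, set_uuid, flags, and seq, so the highest-sequence superblock wins. A sketch of that merge with an invented fake_sb type:

#include <stdio.h>
#include <string.h>

struct fake_sb {
    unsigned long long seq;
    unsigned long long version;
    unsigned char set_uuid[16];
    unsigned long long flags;
};

/* mirrors the "newest superblock wins" merge in register_cache_set() */
static void adopt_newer_sb(struct fake_sb *set, const struct fake_sb *ca)
{
    if (ca->seq > set->seq) {
        set->version = ca->version;
        memcpy(set->set_uuid, ca->set_uuid, 16);
        set->flags = ca->flags;
        set->seq   = ca->seq;
    }
}

int main(void)
{
    struct fake_sb set = { .seq = 3 }, ca = { .seq = 5, .version = 1 };

    adopt_newer_sb(&set, &ca);
    printf("set seq=%llu version=%llu\n", set.seq, set.version);
    return 0;
}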
1935 struct cache_set *c, *tc; in bch_is_open_backing() local
1938 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_backing()
1939 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
1949 struct cache_set *c, *tc; in bch_is_open_cache() local
1953 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_cache()
1954 for_each_cache(ca, c, i) in bch_is_open_cache()
2049 struct cache_set *c, *tc; in bcache_reboot() local
2060 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bcache_reboot()
2061 bch_cache_set_stop(c); in bcache_reboot()