Lines matching refs:c (cross-reference hits for the struct cache_set pointer c). Each entry shows the source line number, the matched code, and the enclosing function; "local" and "argument" mark the lines where c is declared.
280 struct cache_set *c = container_of(cl, struct cache_set, sb_write); in bcache_write_super_unlock() local
282 up(&c->sb_write_mutex); in bcache_write_super_unlock()
285 void bcache_write_super(struct cache_set *c) in bcache_write_super() argument
287 struct closure *cl = &c->sb_write; in bcache_write_super()
291 down(&c->sb_write_mutex); in bcache_write_super()
292 closure_init(cl, &c->cl); in bcache_write_super()
294 c->sb.seq++; in bcache_write_super()
296 for_each_cache(ca, c, i) { in bcache_write_super()
300 ca->sb.seq = c->sb.seq; in bcache_write_super()
301 ca->sb.last_mount = c->sb.last_mount; in bcache_write_super()
303 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); in bcache_write_super()
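Note: the hits at 280-303 outline the superblock write path. bcache_write_super() serializes itself on c->sb_write_mutex (taken with down() at 291, released with up() in bcache_write_super_unlock() at 282), bumps the set-wide sequence number, and stamps seq, last_mount and the SYNC flag into each member cache's superblock before it is written. The unlock callback only receives the closure, so it recovers the enclosing cache_set with container_of(). Below is a minimal user-space model of that container_of() recovery; the struct layout and the int standing in for the semaphore are illustrative only, not the bcache definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure { int dummy; };

struct cache_set {
	int sb_write_mutex;		/* stands in for the real semaphore */
	struct closure sb_write;	/* embedded closure, as in the hits above */
};

static void bcache_write_super_unlock(struct closure *cl)
{
	/* recover the enclosing cache_set from the embedded member */
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	c->sb_write_mutex = 1;		/* models up(&c->sb_write_mutex) */
}

int main(void)
{
	struct cache_set c = { .sb_write_mutex = 0 };

	bcache_write_super_unlock(&c.sb_write);
	printf("mutex released: %d\n", c.sb_write_mutex);
	return 0;
}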
322 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio() local
324 cache_set_err_on(bio->bi_error, c, "accessing uuids"); in uuid_endio()
325 bch_bbio_free(bio, c); in uuid_endio()
331 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_io_unlock() local
333 up(&c->uuid_write_mutex); in uuid_io_unlock()
336 static void uuid_io(struct cache_set *c, unsigned long rw, in uuid_io() argument
339 struct closure *cl = &c->uuid_write; in uuid_io()
345 down(&c->uuid_write_mutex); in uuid_io()
349 struct bio *bio = bch_bbio_alloc(c); in uuid_io()
356 bch_bio_map(bio, c->uuids); in uuid_io()
358 bch_submit_bbio(bio, c, k, i); in uuid_io()
367 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) in uuid_io()
370 u - c->uuids, u->uuid, u->label, in uuid_io()
376 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read() argument
380 if (__bch_btree_ptr_invalid(c, k)) in uuid_read()
383 bkey_copy(&c->uuid_bucket, k); in uuid_read()
384 uuid_io(c, READ_SYNC, k, cl); in uuid_read()
387 struct uuid_entry_v0 *u0 = (void *) c->uuids; in uuid_read()
388 struct uuid_entry *u1 = (void *) c->uuids; in uuid_read()
399 for (i = c->nr_uuids - 1; in uuid_read()
417 static int __uuid_write(struct cache_set *c) in __uuid_write() argument
425 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) in __uuid_write()
428 SET_KEY_SIZE(&k.key, c->sb.bucket_size); in __uuid_write()
429 uuid_io(c, REQ_WRITE, &k.key, &cl); in __uuid_write()
432 bkey_copy(&c->uuid_bucket, &k.key); in __uuid_write()
433 bkey_put(c, &k.key); in __uuid_write()
437 int bch_uuid_write(struct cache_set *c) in bch_uuid_write() argument
439 int ret = __uuid_write(c); in bch_uuid_write()
442 bch_journal_meta(c, NULL); in bch_uuid_write()
447 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find() argument
451 for (u = c->uuids; in uuid_find()
452 u < c->uuids + c->nr_uuids; u++) in uuid_find()
459 static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty() argument
462 return uuid_find(c, zero_uuid); in uuid_find_empty()
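Note: the hits at 447-462 show that the in-core UUID table is a flat array of c->nr_uuids entries; uuid_find() does a linear scan and uuid_find_empty() reuses it with an all-zero UUID. A rough reconstruction from those hits follows; the 16-byte memcmp length is an assumption based on the 16-byte set_uuid compares elsewhere in the listing (e.g. line 941).

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;	/* first matching slot */

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16];

	/* an unused slot is simply one whose UUID is all zeroes */
	return uuid_find(c, zero_uuid);
}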
661 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { in bcache_device_unlink()
665 sysfs_remove_link(&d->c->kobj, d->name); in bcache_device_unlink()
668 for_each_cache(ca, d->c, i) in bcache_device_unlink()
673 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, in bcache_device_link() argument
679 for_each_cache(ca, d->c, i) in bcache_device_link()
685 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || in bcache_device_link()
686 sysfs_create_link(&c->kobj, &d->kobj, d->name), in bcache_device_link()
697 struct uuid_entry *u = d->c->uuids + d->id; in bcache_device_detach()
702 bch_uuid_write(d->c); in bcache_device_detach()
707 d->c->devices[d->id] = NULL; in bcache_device_detach()
708 closure_put(&d->c->caching); in bcache_device_detach()
709 d->c = NULL; in bcache_device_detach()
712 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, in bcache_device_attach() argument
716 d->c = c; in bcache_device_attach()
717 c->devices[id] = d; in bcache_device_attach()
719 closure_get(&c->caching); in bcache_device_attach()
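Note: attach and detach are symmetric. Attach (712-719) stores the device in c->devices[id], points d->c back at the set, and takes a reference on the set's "caching" closure; detach (697-709) invalidates the UUID slot for non-flash devices, rewrites the UUID table, clears the slot, and drops the reference with closure_put() at 708. A reconstruction of the attach side, with the d->id assignment inferred rather than taken from the hits:

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;		/* inferred; not visible in the hits above */
	d->c = c;
	c->devices[id] = d;

	/* every attached device pins the set's "caching" closure;
	 * bcache_device_detach() releases it (line 708) */
	closure_get(&c->caching);
}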
728 if (d->c) in bcache_device_free()
826 static void calc_cached_dev_sectors(struct cache_set *c) in calc_cached_dev_sectors() argument
831 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
834 c->cached_dev_sectors = sectors; in calc_cached_dev_sectors()
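Note: calc_cached_dev_sectors() (826-834) walks the set's cached_devs list and caches the total backing-device size in c->cached_dev_sectors. A plausible completion of the loop body; the per-device size accessor, shown here as bdev_sectors(), is an assumption, since only the loop and the final assignment appear in the hits.

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}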
858 if (!d->c && in bch_cached_dev_run()
933 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) in bch_cached_dev_attach() argument
941 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) in bch_cached_dev_attach()
944 if (dc->disk.c) { in bch_cached_dev_attach()
949 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { in bch_cached_dev_attach()
954 if (dc->sb.block_size < c->sb.block_size) { in bch_cached_dev_attach()
961 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
977 u = uuid_find_empty(c); in bch_cached_dev_attach()
995 bch_uuid_write(c); in bch_cached_dev_attach()
997 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); in bch_cached_dev_attach()
1004 bch_uuid_write(c); in bch_cached_dev_attach()
1007 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1008 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1009 calc_cached_dev_sectors(c); in bch_cached_dev_attach()
1033 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1040 dc->disk.c->sb.set_uuid); in bch_cached_dev_attach()
1147 struct cache_set *c; in register_bdev() local
1172 list_for_each_entry(c, &bch_cache_sets, list) in register_bdev()
1173 bch_cached_dev_attach(dc, c); in register_bdev()
1214 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) in flash_dev_run() argument
1226 if (bcache_device_init(d, block_bytes(c), u->sectors)) in flash_dev_run()
1229 bcache_device_attach(d, c, u - c->uuids); in flash_dev_run()
1236 bcache_device_link(d, c, "volume"); in flash_dev_run()
1244 static int flash_devs_run(struct cache_set *c) in flash_devs_run() argument
1249 for (u = c->uuids; in flash_devs_run()
1250 u < c->uuids + c->nr_uuids && !ret; in flash_devs_run()
1253 ret = flash_dev_run(c, u); in flash_devs_run()
1258 int bch_flash_dev_create(struct cache_set *c, uint64_t size) in bch_flash_dev_create() argument
1262 if (test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_flash_dev_create()
1265 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) in bch_flash_dev_create()
1268 u = uuid_find_empty(c); in bch_flash_dev_create()
1281 bch_uuid_write(c); in bch_flash_dev_create()
1283 return flash_dev_run(c, u); in bch_flash_dev_create()
1289 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) in bch_cache_set_error() argument
1293 if (c->on_error != ON_ERROR_PANIC && in bch_cache_set_error()
1294 test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_error()
1301 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); in bch_cache_set_error()
1309 if (c->on_error == ON_ERROR_PANIC) in bch_cache_set_error()
1312 bch_cache_set_unregister(c); in bch_cache_set_error()
1318 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in bch_cache_set_release() local
1319 kfree(c); in bch_cache_set_release()
1325 struct cache_set *c = container_of(cl, struct cache_set, cl); in cache_set_free() local
1329 if (!IS_ERR_OR_NULL(c->debug)) in cache_set_free()
1330 debugfs_remove(c->debug); in cache_set_free()
1332 bch_open_buckets_free(c); in cache_set_free()
1333 bch_btree_cache_free(c); in cache_set_free()
1334 bch_journal_free(c); in cache_set_free()
1336 for_each_cache(ca, c, i) in cache_set_free()
1339 c->cache[ca->sb.nr_this_dev] = NULL; in cache_set_free()
1343 bch_bset_sort_state_free(&c->sort); in cache_set_free()
1344 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); in cache_set_free()
1346 if (c->moving_gc_wq) in cache_set_free()
1347 destroy_workqueue(c->moving_gc_wq); in cache_set_free()
1348 if (c->bio_split) in cache_set_free()
1349 bioset_free(c->bio_split); in cache_set_free()
1350 if (c->fill_iter) in cache_set_free()
1351 mempool_destroy(c->fill_iter); in cache_set_free()
1352 if (c->bio_meta) in cache_set_free()
1353 mempool_destroy(c->bio_meta); in cache_set_free()
1354 if (c->search) in cache_set_free()
1355 mempool_destroy(c->search); in cache_set_free()
1356 kfree(c->devices); in cache_set_free()
1359 list_del(&c->list); in cache_set_free()
1362 pr_info("Cache set %pU unregistered", c->sb.set_uuid); in cache_set_free()
1365 closure_debug_destroy(&c->cl); in cache_set_free()
1366 kobject_put(&c->kobj); in cache_set_free()
1371 struct cache_set *c = container_of(cl, struct cache_set, caching); in cache_set_flush() local
1376 if (!c) in cache_set_flush()
1379 bch_cache_accounting_destroy(&c->accounting); in cache_set_flush()
1381 kobject_put(&c->internal); in cache_set_flush()
1382 kobject_del(&c->kobj); in cache_set_flush()
1384 if (c->gc_thread) in cache_set_flush()
1385 kthread_stop(c->gc_thread); in cache_set_flush()
1387 if (!IS_ERR_OR_NULL(c->root)) in cache_set_flush()
1388 list_add(&c->root->list, &c->btree_cache); in cache_set_flush()
1391 list_for_each_entry(b, &c->btree_cache, list) { in cache_set_flush()
1398 for_each_cache(ca, c, i) in cache_set_flush()
1402 if (c->journal.cur) { in cache_set_flush()
1403 cancel_delayed_work_sync(&c->journal.work); in cache_set_flush()
1405 c->journal.work.work.func(&c->journal.work.work); in cache_set_flush()
1413 struct cache_set *c = container_of(cl, struct cache_set, caching); in __cache_set_unregister() local
1419 for (i = 0; i < c->nr_uuids; i++) in __cache_set_unregister()
1420 if (c->devices[i]) { in __cache_set_unregister()
1421 if (!UUID_FLASH_ONLY(&c->uuids[i]) && in __cache_set_unregister()
1422 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { in __cache_set_unregister()
1423 dc = container_of(c->devices[i], in __cache_set_unregister()
1427 bcache_device_stop(c->devices[i]); in __cache_set_unregister()
1436 void bch_cache_set_stop(struct cache_set *c) in bch_cache_set_stop() argument
1438 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_stop()
1439 closure_queue(&c->caching); in bch_cache_set_stop()
1442 void bch_cache_set_unregister(struct cache_set *c) in bch_cache_set_unregister() argument
1444 set_bit(CACHE_SET_UNREGISTERING, &c->flags); in bch_cache_set_unregister()
1445 bch_cache_set_stop(c); in bch_cache_set_unregister()
1448 #define alloc_bucket_pages(gfp, c) \ argument
1449 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
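Note: the macro at 1448-1449 allocates one bucket's worth of zeroed, physically contiguous pages; ilog2(bucket_pages(c)) turns the per-bucket page count into the order __get_free_pages() expects. As a worked example (assuming 4 KiB pages and a 512 KiB bucket), bucket_pages(c) would be 128, ilog2(128) is 7, and the macro requests 2^7 = 128 zeroed pages. Line 1520 uses it to back c->uuids, and cache_set_free() releases that allocation at line 1344 with free_pages() at the same order.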
1454 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); in bch_cache_set_alloc() local
1455 if (!c) in bch_cache_set_alloc()
1459 closure_init(&c->cl, NULL); in bch_cache_set_alloc()
1460 set_closure_fn(&c->cl, cache_set_free, system_wq); in bch_cache_set_alloc()
1462 closure_init(&c->caching, &c->cl); in bch_cache_set_alloc()
1463 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); in bch_cache_set_alloc()
1466 closure_set_stopped(&c->cl); in bch_cache_set_alloc()
1467 closure_put(&c->cl); in bch_cache_set_alloc()
1469 kobject_init(&c->kobj, &bch_cache_set_ktype); in bch_cache_set_alloc()
1470 kobject_init(&c->internal, &bch_cache_set_internal_ktype); in bch_cache_set_alloc()
1472 bch_cache_accounting_init(&c->accounting, &c->cl); in bch_cache_set_alloc()
1474 memcpy(c->sb.set_uuid, sb->set_uuid, 16); in bch_cache_set_alloc()
1475 c->sb.block_size = sb->block_size; in bch_cache_set_alloc()
1476 c->sb.bucket_size = sb->bucket_size; in bch_cache_set_alloc()
1477 c->sb.nr_in_set = sb->nr_in_set; in bch_cache_set_alloc()
1478 c->sb.last_mount = sb->last_mount; in bch_cache_set_alloc()
1479 c->bucket_bits = ilog2(sb->bucket_size); in bch_cache_set_alloc()
1480 c->block_bits = ilog2(sb->block_size); in bch_cache_set_alloc()
1481 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); in bch_cache_set_alloc()
1483 c->btree_pages = bucket_pages(c); in bch_cache_set_alloc()
1484 if (c->btree_pages > BTREE_MAX_PAGES) in bch_cache_set_alloc()
1485 c->btree_pages = max_t(int, c->btree_pages / 4, in bch_cache_set_alloc()
1488 sema_init(&c->sb_write_mutex, 1); in bch_cache_set_alloc()
1489 mutex_init(&c->bucket_lock); in bch_cache_set_alloc()
1490 init_waitqueue_head(&c->btree_cache_wait); in bch_cache_set_alloc()
1491 init_waitqueue_head(&c->bucket_wait); in bch_cache_set_alloc()
1492 sema_init(&c->uuid_write_mutex, 1); in bch_cache_set_alloc()
1494 spin_lock_init(&c->btree_gc_time.lock); in bch_cache_set_alloc()
1495 spin_lock_init(&c->btree_split_time.lock); in bch_cache_set_alloc()
1496 spin_lock_init(&c->btree_read_time.lock); in bch_cache_set_alloc()
1498 bch_moving_init_cache_set(c); in bch_cache_set_alloc()
1500 INIT_LIST_HEAD(&c->list); in bch_cache_set_alloc()
1501 INIT_LIST_HEAD(&c->cached_devs); in bch_cache_set_alloc()
1502 INIT_LIST_HEAD(&c->btree_cache); in bch_cache_set_alloc()
1503 INIT_LIST_HEAD(&c->btree_cache_freeable); in bch_cache_set_alloc()
1504 INIT_LIST_HEAD(&c->btree_cache_freed); in bch_cache_set_alloc()
1505 INIT_LIST_HEAD(&c->data_buckets); in bch_cache_set_alloc()
1507 c->search = mempool_create_slab_pool(32, bch_search_cache); in bch_cache_set_alloc()
1508 if (!c->search) in bch_cache_set_alloc()
1514 if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || in bch_cache_set_alloc()
1515 !(c->bio_meta = mempool_create_kmalloc_pool(2, in bch_cache_set_alloc()
1517 bucket_pages(c))) || in bch_cache_set_alloc()
1518 !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || in bch_cache_set_alloc()
1519 !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || in bch_cache_set_alloc()
1520 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || in bch_cache_set_alloc()
1521 !(c->moving_gc_wq = create_workqueue("bcache_gc")) || in bch_cache_set_alloc()
1522 bch_journal_alloc(c) || in bch_cache_set_alloc()
1523 bch_btree_cache_alloc(c) || in bch_cache_set_alloc()
1524 bch_open_buckets_alloc(c) || in bch_cache_set_alloc()
1525 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) in bch_cache_set_alloc()
1528 c->congested_read_threshold_us = 2000; in bch_cache_set_alloc()
1529 c->congested_write_threshold_us = 20000; in bch_cache_set_alloc()
1530 c->error_limit = 8 << IO_ERROR_SHIFT; in bch_cache_set_alloc()
1532 return c; in bch_cache_set_alloc()
1534 bch_cache_set_unregister(c); in bch_cache_set_alloc()
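Note: the allocation chain at 1507-1525 uses the single-error-label idiom: every allocation sits in one || expression, the first failure falls through to the path that calls bch_cache_set_unregister() at 1534, and the eventual cache_set_free() (1346-1355) tears down only the members that were actually set up, because each destructor is guarded by a NULL check. A small user-space model of the idiom, with illustrative names rather than the bcache ones:

#include <stdlib.h>

struct ctx {
	void *a, *b, *c;
};

static void ctx_free(struct ctx *x)
{
	/* free(NULL) is a no-op, mirroring the NULL-guarded
	 * mempool_destroy()/destroy_workqueue() calls in cache_set_free() */
	free(x->c);
	free(x->b);
	free(x->a);
	free(x);
}

static struct ctx *ctx_alloc(void)
{
	struct ctx *x = calloc(1, sizeof(*x));

	if (!x)
		return NULL;

	/* chain the allocations; the first failure jumps to the error label */
	if (!(x->a = malloc(64)) ||
	    !(x->b = malloc(64)) ||
	    !(x->c = malloc(64)))
		goto err;

	return x;
err:
	ctx_free(x);	/* partially built state is safe to free */
	return NULL;
}

int main(void)
{
	struct ctx *x = ctx_alloc();

	if (x)
		ctx_free(x);
	return !x;
}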
1538 static void run_cache_set(struct cache_set *c) in run_cache_set() argument
1548 for_each_cache(ca, c, i) in run_cache_set()
1549 c->nbuckets += ca->sb.nbuckets; in run_cache_set()
1551 if (CACHE_SYNC(&c->sb)) { in run_cache_set()
1557 if (bch_journal_read(c, &journal)) in run_cache_set()
1569 for_each_cache(ca, c, i) in run_cache_set()
1581 if (__bch_btree_ptr_invalid(c, k)) in run_cache_set()
1585 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); in run_cache_set()
1586 if (IS_ERR_OR_NULL(c->root)) in run_cache_set()
1589 list_del_init(&c->root->list); in run_cache_set()
1590 rw_unlock(true, c->root); in run_cache_set()
1592 err = uuid_read(c, j, &cl); in run_cache_set()
1597 if (bch_btree_check(c)) in run_cache_set()
1600 bch_journal_mark(c, &journal); in run_cache_set()
1601 bch_initial_gc_finish(c); in run_cache_set()
1609 bch_journal_next(&c->journal); in run_cache_set()
1612 for_each_cache(ca, c, i) in run_cache_set()
1627 __uuid_write(c); in run_cache_set()
1629 bch_journal_replay(c, &journal); in run_cache_set()
1633 for_each_cache(ca, c, i) { in run_cache_set()
1643 bch_initial_gc_finish(c); in run_cache_set()
1646 for_each_cache(ca, c, i) in run_cache_set()
1650 mutex_lock(&c->bucket_lock); in run_cache_set()
1651 for_each_cache(ca, c, i) in run_cache_set()
1653 mutex_unlock(&c->bucket_lock); in run_cache_set()
1656 if (__uuid_write(c)) in run_cache_set()
1660 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); in run_cache_set()
1661 if (IS_ERR_OR_NULL(c->root)) in run_cache_set()
1664 mutex_lock(&c->root->write_lock); in run_cache_set()
1665 bkey_copy_key(&c->root->key, &MAX_KEY); in run_cache_set()
1666 bch_btree_node_write(c->root, &cl); in run_cache_set()
1667 mutex_unlock(&c->root->write_lock); in run_cache_set()
1669 bch_btree_set_root(c->root); in run_cache_set()
1670 rw_unlock(true, c->root); in run_cache_set()
1677 SET_CACHE_SYNC(&c->sb, true); in run_cache_set()
1679 bch_journal_next(&c->journal); in run_cache_set()
1680 bch_journal_meta(c, &cl); in run_cache_set()
1684 if (bch_gc_thread_start(c)) in run_cache_set()
1688 c->sb.last_mount = get_seconds(); in run_cache_set()
1689 bcache_write_super(c); in run_cache_set()
1692 bch_cached_dev_attach(dc, c); in run_cache_set()
1694 flash_devs_run(c); in run_cache_set()
1696 set_bit(CACHE_SET_RUNNING, &c->flags); in run_cache_set()
1701 bch_cache_set_error(c, "%s", err); in run_cache_set()
1704 static bool can_attach_cache(struct cache *ca, struct cache_set *c) in can_attach_cache() argument
1706 return ca->sb.block_size == c->sb.block_size && in can_attach_cache()
1707 ca->sb.bucket_size == c->sb.bucket_size && in can_attach_cache()
1708 ca->sb.nr_in_set == c->sb.nr_in_set; in can_attach_cache()
1715 struct cache_set *c; in register_cache_set() local
1717 list_for_each_entry(c, &bch_cache_sets, list) in register_cache_set()
1718 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
1719 if (c->cache[ca->sb.nr_this_dev]) in register_cache_set()
1722 if (!can_attach_cache(ca, c)) in register_cache_set()
1726 SET_CACHE_SYNC(&c->sb, false); in register_cache_set()
1731 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
1732 if (!c) in register_cache_set()
1736 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || in register_cache_set()
1737 kobject_add(&c->internal, &c->kobj, "internal")) in register_cache_set()
1740 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) in register_cache_set()
1743 bch_debug_init_cache_set(c); in register_cache_set()
1745 list_add(&c->list, &bch_cache_sets); in register_cache_set()
1748 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
1749 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
1752 if (ca->sb.seq > c->sb.seq) { in register_cache_set()
1753 c->sb.version = ca->sb.version; in register_cache_set()
1754 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); in register_cache_set()
1755 c->sb.flags = ca->sb.flags; in register_cache_set()
1756 c->sb.seq = ca->sb.seq; in register_cache_set()
1757 pr_debug("set version = %llu", c->sb.version); in register_cache_set()
1761 ca->set = c; in register_cache_set()
1763 c->cache_by_alloc[c->caches_loaded++] = ca; in register_cache_set()
1765 if (c->caches_loaded == c->sb.nr_in_set) in register_cache_set()
1766 run_cache_set(c); in register_cache_set()
1770 bch_cache_set_unregister(c); in register_cache_set()
1901 struct cache_set *c, *tc; in bch_is_open_backing() local
1904 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_backing()
1905 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
1915 struct cache_set *c, *tc; in bch_is_open_cache() local
1919 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_cache()
1920 for_each_cache(ca, c, i) in bch_is_open_cache()
2015 struct cache_set *c, *tc; in bcache_reboot() local
2026 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bcache_reboot()
2027 bch_cache_set_stop(c); in bcache_reboot()