Lines matching refs: ca
295 struct cache *ca = bio->bi_private; in write_super_endio() local
297 bch_count_io_errors(ca, error, "writing superblock"); in write_super_endio()
298 closure_put(&ca->set->sb_write); in write_super_endio()
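These two matches are the entire body of the superblock-write completion handler: bi_private carries the struct cache, so the endio can charge the I/O error to the right device and drop the closure reference taken at submission. All the matches on this page appear to come from drivers/md/bcache/super.c in a roughly v3.15-v4.2 kernel (bi_iter and sb_write_mutex-era closures, but still bi_bdev, REQ_WRITE and an explicit error argument to endio callbacks); treat the reconstructions below as sketches under that assumption. A minimal one for this handler:

static void write_super_endio(struct bio *bio, int error)
{
        struct cache *ca = bio->bi_private;     /* set at submit time */

        bch_count_io_errors(ca, error, "writing superblock");
        closure_put(&ca->set->sb_write);        /* balances closure_get() in bcache_write_super() */
}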
311 struct cache *ca; in bcache_write_super() local
319 for_each_cache(ca, c, i) { in bcache_write_super()
320 struct bio *bio = &ca->sb_bio; in bcache_write_super()
322 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; in bcache_write_super()
323 ca->sb.seq = c->sb.seq; in bcache_write_super()
324 ca->sb.last_mount = c->sb.last_mount; in bcache_write_super()
326 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); in bcache_write_super()
329 bio->bi_bdev = ca->bdev; in bcache_write_super()
331 bio->bi_private = ca; in bcache_write_super()
334 __write_super(&ca->sb, bio); in bcache_write_super()
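bcache_write_super() stamps every member device's in-memory superblock with the set-wide sequence number, mount time and sync flag, then submits one write per device, taking a closure reference for each in-flight bio. for_each_cache(ca, c, i) is bcache's iterator over c->cache[0..nr_in_set). A hedged reconstruction; the locking, the bio_reset()/bi_end_io wiring and the closure destructor between the matched lines are filled in from the kernel of that era, not from the matches themselves:

void bcache_write_super(struct cache_set *c)
{
        struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;

        down(&c->sb_write_mutex);       /* serializes superblock writes */
        closure_init(cl, &c->cl);

        c->sb.seq++;

        for_each_cache(ca, c, i) {
                struct bio *bio = &ca->sb_bio;

                /* propagate set-wide state into this device's sb */
                ca->sb.version    = BCACHE_SB_VERSION_CDEV_WITH_UUID;
                ca->sb.seq        = c->sb.seq;
                ca->sb.last_mount = c->sb.last_mount;

                SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

                bio_reset(bio);
                bio->bi_bdev    = ca->bdev;
                bio->bi_end_io  = write_super_endio;
                bio->bi_private = ca;

                closure_get(cl);        /* dropped in write_super_endio() */
                __write_super(&ca->sb, bio);
        }

        /* assumed destructor: ups sb_write_mutex once all bios complete */
        closure_return_with_destructor(cl, bcache_write_super_unlock);
}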
517 struct cache *ca = bio->bi_private; in prio_endio() local
519 cache_set_err_on(error, ca->set, "accessing priorities"); in prio_endio()
520 bch_bbio_free(bio, ca->set); in prio_endio()
521 closure_put(&ca->prio); in prio_endio()
524 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) in prio_io() argument
526 struct closure *cl = &ca->prio; in prio_io()
527 struct bio *bio = bch_bbio_alloc(ca->set); in prio_io()
531 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
532 bio->bi_bdev = ca->bdev; in prio_io()
534 bio->bi_iter.bi_size = bucket_bytes(ca); in prio_io()
537 bio->bi_private = ca; in prio_io()
538 bch_bio_map(bio, ca->disk_buckets); in prio_io()
540 closure_bio_submit(bio, &ca->prio, ca); in prio_io()
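prio_io() is the synchronous I/O primitive behind the priority-bucket reads and writes: it points a bbio at the bucket's starting sector, maps ca->disk_buckets into it, submits against the embedded closure and waits; prio_endio() mirrors write_super_endio() but escalates failures to the whole cache set via cache_set_err_on(). Reconstructed from the matches, with the elided lines (closure init, the bi_rw flags, the trailing closure_sync()) filled in as assumptions:

static void prio_endio(struct bio *bio, int error)
{
        struct cache *ca = bio->bi_private;

        cache_set_err_on(error, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
        struct closure *cl = &ca->prio;
        struct bio *bio = bch_bbio_alloc(ca->set);

        closure_init_stack(cl);

        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
        bio->bi_bdev            = ca->bdev;
        bio->bi_rw              = REQ_SYNC|REQ_META|rw; /* assumed flags */
        bio->bi_iter.bi_size    = bucket_bytes(ca);

        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
        bch_bio_map(bio, ca->disk_buckets);

        closure_bio_submit(bio, &ca->prio, ca);
        closure_sync(cl);       /* block until prio_endio() has run */
}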
544 void bch_prio_write(struct cache *ca) in bch_prio_write() argument
552 lockdep_assert_held(&ca->set->bucket_lock); in bch_prio_write()
554 ca->disk_buckets->seq++; in bch_prio_write()
556 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), in bch_prio_write()
557 &ca->meta_sectors_written); in bch_prio_write()
562 for (i = prio_buckets(ca) - 1; i >= 0; --i) { in bch_prio_write()
564 struct prio_set *p = ca->disk_buckets; in bch_prio_write()
566 struct bucket_disk *end = d + prios_per_bucket(ca); in bch_prio_write()
568 for (b = ca->buckets + i * prios_per_bucket(ca); in bch_prio_write()
569 b < ca->buckets + ca->sb.nbuckets && d < end; in bch_prio_write()
575 p->next_bucket = ca->prio_buckets[i + 1]; in bch_prio_write()
576 p->magic = pset_magic(&ca->sb); in bch_prio_write()
577 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); in bch_prio_write()
579 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); in bch_prio_write()
582 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
583 prio_io(ca, bucket, REQ_WRITE); in bch_prio_write()
584 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
586 ca->prio_buckets[i] = bucket; in bch_prio_write()
587 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
590 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
592 bch_journal_meta(ca->set, &cl); in bch_prio_write()
595 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
601 for (i = 0; i < prio_buckets(ca); i++) { in bch_prio_write()
602 if (ca->prio_last_buckets[i]) in bch_prio_write()
603 __bch_bucket_free(ca, in bch_prio_write()
604 &ca->buckets[ca->prio_last_buckets[i]]); in bch_prio_write()
606 ca->prio_last_buckets[i] = ca->prio_buckets[i]; in bch_prio_write()
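bch_prio_write() walks the in-core bucket array in prios_per_bucket()-sized chunks, serializes each chunk into ca->disk_buckets (checksummed, with next_bucket forming an on-disk linked list), allocates a fresh bucket for it from RESERVE_PRIO, and writes it out with bucket_lock dropped around the actual I/O. Only after the journal commits the new locations does it free the previous generation. A reconstruction; the declarations, the inner copy loop and the BUG_ON are filled in as assumptions:

void bch_prio_write(struct cache *ca)
{
        int i;
        struct bucket *b;
        struct closure cl;

        closure_init_stack(&cl);

        lockdep_assert_held(&ca->set->bucket_lock);

        ca->disk_buckets->seq++;

        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);

        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
                struct prio_set *p = ca->disk_buckets;
                struct bucket_disk *d = p->data;        /* assumed: line 565 is not in the matches */
                struct bucket_disk *end = d + prios_per_bucket(ca);

                /* serialize one chunk of the in-core bucket array */
                for (b = ca->buckets + i * prios_per_bucket(ca);
                     b < ca->buckets + ca->sb.nbuckets && d < end;
                     b++, d++) {
                        d->prio = cpu_to_le16(b->prio);
                        d->gen  = b->gen;
                }

                p->next_bucket = ca->prio_buckets[i + 1];
                p->magic       = pset_magic(&ca->sb);
                p->csum        = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

                bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);

                /* drop the lock for the synchronous write */
                mutex_unlock(&ca->set->bucket_lock);
                prio_io(ca, bucket, REQ_WRITE);
                mutex_lock(&ca->set->bucket_lock);

                ca->prio_buckets[i] = bucket;
                atomic_dec_bug(&ca->buckets[bucket].pin);
        }

        mutex_unlock(&ca->set->bucket_lock);

        bch_journal_meta(ca->set, &cl); /* journal the new prio bucket locations */
        closure_sync(&cl);

        mutex_lock(&ca->set->bucket_lock);

        /*
         * The old priorities must not be reused until the new ones are
         * journalled, so only now free last generation's buckets.
         */
        for (i = 0; i < prio_buckets(ca); i++) {
                if (ca->prio_last_buckets[i])
                        __bch_bucket_free(ca,
                                &ca->buckets[ca->prio_last_buckets[i]]);

                ca->prio_last_buckets[i] = ca->prio_buckets[i];
        }
}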
610 static void prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
612 struct prio_set *p = ca->disk_buckets; in prio_read()
613 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; in prio_read()
617 for (b = ca->buckets; in prio_read()
618 b < ca->buckets + ca->sb.nbuckets; in prio_read()
621 ca->prio_buckets[bucket_nr] = bucket; in prio_read()
622 ca->prio_last_buckets[bucket_nr] = bucket; in prio_read()
625 prio_io(ca, bucket, READ_SYNC); in prio_read()
627 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) in prio_read()
630 if (p->magic != pset_magic(&ca->sb)) in prio_read()
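prio_read() is the inverse walk performed at cache-set startup: d starts saturated (p->data + prios_per_bucket(ca) == end), so the very first iteration reads the head bucket handed in from the journal, records it in both prio_buckets[] and prio_last_buckets[], checks csum and magic, then follows p->next_bucket down the on-disk chain while refilling each in-core bucket's prio and gen. Reconstructed, with the body of the refill block largely filled in as an assumption:

static void prio_read(struct cache *ca, uint64_t bucket)
{
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
        struct bucket *b;
        unsigned bucket_nr = 0;

        for (b = ca->buckets;
             b < ca->buckets + ca->sb.nbuckets;
             b++, d++) {
                if (d == end) {
                        /* current prio bucket exhausted; read the next one */
                        ca->prio_buckets[bucket_nr] = bucket;
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;

                        prio_io(ca, bucket, READ_SYNC);

                        if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                                pr_warn("bad csum reading priorities");

                        if (p->magic != pset_magic(&ca->sb))
                                pr_warn("bad magic reading priorities");

                        bucket = p->next_bucket;        /* follow the on-disk chain */
                        d = p->data;
                }

                b->prio = le16_to_cpu(d->prio);
                b->gen = b->last_gc = d->gen;
        }
}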
686 struct cache *ca; in bcache_device_unlink() local
691 for_each_cache(ca, d->c, i) in bcache_device_unlink()
692 bd_unlink_disk_holder(ca->bdev, d->disk); in bcache_device_unlink()
700 struct cache *ca; in bcache_device_link() local
702 for_each_cache(ca, d->c, i) in bcache_device_link()
703 bd_link_disk_holder(ca->bdev, d->disk); in bcache_device_link()
1357 struct cache *ca; in cache_set_free() local
1367 for_each_cache(ca, c, i) in cache_set_free()
1368 if (ca) { in cache_set_free()
1369 ca->set = NULL; in cache_set_free()
1370 c->cache[ca->sb.nr_this_dev] = NULL; in cache_set_free()
1371 kobject_put(&ca->kobj); in cache_set_free()
1403 struct cache *ca; in cache_set_flush() local
1429 for_each_cache(ca, c, i) in cache_set_flush()
1430 if (ca->alloc_thread) in cache_set_flush()
1431 kthread_stop(ca->alloc_thread); in cache_set_flush()
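The cache_set_flush()/cache_set_free() matches are the set-side teardown: flush stops each device's allocator thread first, so no new buckets are handed out while the set dies; free then severs the ca->set back-pointer, clears the set's slot for that device and drops the kobject reference taken in register_cache_set(). A condensed sketch of just those two loops (the surrounding teardown is omitted):

        /* in cache_set_flush() */
        for_each_cache(ca, c, i)
                if (ca->alloc_thread)
                        kthread_stop(ca->alloc_thread);

        /* in cache_set_free() */
        for_each_cache(ca, c, i)
                if (ca) {
                        ca->set = NULL;
                        c->cache[ca->sb.nr_this_dev] = NULL;
                        kobject_put(&ca->kobj); /* ref from register_cache_set() */
                }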
1573 struct cache *ca; in run_cache_set() local
1579 for_each_cache(ca, c, i) in run_cache_set()
1580 c->nbuckets += ca->sb.nbuckets; in run_cache_set()
1600 for_each_cache(ca, c, i) in run_cache_set()
1601 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); in run_cache_set()
1643 for_each_cache(ca, c, i) in run_cache_set()
1644 if (bch_cache_allocator_start(ca)) in run_cache_set()
1664 for_each_cache(ca, c, i) { in run_cache_set()
1667 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, in run_cache_set()
1670 for (j = 0; j < ca->sb.keys; j++) in run_cache_set()
1671 ca->sb.d[j] = ca->sb.first_bucket + j; in run_cache_set()
1677 for_each_cache(ca, c, i) in run_cache_set()
1678 if (bch_cache_allocator_start(ca)) in run_cache_set()
1682 for_each_cache(ca, c, i) in run_cache_set()
1683 bch_prio_write(ca); in run_cache_set()
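The run_cache_set() matches span its two startup paths. On an existing (CACHE_SYNC) set, each device's priorities are read back from the bucket the journal recorded for it (j->prio_bucket[ca->sb.nr_this_dev]) before the allocator threads start. On a brand-new set there is no journal yet, so each device first claims a few buckets right after first_bucket to serve as one; only then do the allocators start and bch_prio_write() persists the initial priorities. A condensed sketch of that new-set carve-out, assuming the surrounding else-branch:

        for_each_cache(ca, c, i) {
                unsigned j;

                /* nbuckets/128 journal buckets, clamped to [2, SB_JOURNAL_BUCKETS] */
                ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
                                      2, SB_JOURNAL_BUCKETS);

                for (j = 0; j < ca->sb.keys; j++)
                        ca->sb.d[j] = ca->sb.first_bucket + j;
        }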
1735 static bool can_attach_cache(struct cache *ca, struct cache_set *c) in can_attach_cache() argument
1737 return ca->sb.block_size == c->sb.block_size && in can_attach_cache()
1738 ca->sb.bucket_size == c->sb.bucket_size && in can_attach_cache()
1739 ca->sb.nr_in_set == c->sb.nr_in_set; in can_attach_cache()
1742 static const char *register_cache_set(struct cache *ca) in register_cache_set() argument
1749 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
1750 if (c->cache[ca->sb.nr_this_dev]) in register_cache_set()
1753 if (!can_attach_cache(ca, c)) in register_cache_set()
1756 if (!CACHE_SYNC(&ca->sb)) in register_cache_set()
1762 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
1778 sprintf(buf, "cache%i", ca->sb.nr_this_dev); in register_cache_set()
1779 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
1780 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
1783 if (ca->sb.seq > c->sb.seq) { in register_cache_set()
1784 c->sb.version = ca->sb.version; in register_cache_set()
1785 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); in register_cache_set()
1786 c->sb.flags = ca->sb.flags; in register_cache_set()
1787 c->sb.seq = ca->sb.seq; in register_cache_set()
1791 kobject_get(&ca->kobj); in register_cache_set()
1792 ca->set = c; in register_cache_set()
1793 ca->set->cache[ca->sb.nr_this_dev] = ca; in register_cache_set()
1794 c->cache_by_alloc[c->caches_loaded++] = ca; in register_cache_set()
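register_cache_set() either attaches ca to an existing set with the same set_uuid (rejecting duplicate members and mismatched geometry via can_attach_cache()) or allocates a fresh set; either way, the member with the newest superblock sequence number donates its version, uuid, flags and seq to the set-wide copy, and once every expected member has arrived the set is started. A condensed reconstruction; the list walk, the found:/err: labels and the caches_loaded check are assumptions from the kernel of that era:

static const char *register_cache_set(struct cache *ca)
{
        char buf[12];
        const char *err = "cannot allocate memory";
        struct cache_set *c;

        list_for_each_entry(c, &bch_cache_sets, list)
                if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
                        if (c->cache[ca->sb.nr_this_dev])
                                return "duplicate cache set member";

                        if (!can_attach_cache(ca, c))
                                return "cache sb does not match set";

                        if (!CACHE_SYNC(&ca->sb))
                                SET_CACHE_SYNC(&c->sb, false);

                        goto found;
                }

        c = bch_cache_set_alloc(&ca->sb);
        if (!c)
                return err;

        /* ... kobject_add() for the new set, list_add() to bch_cache_sets ... */
found:
        sprintf(buf, "cache%i", ca->sb.nr_this_dev);
        if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
            sysfs_create_link(&c->kobj, &ca->kobj, buf))
                goto err;

        if (ca->sb.seq > c->sb.seq) {
                /* newest member superblock wins */
                c->sb.version = ca->sb.version;
                memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
                c->sb.flags   = ca->sb.flags;
                c->sb.seq     = ca->sb.seq;
        }

        kobject_get(&ca->kobj); /* dropped in cache_set_free() */
        ca->set = c;
        ca->set->cache[ca->sb.nr_this_dev] = ca;
        c->cache_by_alloc[c->caches_loaded++] = ca;

        if (c->caches_loaded == c->sb.nr_in_set)
                run_cache_set(c);

        return NULL;
err:
        bch_cache_set_unregister(c);
        return err;
}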
1809 struct cache *ca = container_of(kobj, struct cache, kobj); in bch_cache_release() local
1812 if (ca->set) { in bch_cache_release()
1813 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); in bch_cache_release()
1814 ca->set->cache[ca->sb.nr_this_dev] = NULL; in bch_cache_release()
1817 bio_split_pool_free(&ca->bio_split_hook); in bch_cache_release()
1819 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); in bch_cache_release()
1820 kfree(ca->prio_buckets); in bch_cache_release()
1821 vfree(ca->buckets); in bch_cache_release()
1823 free_heap(&ca->heap); in bch_cache_release()
1824 free_fifo(&ca->free_inc); in bch_cache_release()
1827 free_fifo(&ca->free[i]); in bch_cache_release()
1829 if (ca->sb_bio.bi_inline_vecs[0].bv_page) in bch_cache_release()
1830 put_page(ca->sb_bio.bi_io_vec[0].bv_page); in bch_cache_release()
1832 if (!IS_ERR_OR_NULL(ca->bdev)) in bch_cache_release()
1833 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in bch_cache_release()
1835 kfree(ca); in bch_cache_release()
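bch_cache_release() is the kobject destructor and the single teardown path for everything cache_alloc() and register_cache() set up, in reverse order: detach from the set, free the buffers, heap and fifos, drop the superblock page, and close the exclusively-opened bdev. Reconstructed; the declarations, the RESERVE_NR loop header around the free_fifo() match, and the module_put() are filled in as assumptions:

static void bch_cache_release(struct kobject *kobj)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        unsigned i;

        if (ca->set) {
                BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
                ca->set->cache[ca->sb.nr_this_dev] = NULL;
        }

        bio_split_pool_free(&ca->bio_split_hook);

        free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
        kfree(ca->prio_buckets);
        vfree(ca->buckets);

        free_heap(&ca->heap);
        free_fifo(&ca->free_inc);

        for (i = 0; i < RESERVE_NR; i++)
                free_fifo(&ca->free[i]);

        if (ca->sb_bio.bi_inline_vecs[0].bv_page)
                put_page(ca->sb_bio.bi_io_vec[0].bv_page);

        if (!IS_ERR_OR_NULL(ca->bdev))
                blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

        kfree(ca);
        module_put(THIS_MODULE);        /* pairs with __module_get() in cache_alloc() */
}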
1839 static int cache_alloc(struct cache_sb *sb, struct cache *ca) in cache_alloc() argument
1845 kobject_init(&ca->kobj, &bch_cache_ktype); in cache_alloc()
1847 bio_init(&ca->journal.bio); in cache_alloc()
1848 ca->journal.bio.bi_max_vecs = 8; in cache_alloc()
1849 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; in cache_alloc()
1851 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; in cache_alloc()
1853 if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || in cache_alloc()
1854 !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || in cache_alloc()
1855 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || in cache_alloc()
1856 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || in cache_alloc()
1857 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || in cache_alloc()
1858 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || in cache_alloc()
1859 !(ca->buckets = vzalloc(sizeof(struct bucket) * in cache_alloc()
1860 ca->sb.nbuckets)) || in cache_alloc()
1861 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * in cache_alloc()
1863 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || in cache_alloc()
1864 bio_split_pool_init(&ca->bio_split_hook)) in cache_alloc()
1867 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); in cache_alloc()
1869 for_each_bucket(b, ca) in cache_alloc()
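cache_alloc() sizes everything off the bucket count: free is nbuckets (rounded up to a power of two) / 1024, and the reserve fifos, free_inc and the heap are small multiples of it; a single failed allocation in the chained condition fails the whole function. Reconstructed, with the declarations and the kzalloc continuation line (which allocates prio_buckets and prio_last_buckets as one array, hence the * 2) filled in as assumptions:

static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
        size_t free;
        struct bucket *b;

        __module_get(THIS_MODULE);
        kobject_init(&ca->kobj, &bch_cache_ktype);

        bio_init(&ca->journal.bio);
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

        free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

        if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
            !(ca->buckets = vzalloc(sizeof(struct bucket) *
                                    ca->sb.nbuckets)) ||
            !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
                                         2, GFP_KERNEL)) ||
            !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
            bio_split_pool_init(&ca->bio_split_hook))
                return -ENOMEM;

        /* second half of the prio_buckets allocation */
        ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

        for_each_bucket(b, ca)
                atomic_set(&b->pin, 0);

        return 0;
}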
1876 struct block_device *bdev, struct cache *ca) in register_cache() argument
1882 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); in register_cache()
1883 ca->bdev = bdev; in register_cache()
1884 ca->bdev->bd_holder = ca; in register_cache()
1886 bio_init(&ca->sb_bio); in register_cache()
1887 ca->sb_bio.bi_max_vecs = 1; in register_cache()
1888 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; in register_cache()
1889 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_cache()
1892 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in register_cache()
1893 ca->discard = CACHE_DISCARD(&ca->sb); in register_cache()
1895 ret = cache_alloc(sb, ca); in register_cache()
1899 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { in register_cache()
1906 err = register_cache_set(ca); in register_cache()
1917 kobject_put(&ca->kobj); in register_cache()
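register_cache() adopts the probe-time state: it copies the superblock into the cache, takes over the exclusively-opened bdev as holder, seeds sb_bio with the superblock page so later bcache_write_super() calls can reuse it, then runs cache_alloc() and register_cache_set() under bch_register_lock. A condensed sketch; the error labels, the exact return-value mapping and the pr_info() are assumptions:

static int register_cache(struct cache_sb *sb, struct page *sb_page,
                          struct block_device *bdev, struct cache *ca)
{
        char name[BDEVNAME_SIZE];
        const char *err;
        int ret;

        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;       /* ca owns the exclusive open now */

        /* seed sb_bio with the page the superblock was read into */
        bio_init(&ca->sb_bio);
        ca->sb_bio.bi_max_vecs = 1;
        ca->sb_bio.bi_io_vec   = ca->sb_bio.bi_inline_vecs;
        ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
        get_page(sb_page);

        if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                ca->discard = CACHE_DISCARD(&ca->sb);

        ret = cache_alloc(sb, ca);
        if (ret != 0)
                goto err;

        if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache")) {
                ret = -ENOMEM;
                goto err;
        }

        mutex_lock(&bch_register_lock);
        err = register_cache_set(ca);
        mutex_unlock(&bch_register_lock);

        if (err) {
                ret = -ENODEV;
                goto err;
        }

        pr_info("registered cache device %s", bdevname(bdev, name));
        return 0;
err:
        kobject_put(&ca->kobj); /* bch_cache_release() does the cleanup */
        return ret;
}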
1950 struct cache *ca; in bch_is_open_cache() local
1954 for_each_cache(ca, c, i) in bch_is_open_cache()
1955 if (ca->bdev == bdev) in bch_is_open_cache()
2017 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); in register_bcache() local
2018 if (!ca) in register_bcache()
2021 if (register_cache(sb, sb_page, bdev, ca) != 0) in register_bcache()
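Finally, the register_bcache() matches are the tail of the probe path: once the superblock identifies the device as a cache (rather than a backing device), a zeroed struct cache is allocated and handed to register_cache() together with the superblock page and the exclusively-opened bdev; from that point cleanup flows through kobject_put()/bch_cache_release(). A sketch of that branch, assuming the surrounding if/else and the err_close label:

        } else {
                struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
                if (!ca)
                        goto err_close;

                /* on success, register_cache() owns sb_page and bdev */
                if (register_cache(sb, sb_page, bdev, ca) != 0)
                        goto err_close;
        }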