Lines matching refs: ca

(Identifier cross-reference: each match reads "<file line> <source text> in <function>", with a trailing "local" or "argument" noting how ca is bound there. Judging by fields such as bio->bi_error and bio->bi_bdev, the matches come from drivers/md/bcache/super.c in a ~4.x-era kernel.)
272 struct cache *ca = bio->bi_private; in write_super_endio() local
274 bch_count_io_errors(ca, bio->bi_error, "writing superblock"); in write_super_endio()
275 closure_put(&ca->set->sb_write); in write_super_endio()
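The three matches above form bcache's standard bio-completion idiom: recover the owning cache from bi_private, account the I/O error against that device, and drop a closure reference so the parent operation continues once every outstanding write finishes. Reassembled as a whole (the function signature is assumed from the ~4.x bio API; only braces and comments are added):

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;	/* stashed before submission */

	/* Per-device error accounting; repeated errors can
	 * eventually fail the device. */
	bch_count_io_errors(ca, bio->bi_error, "writing superblock");

	/* The last put releases the set-wide sb_write closure,
	 * letting bcache_write_super()'s continuation run. */
	closure_put(&ca->set->sb_write);
}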
288 struct cache *ca; in bcache_write_super() local
296 for_each_cache(ca, c, i) { in bcache_write_super()
297 struct bio *bio = &ca->sb_bio; in bcache_write_super()
299 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; in bcache_write_super()
300 ca->sb.seq = c->sb.seq; in bcache_write_super()
301 ca->sb.last_mount = c->sb.last_mount; in bcache_write_super()
303 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); in bcache_write_super()
306 bio->bi_bdev = ca->bdev; in bcache_write_super()
308 bio->bi_private = ca; in bcache_write_super()
311 __write_super(&ca->sb, bio); in bcache_write_super()
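bcache_write_super() pushes the set-wide superblock state out to every member device. A condensed sketch of the loop visible in the matches (the surrounding closure setup, bio_reset(), and the bi_end_io assignment are omitted; the _condensed name marks this as a sketch, not the verbatim function):

static void bcache_write_super_condensed(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		/* Propagate set-wide state into this device's copy. */
		ca->sb.version    = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq        = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;
		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio->bi_bdev    = ca->bdev;
		bio->bi_private = ca;	/* consumed by write_super_endio() */

		__write_super(&ca->sb, bio);
	}
}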
494 struct cache *ca = bio->bi_private; in prio_endio() local
496 cache_set_err_on(bio->bi_error, ca->set, "accessing priorities"); in prio_endio()
497 bch_bbio_free(bio, ca->set); in prio_endio()
498 closure_put(&ca->prio); in prio_endio()
501 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) in prio_io() argument
503 struct closure *cl = &ca->prio; in prio_io()
504 struct bio *bio = bch_bbio_alloc(ca->set); in prio_io()
508 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
509 bio->bi_bdev = ca->bdev; in prio_io()
511 bio->bi_iter.bi_size = bucket_bytes(ca); in prio_io()
514 bio->bi_private = ca; in prio_io()
515 bch_bio_map(bio, ca->disk_buckets); in prio_io()
517 closure_bio_submit(bio, &ca->prio); in prio_io()
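prio_io() is the single I/O helper for both reading and writing priority buckets: one bio, sized to exactly one bucket, mapped over the ca->disk_buckets staging buffer, submitted under the ca->prio closure. Reassembled below; the bi_rw and bi_end_io assignments and the trailing closure_sync() are recalled from the surrounding function rather than from the matches:

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;	/* bucket -> sector */
	bio->bi_bdev           = ca->bdev;
	bio->bi_rw             = REQ_SYNC|REQ_META|rw;
	bio->bi_iter.bi_size   = bucket_bytes(ca);		/* one full bucket */

	bio->bi_end_io  = prio_endio;		/* counts errors, puts cl */
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);	/* map the staging buffer */

	closure_bio_submit(bio, &ca->prio);
	closure_sync(cl);	/* callers rely on the I/O completing here */
}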
521 void bch_prio_write(struct cache *ca) in bch_prio_write() argument
529 lockdep_assert_held(&ca->set->bucket_lock); in bch_prio_write()
531 ca->disk_buckets->seq++; in bch_prio_write()
533 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), in bch_prio_write()
534 &ca->meta_sectors_written); in bch_prio_write()
539 for (i = prio_buckets(ca) - 1; i >= 0; --i) { in bch_prio_write()
541 struct prio_set *p = ca->disk_buckets; in bch_prio_write()
543 struct bucket_disk *end = d + prios_per_bucket(ca); in bch_prio_write()
545 for (b = ca->buckets + i * prios_per_bucket(ca); in bch_prio_write()
546 b < ca->buckets + ca->sb.nbuckets && d < end; in bch_prio_write()
552 p->next_bucket = ca->prio_buckets[i + 1]; in bch_prio_write()
553 p->magic = pset_magic(&ca->sb); in bch_prio_write()
554 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); in bch_prio_write()
556 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); in bch_prio_write()
559 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
560 prio_io(ca, bucket, REQ_WRITE); in bch_prio_write()
561 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
563 ca->prio_buckets[i] = bucket; in bch_prio_write()
564 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
567 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
569 bch_journal_meta(ca->set, &cl); in bch_prio_write()
572 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
578 for (i = 0; i < prio_buckets(ca); i++) { in bch_prio_write()
579 if (ca->prio_last_buckets[i]) in bch_prio_write()
580 __bch_bucket_free(ca, in bch_prio_write()
581 &ca->buckets[ca->prio_last_buckets[i]]); in bch_prio_write()
583 ca->prio_last_buckets[i] = ca->prio_buckets[i]; in bch_prio_write()
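The bch_prio_write() matches reveal the on-disk prio format: priorities are packed into prio_buckets(ca) buckets, written from the highest index down so that each bucket can record its already-allocated successor in next_bucket (line 552), with a crc64 over everything except the leading 8-byte csum field, hence bch_crc64(&p->magic, bucket_bytes(ca) - 8) at line 554. A standalone, compilable illustration of that checksum rule; the field layout here is inferred from the offsets used above, not copied from the bcache headers:

#include <stdint.h>
#include <stddef.h>

/* Inferred layout: csum first, then the checksummed region. */
struct prio_set_sketch {
	uint64_t csum;		/* crc64 of everything after this field */
	uint64_t magic;
	uint64_t seq;
	uint64_t next_bucket;	/* links the prio buckets into a chain */
	/* bucket_disk entries follow, filling out bucket_bytes total */
};

/* crc64 stands in for the kernel's bch_crc64(). */
static uint64_t prio_csum(const struct prio_set_sketch *p, size_t bucket_bytes,
			  uint64_t (*crc64)(const void *, size_t))
{
	/* Start at &p->magic: skip exactly the 8-byte csum field. */
	return crc64(&p->magic, bucket_bytes - sizeof(p->csum));
}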
587 static void prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
589 struct prio_set *p = ca->disk_buckets; in prio_read()
590 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; in prio_read()
594 for (b = ca->buckets; in prio_read()
595 b < ca->buckets + ca->sb.nbuckets; in prio_read()
598 ca->prio_buckets[bucket_nr] = bucket; in prio_read()
599 ca->prio_last_buckets[bucket_nr] = bucket; in prio_read()
602 prio_io(ca, bucket, READ_SYNC); in prio_read()
604 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) in prio_read()
607 if (p->magic != pset_magic(&ca->sb)) in prio_read()
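prio_read() inverts the write path. The cursor d starts at end (line 590), so the very first loop iteration reads the head bucket; each time the cursor exhausts a bucket it follows next_bucket to the next one, verifying csum and magic on every hop. Reassembled, with the real code's error reporting and per-bucket prio/gen copy reduced to comments:

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	/* d == end forces a read on the first iteration. */
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr]      = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				/* the real code warns: bad csum */;
			if (p->magic != pset_magic(&ca->sb))
				/* the real code warns: bad magic */;

			bucket = p->next_bucket;	/* follow the chain */
			d = p->data;
		}
		/* the real loop copies d->prio / d->gen into *b here */
	}
}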
663 struct cache *ca; in bcache_device_unlink() local
668 for_each_cache(ca, d->c, i) in bcache_device_unlink()
669 bd_unlink_disk_holder(ca->bdev, d->disk); in bcache_device_unlink()
677 struct cache *ca; in bcache_device_link() local
679 for_each_cache(ca, d->c, i) in bcache_device_link()
680 bd_link_disk_holder(ca->bdev, d->disk); in bcache_device_link()
1326 struct cache *ca; in cache_set_free() local
1336 for_each_cache(ca, c, i) in cache_set_free()
1337 if (ca) { in cache_set_free()
1338 ca->set = NULL; in cache_set_free()
1339 c->cache[ca->sb.nr_this_dev] = NULL; in cache_set_free()
1340 kobject_put(&ca->kobj); in cache_set_free()
1372 struct cache *ca; in cache_set_flush() local
1398 for_each_cache(ca, c, i) in cache_set_flush()
1399 if (ca->alloc_thread) in cache_set_flush()
1400 kthread_stop(ca->alloc_thread); in cache_set_flush()
1542 struct cache *ca; in run_cache_set() local
1548 for_each_cache(ca, c, i) in run_cache_set()
1549 c->nbuckets += ca->sb.nbuckets; in run_cache_set()
1569 for_each_cache(ca, c, i) in run_cache_set()
1570 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); in run_cache_set()
1612 for_each_cache(ca, c, i) in run_cache_set()
1613 if (bch_cache_allocator_start(ca)) in run_cache_set()
1633 for_each_cache(ca, c, i) { in run_cache_set()
1636 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, in run_cache_set()
1639 for (j = 0; j < ca->sb.keys; j++) in run_cache_set()
1640 ca->sb.d[j] = ca->sb.first_bucket + j; in run_cache_set()
1646 for_each_cache(ca, c, i) in run_cache_set()
1647 if (bch_cache_allocator_start(ca)) in run_cache_set()
1651 for_each_cache(ca, c, i) in run_cache_set()
1652 bch_prio_write(ca); in run_cache_set()
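run_cache_set() takes one of two paths, both visible in the matches: a set previously marked CACHE_SYNC replays its persisted prios and journal, while a fresh set seeds per-device journal buckets before persisting prios for the first time. A condensed outline; the clamp bounds (2 and SB_JOURNAL_BUCKETS) and the jset pointer j are recalled from the surrounding function, not from the matches:

if (CACHE_SYNC(&c->sb)) {
	/* Existing set: read back persisted bucket priorities
	 * (j is the struct jset recovered by journal replay),
	 * then start the allocator threads. */
	for_each_cache(ca, c, i)
		prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

	for_each_cache(ca, c, i)
		if (bch_cache_allocator_start(ca))
			goto err;
} else {
	/* Fresh set: hand the first few buckets to the journal... */
	for_each_cache(ca, c, i) {
		unsigned j;

		/* ~1/128th of the buckets, bounded to a sane range. */
		ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
				      2, SB_JOURNAL_BUCKETS);

		for (j = 0; j < ca->sb.keys; j++)
			ca->sb.d[j] = ca->sb.first_bucket + j;
	}

	/* ...then start allocators and write out initial prios. */
	for_each_cache(ca, c, i)
		if (bch_cache_allocator_start(ca))
			goto err;

	for_each_cache(ca, c, i)
		bch_prio_write(ca);
}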
1704 static bool can_attach_cache(struct cache *ca, struct cache_set *c) in can_attach_cache() argument
1706 return ca->sb.block_size == c->sb.block_size && in can_attach_cache()
1707 ca->sb.bucket_size == c->sb.bucket_size && in can_attach_cache()
1708 ca->sb.nr_in_set == c->sb.nr_in_set; in can_attach_cache()
1711 static const char *register_cache_set(struct cache *ca) in register_cache_set() argument
1718 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
1719 if (c->cache[ca->sb.nr_this_dev]) in register_cache_set()
1722 if (!can_attach_cache(ca, c)) in register_cache_set()
1725 if (!CACHE_SYNC(&ca->sb)) in register_cache_set()
1731 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
1747 sprintf(buf, "cache%i", ca->sb.nr_this_dev); in register_cache_set()
1748 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
1749 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
1752 if (ca->sb.seq > c->sb.seq) { in register_cache_set()
1753 c->sb.version = ca->sb.version; in register_cache_set()
1754 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); in register_cache_set()
1755 c->sb.flags = ca->sb.flags; in register_cache_set()
1756 c->sb.seq = ca->sb.seq; in register_cache_set()
1760 kobject_get(&ca->kobj); in register_cache_set()
1761 ca->set = c; in register_cache_set()
1762 ca->set->cache[ca->sb.nr_this_dev] = ca; in register_cache_set()
1763 c->cache_by_alloc[c->caches_loaded++] = ca; in register_cache_set()
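register_cache_set() finds (or allocates) the set whose UUID matches the new cache, rejects duplicates and incompatible members via can_attach_cache(), and lets the member with the highest superblock seq dictate the set-wide view, since that member saw the most recent write. A condensed sketch; the sysfs link creation (lines 1747-1749), the CACHE_SYNC downgrade (line 1725), and the exact error strings are omitted or approximated:

static const char *register_cache_set_condensed(struct cache *ca)
{
	struct cache_set *c;

	/* Reuse an existing set with the same UUID if there is one. */
	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";
			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";
			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);

found:
	/* Newest superblock wins. */
	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq   = ca->sb.seq;
	}

	/* Wire the cache into the set's arrays. */
	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	return NULL;
}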
1778 struct cache *ca = container_of(kobj, struct cache, kobj); in bch_cache_release() local
1781 if (ca->set) { in bch_cache_release()
1782 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); in bch_cache_release()
1783 ca->set->cache[ca->sb.nr_this_dev] = NULL; in bch_cache_release()
1786 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); in bch_cache_release()
1787 kfree(ca->prio_buckets); in bch_cache_release()
1788 vfree(ca->buckets); in bch_cache_release()
1790 free_heap(&ca->heap); in bch_cache_release()
1791 free_fifo(&ca->free_inc); in bch_cache_release()
1794 free_fifo(&ca->free[i]); in bch_cache_release()
1796 if (ca->sb_bio.bi_inline_vecs[0].bv_page) in bch_cache_release()
1797 put_page(ca->sb_bio.bi_io_vec[0].bv_page); in bch_cache_release()
1799 if (!IS_ERR_OR_NULL(ca->bdev)) in bch_cache_release()
1800 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in bch_cache_release()
1802 kfree(ca); in bch_cache_release()
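The release path tears down exactly what cache_alloc() and register_cache() set up, in reverse: unhook from the set, free bucket metadata, free the reserve fifos and heap, drop the superblock page, and close the bdev. Reassembled below; the RESERVE_NR loop bound is recalled from the enum, since the loop header itself was not matched:

static void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	/* bi_io_vec points at bi_inline_vecs, so either name works. */
	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
}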
1806 static int cache_alloc(struct cache_sb *sb, struct cache *ca) in cache_alloc() argument
1812 kobject_init(&ca->kobj, &bch_cache_ktype); in cache_alloc()
1814 bio_init(&ca->journal.bio); in cache_alloc()
1815 ca->journal.bio.bi_max_vecs = 8; in cache_alloc()
1816 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; in cache_alloc()
1818 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; in cache_alloc()
1820 if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || in cache_alloc()
1821 !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || in cache_alloc()
1822 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || in cache_alloc()
1823 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || in cache_alloc()
1824 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || in cache_alloc()
1825 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || in cache_alloc()
1826 !(ca->buckets = vzalloc(sizeof(struct bucket) * in cache_alloc()
1827 ca->sb.nbuckets)) || in cache_alloc()
1828 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * in cache_alloc()
1830 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca))) in cache_alloc()
1833 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); in cache_alloc()
1835 for_each_bucket(b, ca) in cache_alloc()
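The failure-prone part of cache_alloc() is the chain of allocations at lines 1820-1830, all sized off free = roundup_pow_of_two(ca->sb.nbuckets) >> 10, i.e. roughly one reserve slot per thousand buckets. (The kzalloc at line 1828 is doubled by an unmatched continuation line, which is what lets line 1833 carve prio_last_buckets out of the second half of the same array.) A standalone, runnable illustration of the sizing; the helper stands in for the kernel macro:

#include <stdio.h>
#include <stdint.h>

static uint64_t roundup_pow_of_two_u64(uint64_t n)
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t nbuckets = 1000000;	/* example device size */
	uint64_t free = roundup_pow_of_two_u64(nbuckets) >> 10;

	printf("RESERVE_MOVINGGC / RESERVE_NONE fifos: %llu entries\n",
	       (unsigned long long)free);		/* 1024 */
	printf("free_inc: %llu entries, heap: %llu entries\n",
	       (unsigned long long)(free << 2),		/* 4096 */
	       (unsigned long long)(free << 3));	/* 8192 */
	return 0;
}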
1842 struct block_device *bdev, struct cache *ca) in register_cache() argument
1848 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); in register_cache()
1849 ca->bdev = bdev; in register_cache()
1850 ca->bdev->bd_holder = ca; in register_cache()
1852 bio_init(&ca->sb_bio); in register_cache()
1853 ca->sb_bio.bi_max_vecs = 1; in register_cache()
1854 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; in register_cache()
1855 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_cache()
1858 if (blk_queue_discard(bdev_get_queue(ca->bdev))) in register_cache()
1859 ca->discard = CACHE_DISCARD(&ca->sb); in register_cache()
1861 ret = cache_alloc(sb, ca); in register_cache()
1865 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { in register_cache()
1872 err = register_cache_set(ca); in register_cache()
1883 kobject_put(&ca->kobj); in register_cache()
1916 struct cache *ca; in bch_is_open_cache() local
1920 for_each_cache(ca, c, i) in bch_is_open_cache()
1921 if (ca->bdev == bdev) in bch_is_open_cache()
1983 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); in register_bcache() local
1984 if (!ca) in register_bcache()
1987 if (register_cache(sb, sb_page, bdev, ca) != 0) in register_bcache()
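Finally, register_bcache()'s cache branch: allocate a zeroed struct cache and hand everything to register_cache(), which owns cleanup from then on (note line 1883, where register_cache() puts its own kobject on failure). A sketch of the branch; the error label name is an assumption, standing in for whatever shared error path the function uses:

struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

if (!ca)
	goto err_close;		/* hypothetical label for the error path */

/* register_cache() consumes ca; on failure it has already dropped
 * its references, so the caller only cleans up the bdev. */
if (register_cache(sb, sb_page, bdev, ca) != 0)
	goto err_close;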