Lines matching refs: c (struct cache_set *)

195 	struct cache_set *c;  in STORE()  local
249 if (dc->disk.c) { in STORE()
250 memcpy(dc->disk.c->uuids[dc->disk.id].label, in STORE()
252 bch_uuid_write(dc->disk.c); in STORE()
269 list_for_each_entry(c, &bch_cache_sets, list) { in STORE()
270 v = bch_cached_dev_attach(dc, c); in STORE()
279 if (attr == &sysfs_detach && dc->disk.c) in STORE()
345 struct uuid_entry *u = &d->c->uuids[d->id]; in SHOW()
364 struct uuid_entry *u = &d->c->uuids[d->id]; in STORE()
373 bch_uuid_write(d->c); in STORE()
379 bch_uuid_write(d->c); in STORE()
418 static int bch_bset_print_stats(struct cache_set *c, char *buf) in bch_bset_print_stats() argument
426 ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); in bch_bset_print_stats()
444 static unsigned bch_root_usage(struct cache_set *c) in bch_root_usage() argument
456 b = c->root; in bch_root_usage()
458 } while (b != c->root); in bch_root_usage()
465 return (bytes * 100) / btree_bytes(c); in bch_root_usage()
468 static size_t bch_cache_size(struct cache_set *c) in bch_cache_size() argument
473 mutex_lock(&c->bucket_lock); in bch_cache_size()
474 list_for_each_entry(b, &c->btree_cache, list) in bch_cache_size()
477 mutex_unlock(&c->bucket_lock); in bch_cache_size()
481 static unsigned bch_cache_max_chain(struct cache_set *c) in bch_cache_max_chain() argument
486 mutex_lock(&c->bucket_lock); in bch_cache_max_chain()
488 for (h = c->bucket_hash; in bch_cache_max_chain()
489 h < c->bucket_hash + (1 << BUCKET_HASH_BITS); in bch_cache_max_chain()
500 mutex_unlock(&c->bucket_lock); in bch_cache_max_chain()
504 static unsigned bch_btree_used(struct cache_set *c) in bch_btree_used() argument
506 return div64_u64(c->gc_stats.key_bytes * 100, in bch_btree_used()
507 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); in bch_btree_used()
510 static unsigned bch_average_key_size(struct cache_set *c) in bch_average_key_size() argument
512 return c->gc_stats.nkeys in bch_average_key_size()
513 ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) in bch_average_key_size()
519 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in SHOW() local
521 sysfs_print(synchronous, CACHE_SYNC(&c->sb)); in SHOW()
522 sysfs_print(journal_delay_ms, c->journal_delay_ms); in SHOW()
523 sysfs_hprint(bucket_size, bucket_bytes(c)); in SHOW()
524 sysfs_hprint(block_size, block_bytes(c)); in SHOW()
525 sysfs_print(tree_depth, c->root->level); in SHOW()
526 sysfs_print(root_usage_percent, bch_root_usage(c)); in SHOW()
528 sysfs_hprint(btree_cache_size, bch_cache_size(c)); in SHOW()
529 sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c)); in SHOW()
530 sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); in SHOW()
532 sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); in SHOW()
533 sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); in SHOW()
534 sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); in SHOW()
535 sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); in SHOW()
537 sysfs_print(btree_used_percent, bch_btree_used(c)); in SHOW()
538 sysfs_print(btree_nodes, c->gc_stats.nodes); in SHOW()
539 sysfs_hprint(average_key_size, bch_average_key_size(c)); in SHOW()
542 atomic_long_read(&c->cache_read_races)); in SHOW()
545 atomic_long_read(&c->writeback_keys_done)); in SHOW()
547 atomic_long_read(&c->writeback_keys_failed)); in SHOW()
551 c->on_error); in SHOW()
554 sysfs_print(io_error_halflife, c->error_decay * 88); in SHOW()
555 sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT); in SHOW()
558 ((uint64_t) bch_get_congested(c)) << 9); in SHOW()
560 c->congested_read_threshold_us); in SHOW()
562 c->congested_write_threshold_us); in SHOW()
564 sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); in SHOW()
565 sysfs_printf(verify, "%i", c->verify); in SHOW()
566 sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); in SHOW()
568 "%i", c->expensive_debug_checks); in SHOW()
569 sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); in SHOW()
570 sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); in SHOW()
571 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); in SHOW()
574 return bch_bset_print_stats(c, buf); in SHOW()
582 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in STORE() local
585 bch_cache_set_unregister(c); in STORE()
588 bch_cache_set_stop(c); in STORE()
593 if (sync != CACHE_SYNC(&c->sb)) { in STORE()
594 SET_CACHE_SYNC(&c->sb, sync); in STORE()
595 bcache_write_super(c); in STORE()
604 r = bch_flash_dev_create(c, v); in STORE()
610 atomic_long_set(&c->writeback_keys_done, 0); in STORE()
611 atomic_long_set(&c->writeback_keys_failed, 0); in STORE()
613 memset(&c->gc_stats, 0, sizeof(struct gc_stat)); in STORE()
614 bch_cache_accounting_clear(&c->accounting); in STORE()
618 wake_up_gc(c); in STORE()
624 c->shrink.scan_objects(&c->shrink, &sc); in STORE()
628 c->congested_read_threshold_us); in STORE()
630 c->congested_write_threshold_us); in STORE()
638 c->on_error = v; in STORE()
642 c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; in STORE()
646 c->error_decay = strtoul_or_return(buf) / 88; in STORE()
648 sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); in STORE()
649 sysfs_strtoul(verify, c->verify); in STORE()
650 sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); in STORE()
651 sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); in STORE()
652 sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); in STORE()
653 sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); in STORE()
654 sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); in STORE()
662 struct cache_set *c = container_of(kobj, struct cache_set, internal); in SHOW() local
663 return bch_cache_set_show(&c->kobj, attr, buf); in SHOW()
668 struct cache_set *c = container_of(kobj, struct cache_set, internal); in STORE() local
669 return bch_cache_set_store(&c->kobj, attr, buf, size); in STORE()
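Most of the references above sit inside SHOW()/STORE() handlers that follow the same two-step shape: first recover the enclosing struct cache_set from the kobject the handler was called on with container_of() (lines 519, 582, 662, 668), then dispatch on the attribute with per-file print/parse macros such as sysfs_print() and sysfs_strtoul(). The sketch below is a minimal, self-contained userspace illustration of that shape only; it is not the bcache code, and the macro bodies, struct layouts, and attribute set are simplified stand-ins (only the field names journal_delay_ms and congested_read_threshold_us are taken from the listing above).

/*
 * Minimal userspace sketch of the pattern shared by the SHOW()/STORE()
 * references above.  NOT the bcache code: the macro bodies, the
 * kobject/attribute structs, and the dispatch are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

#define PAGE_SIZE 4096

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject   { const char *name; };
struct attribute { const char *name; };

struct cache_set {
	struct kobject	kobj;		/* main sysfs directory      */
	struct kobject	internal;	/* "internal/" subdirectory  */
	unsigned	journal_delay_ms;
	unsigned	congested_read_threshold_us;
};

static struct attribute sysfs_journal_delay_ms = { "journal_delay_ms" };
static struct attribute sysfs_congested_read_threshold_us = {
	"congested_read_threshold_us"
};

/* If attr names this file, format var into buf and return its length. */
#define sysfs_print(file, var)					\
do {								\
	if (attr == &sysfs_##file)				\
		return snprintf(buf, PAGE_SIZE, "%u\n", (var));	\
} while (0)

/* If attr names this file, parse buf into var and consume the write. */
#define sysfs_strtoul(file, var)				\
do {								\
	if (attr == &sysfs_##file) {				\
		(var) = strtoul(buf, NULL, 10);			\
		return size;					\
	}							\
} while (0)

static ssize_t cache_set_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	/* Step 1: recover c from the kobject, as at lines 519/582. */
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	/* Step 2: dispatch on the attribute, one macro line per file. */
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	return 0;
}

static ssize_t cache_set_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t size)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	return size;
}

/*
 * The "internal" kobject's handlers (lines 662-669) only translate their
 * own kobject back to the cache_set and forward to the main handlers.
 */
static ssize_t cache_set_internal_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return cache_set_show(&c->kobj, attr, buf);
}

int main(void)
{
	struct cache_set cs = { .journal_delay_ms = 100 };
	char buf[PAGE_SIZE];

	cache_set_store(&cs.kobj, &sysfs_journal_delay_ms, "250", 3);
	cache_set_internal_show(&cs.internal, &sysfs_journal_delay_ms, buf);
	fputs(buf, stdout);	/* prints "250" */
	return 0;
}

Forwarding from the internal kobject rather than duplicating the handlers keeps every attribute's logic in one place; the only difference between the two sysfs directories is which embedded member container_of() starts from.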