Home
last modified time | relevance | path

Searched refs:cache_set (Results 1 – 21 of 21) sorted by relevance

/linux-4.4.14/drivers/md/bcache/
bcache.h:251 struct cache_set *c;
381 struct cache_set *set;
471 struct cache_set { struct
700 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) in sector_to_bucket()
705 static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) in bucket_to_sector()
710 static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) in bucket_remainder()
715 static inline struct cache *PTR_CACHE(struct cache_set *c, in PTR_CACHE()
722 static inline size_t PTR_BUCKET_NR(struct cache_set *c, in PTR_BUCKET_NR()
729 static inline struct bucket *PTR_BUCKET(struct cache_set *c, in PTR_BUCKET()
742 static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, in ptr_stale()
[all …]
btree.h:127 struct cache_set *c;
191 static inline void set_gc_sectors(struct cache_set *c) in set_gc_sectors()
196 void bkey_put(struct cache_set *c, struct bkey *k);
245 struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
247 struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
252 int bch_btree_insert(struct cache_set *, struct keylist *,
255 int bch_gc_thread_start(struct cache_set *);
256 void bch_initial_gc_finish(struct cache_set *);
257 void bch_moving_gc(struct cache_set *);
258 int bch_btree_check(struct cache_set *);
[all …]
journal.h:96 struct cache_set *c;
165 struct cache_set;
169 atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
171 void bch_journal_mark(struct cache_set *, struct list_head *);
172 void bch_journal_meta(struct cache_set *, struct closure *);
173 int bch_journal_read(struct cache_set *, struct list_head *);
174 int bch_journal_replay(struct cache_set *, struct list_head *);
176 void bch_journal_free(struct cache_set *);
177 int bch_journal_alloc(struct cache_set *);
stats.h:41 struct cache_set;
55 void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
57 void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
58 void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
59 void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
extents.h:8 struct cache_set;
11 bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
12 bool __bch_extent_invalid(struct cache_set *, const struct bkey *);
io.c:16 void bch_bbio_free(struct bio *bio, struct cache_set *c) in bch_bbio_free()
22 struct bio *bch_bbio_alloc(struct cache_set *c) in bch_bbio_alloc()
35 void __bch_submit_bbio(struct bio *bio, struct cache_set *c) in __bch_submit_bbio()
46 void bch_submit_bbio(struct bio *bio, struct cache_set *c, in bch_submit_bbio()
108 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, in bch_bbio_count_io_errors()
137 void bch_bbio_endio(struct cache_set *c, struct bio *bio, in bch_bbio_endio()
debug.h:6 struct cache_set;
29 void bch_debug_init_cache_set(struct cache_set *);
31 static inline void bch_debug_init_cache_set(struct cache_set *c) {} in bch_debug_init_cache_set()
journal.c:144 int bch_journal_read(struct cache_set *c, struct list_head *list) in bch_journal_read()
266 void bch_journal_mark(struct cache_set *c, struct list_head *list) in bch_journal_mark()
312 int bch_journal_replay(struct cache_set *s, struct list_head *list) in bch_journal_replay()
366 static void btree_flush_write(struct cache_set *c) in btree_flush_write()
467 static void journal_reclaim(struct cache_set *c) in journal_reclaim()
573 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlock()
582 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlocked()
658 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write()
664 static void journal_try_write(struct cache_set *c) in journal_try_write()
680 static struct journal_write *journal_wait_for_write(struct cache_set *c, in journal_wait_for_write()
[all …]
super.c:280 struct cache_set *c = container_of(cl, struct cache_set, sb_write); in bcache_write_super_unlock()
285 void bcache_write_super(struct cache_set *c) in bcache_write_super()
322 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio()
331 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_io_unlock()
336 static void uuid_io(struct cache_set *c, unsigned long rw, in uuid_io()
376 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read()
417 static int __uuid_write(struct cache_set *c) in __uuid_write()
437 int bch_uuid_write(struct cache_set *c) in bch_uuid_write()
447 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find()
459 static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty()
[all …]
sysfs.c:195 struct cache_set *c; in STORE()
418 static int bch_bset_print_stats(struct cache_set *c, char *buf) in bch_bset_print_stats()
444 static unsigned bch_root_usage(struct cache_set *c) in bch_root_usage()
468 static size_t bch_cache_size(struct cache_set *c) in bch_cache_size()
481 static unsigned bch_cache_max_chain(struct cache_set *c) in bch_cache_max_chain()
504 static unsigned bch_btree_used(struct cache_set *c) in bch_btree_used()
510 static unsigned bch_average_key_size(struct cache_set *c) in bch_average_key_size()
519 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in SHOW()
582 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in STORE()
662 struct cache_set *c = container_of(kobj, struct cache_set, internal); in SHOW()
[all …]
request.h:6 struct cache_set *c;
35 unsigned bch_get_congested(struct cache_set *);
btree.c:178 void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put()
600 static struct btree *mca_bucket_alloc(struct cache_set *c, in mca_bucket_alloc()
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_scan()
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_count()
739 void bch_btree_cache_free(struct cache_set *c) in bch_btree_cache_free()
781 int bch_btree_cache_alloc(struct cache_set *c) in bch_btree_cache_alloc()
818 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash()
823 static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find()
837 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock()
852 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize()
[all …]
movinggc.c:23 struct cache_set *c = container_of(buf, struct cache_set, in moving_pred()
129 static void read_moving(struct cache_set *c) in read_moving()
199 void bch_moving_gc(struct cache_set *c) in bch_moving_gc()
252 void bch_moving_init_cache_set(struct cache_set *c) in bch_moving_init_cache_set()
alloc.c:84 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities()
453 void bch_bucket_free(struct cache_set *c, struct bkey *k) in bch_bucket_free()
462 int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, in __bch_bucket_alloc_set()
495 int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, in bch_bucket_alloc_set()
533 static struct open_bucket *pick_data_bucket(struct cache_set *c, in pick_data_bucket()
571 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, in bch_alloc_sectors()
658 void bch_open_buckets_free(struct cache_set *c) in bch_open_buckets_free()
670 int bch_open_buckets_alloc(struct cache_set *c) in bch_open_buckets_alloc()
stats.c:198 void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, in bch_mark_cache_accounting()
206 void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) in bch_mark_cache_readahead()
213 void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) in bch_mark_cache_miss_collision()
220 void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, in bch_mark_sectors_bypassed()
extents.c:46 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid()
67 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status()
148 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid()
312 struct cache_set *c, in bch_subtract_dirty()
326 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
477 bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid()
debug.c:152 struct cache_set *c;
199 struct cache_set *c = inode->i_private; in bch_dump_open()
227 void bch_debug_init_cache_set(struct cache_set *c) in bch_debug_init_cache_set()
writeback.h:86 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
writeback.c:23 struct cache_set *c = dc->disk.c; in __update_writeback_rate()
289 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, in bcache_dev_sectors_dirty_add()
request.c:101 struct cache_set *c) in bch_keylist_realloc()
330 unsigned bch_get_congested(struct cache_set *c) in bch_get_congested()
373 struct cache_set *c = dc->disk.c; in check_should_bypass()
/linux-4.4.14/include/trace/events/
bcache.h:151 TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
193 DECLARE_EVENT_CLASS(cache_set,
194 TP_PROTO(struct cache_set *c),
213 DEFINE_EVENT(cache_set, bcache_journal_full,
214 TP_PROTO(struct cache_set *c),
218 DEFINE_EVENT(cache_set, bcache_journal_entry_full,
219 TP_PROTO(struct cache_set *c),
230 DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
231 TP_PROTO(struct cache_set *c),
264 DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
[all …]