Searched refs:cblock (Results 1 - 11 of 11) sorted by relevance

/linux-4.4.14/drivers/md/
H A Ddm-cache-policy-internal.h26 static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) policy_lookup() argument
29 return p->lookup(p, oblock, cblock); policy_lookup()
45 dm_oblock_t oblock, dm_cblock_t cblock, policy_load_mapping()
48 return p->load_mapping(p, oblock, cblock, hint, hint_valid); policy_load_mapping()
59 dm_cblock_t *cblock, policy_writeback_work()
62 return p->writeback_work ? p->writeback_work(p, oblock, cblock, critical_only) : -ENOENT; policy_writeback_work()
70 static inline int policy_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) policy_remove_cblock() argument
72 return p->remove_cblock(p, cblock); policy_remove_cblock()
44 policy_load_mapping(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t cblock, uint32_t hint, bool hint_valid) policy_load_mapping() argument
57 policy_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock, bool critical_only) policy_writeback_work() argument
H A Ddm-cache-metadata.h82 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
83 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
87 dm_cblock_t cblock, bool dirty,
94 int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
123 * The policy is invited to save a 32bit hint value for every cblock (eg,
128 * The hints are indexed by the cblock, but many policies will not
129 * necessarily have a fast way of accessing efficiently via cblock. So
130 * rather than querying the policy for each cblock, we let it walk its data
H A Ddm-cache-policy-cleaner.c27 dm_cblock_t cblock; member in struct:wb_cache_entry
192 result->cblock = e->cblock; wb_map()
201 static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock) wb_lookup() argument
213 *cblock = e->cblock; wb_lookup()
277 dm_oblock_t oblock, dm_cblock_t cblock, wb_load_mapping()
285 e->cblock = cblock; wb_load_mapping()
362 dm_cblock_t *cblock, wb_writeback_work()
375 *cblock = e->cblock; wb_writeback_work()
276 wb_load_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t cblock, uint32_t hint, bool hint_valid) wb_load_mapping() argument
360 wb_writeback_work(struct dm_cache_policy *pe, dm_oblock_t *oblock, dm_cblock_t *cblock, bool critical_only) wb_writeback_work() argument
H A Ddm-cache-policy.h90 dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */ member in struct:policy_result
93 typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
149 int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
159 dm_cblock_t cblock, uint32_t hint, bool hint_valid);
174 * possible the particular cblock has already been removed due to a
178 int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
187 * 0 and @cblock,@oblock: block to write back provided
191 int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock,
H A Ddm-cache-target.c105 * cblock: index of a cache block
350 dm_cblock_t cblock; member in struct:per_bio_data
361 dm_cblock_t cblock; member in struct:dm_cache_migration
580 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) set_dirty() argument
582 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { set_dirty()
588 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) clear_dirty() argument
590 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { clear_dirty()
770 dm_cblock_t cblock) remap_to_cache()
773 sector_t block = from_cblock(cblock); remap_to_cache()
811 dm_oblock_t oblock, dm_cblock_t cblock) remap_to_cache_dirty()
814 remap_to_cache(cache, bio, cblock); remap_to_cache_dirty()
816 set_dirty(cache, oblock, cblock); remap_to_cache_dirty()
933 remap_to_cache(pb->cache, bio, pb->cblock); writethrough_endio()
950 dm_oblock_t oblock, dm_cblock_t cblock) remap_to_origin_then_cache()
955 pb->cblock = cblock; remap_to_origin_then_cache()
1135 set_dirty(cache, mg->old_oblock, mg->cblock); migration_failure()
1161 clear_dirty(cache, mg->old_oblock, mg->cblock); migration_success_pre_commit()
1167 r = dm_cache_remove_mapping(cache->cmd, mg->cblock); migration_success_pre_commit()
1180 r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock); migration_success_pre_commit()
1225 clear_dirty(cache, mg->new_oblock, mg->cblock); migration_success_post_commit()
1231 set_dirty(cache, mg->new_oblock, mg->cblock); migration_success_post_commit()
1260 sector_t cblock = from_cblock(mg->cblock); issue_copy() local
1266 c_region.sector = cblock * cache->sectors_per_block; issue_copy()
1313 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); issue_overwrite()
1377 avoid = !is_dirty(cache, mg->cblock) || issue_copy_or_discard()
1470 dm_oblock_t oblock, dm_cblock_t cblock, promote()
1484 mg->cblock = cblock; promote()
1494 dm_oblock_t oblock, dm_cblock_t cblock, writeback()
1508 mg->cblock = cblock; writeback()
1519 dm_cblock_t cblock, demote_then_promote()
1535 mg->cblock = cblock; demote_then_promote()
1549 dm_oblock_t oblock, dm_cblock_t cblock, invalidate()
1563 mg->cblock = cblock; invalidate()
1737 dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder) remap_cell_to_cache_dirty()
1753 remap_to_cache(cache, cell->holder, cblock); remap_cell_to_cache_dirty()
1760 set_dirty(cache, oblock, cblock); remap_cell_to_cache_dirty()
1765 remap_to_cache(cache, bio, cblock); remap_cell_to_cache_dirty()
1837 invalidate(cache, structs, block, lookup_result.cblock, new_ocell); process_cell()
1850 !is_dirty(cache, lookup_result.cblock)) { process_cell()
1851 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); process_cell()
1855 remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true); process_cell()
1870 promote(cache, structs, block, lookup_result.cblock, new_ocell); process_cell()
1878 block, lookup_result.cblock, process_cell()
2077 dm_cblock_t cblock; writeback_some_dirty_blocks() local
2085 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) writeback_some_dirty_blocks()
2095 writeback(cache, &structs, oblock, cblock, old_ocell); writeback_some_dirty_blocks()
3113 !is_dirty(cache, lookup_result.cblock)) { cache_map()
3114 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); cache_map()
3120 remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false); cache_map()
3283 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, load_mapping() argument
3289 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); load_mapping()
3294 set_dirty(cache, oblock, cblock); load_mapping()
3296 clear_dirty(cache, oblock, cblock); load_mapping()
3627 * i) A single cblock, eg. '3456'
3628 * ii) A begin and end cblock with dots between, eg. 123-234
3663 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); parse_cblock_range()
3674 DMERR("%s: begin cblock out of range: %llu >= %llu", validate_cblock_range()
3680 DMERR("%s: end cblock out of range: %llu > %llu", validate_cblock_range()
3686 DMERR("%s: invalid cblock range: %llu >= %llu", validate_cblock_range()
769 remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) remap_to_cache() argument
810 remap_to_cache_dirty(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) remap_to_cache_dirty() argument
949 remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) remap_to_origin_then_cache() argument
1469 promote(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) promote() argument
1493 writeback(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) writeback() argument
1517 demote_then_promote(struct cache *cache, struct prealloc *structs, dm_oblock_t old_oblock, dm_oblock_t new_oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *old_ocell, struct dm_bio_prison_cell *new_ocell) demote_then_promote() argument
1548 invalidate(struct cache *cache, struct prealloc *structs, dm_oblock_t oblock, dm_cblock_t cblock, struct dm_bio_prison_cell *cell) invalidate() argument
1736 remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell, dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder) remap_cell_to_cache_dirty() argument
H A Ddm-cache-policy-mq.c319 * Rather than storing the cblock in an entry, we allocate all entries in
320 * an array, and infer the cblock from the entry position.
370 * This assumes the cblock hasn't already been allocated.
372 static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) alloc_particular_entry() argument
374 struct entry *e = ep->entries + from_cblock(cblock); alloc_particular_entry()
394 static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) epool_find() argument
396 struct entry *e = ep->entries + from_cblock(cblock); epool_find()
795 result->cblock = infer_cblock(&mq->cache_pool, e); cache_entry_found()
812 /* Ensure there's a free cblock in the cache */ pre_cache_to_cache()
835 result->cblock = infer_cblock(&mq->cache_pool, new_e); pre_cache_to_cache()
917 result->cblock = infer_cblock(&mq->cache_pool, e); insert_in_cache()
1053 static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) mq_lookup() argument
1064 *cblock = infer_cblock(&mq->cache_pool, e); mq_lookup()
1105 dm_oblock_t oblock, dm_cblock_t cblock, mq_load_mapping()
1111 e = alloc_particular_entry(&mq->cache_pool, cblock); mq_load_mapping()
1180 static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock) __remove_cblock() argument
1182 struct entry *e = epool_find(&mq->cache_pool, cblock); __remove_cblock()
1193 static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) mq_remove_cblock() argument
1199 r = __remove_cblock(mq, cblock); mq_remove_cblock()
1220 dm_cblock_t *cblock) __mq_writeback_work()
1231 *cblock = infer_cblock(&mq->cache_pool, e); __mq_writeback_work()
1239 dm_cblock_t *cblock, bool critical_only) mq_writeback_work()
1245 r = __mq_writeback_work(mq, oblock, cblock); mq_writeback_work()
1104 mq_load_mapping(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t cblock, uint32_t hint, bool hint_valid) mq_load_mapping() argument
1219 __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock) __mq_writeback_work() argument
1238 mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock, bool critical_only) mq_writeback_work() argument
H A Ddm-cache-metadata.c669 * little-endian format. The index is the cblock, the high 48bits of the
1071 static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock) __remove() argument
1077 r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), __remove()
1086 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock) dm_cache_remove_mapping() argument
1091 r = __remove(cmd, cblock); dm_cache_remove_mapping()
1098 dm_cblock_t cblock, dm_oblock_t oblock) __insert()
1104 r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), __insert()
1114 dm_cblock_t cblock, dm_oblock_t oblock) dm_cache_insert_mapping()
1119 r = __insert(cmd, cblock, oblock); dm_cache_insert_mapping()
1174 static int __load_mapping(void *context, uint64_t cblock, void *leaf) __load_mapping() argument
1191 cblock, &hint_value); __load_mapping()
1197 r = thunk->fn(thunk->context, oblock, to_cblock(cblock), __load_mapping()
1233 static int __dump_mapping(void *context, uint64_t cblock, void *leaf) __dump_mapping() argument
1269 static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty) __dirty() argument
1276 r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value); __dirty()
1289 r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), __dirty()
1300 dm_cblock_t cblock, bool dirty) dm_cache_set_dirty()
1305 r = __dirty(cmd, cblock, dirty); dm_cache_set_dirty()
1414 static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint) save_hint() argument
1423 from_cblock(cblock), &value, &cmd->hint_root); save_hint()
1097 __insert(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock) __insert() argument
1113 dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock) dm_cache_insert_mapping() argument
1299 dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty) dm_cache_set_dirty() argument
H A Ddm-cache-policy-smq.c720 * This assumes the cblock hasn't already been allocated.
1180 result->cblock = infer_cblock(mq, e); insert_in_cache()
1247 result->cblock = infer_cblock(mq, e); map()
1311 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) smq_lookup() argument
1321 *cblock = infer_cblock(mq, e); smq_lookup()
1363 dm_oblock_t oblock, dm_cblock_t cblock, smq_load_mapping()
1369 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); smq_load_mapping()
1436 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock) __remove_cblock() argument
1438 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); __remove_cblock()
1449 static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) smq_remove_cblock() argument
1456 r = __remove_cblock(mq, cblock); smq_remove_cblock()
1481 dm_cblock_t *cblock, bool critical_only) __smq_writeback_work()
1499 *cblock = infer_cblock(mq, e); __smq_writeback_work()
1507 dm_cblock_t *cblock, bool critical_only) smq_writeback_work()
1514 r = __smq_writeback_work(mq, oblock, cblock, critical_only); smq_writeback_work()
1362 smq_load_mapping(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t cblock, uint32_t hint, bool hint_valid) smq_load_mapping() argument
1480 __smq_writeback_work(struct smq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock, bool critical_only) __smq_writeback_work() argument
1506 smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock, bool critical_only) smq_writeback_work() argument
/linux-4.4.14/drivers/isdn/act2000/
H A Dact2000_isa.c404 act2000_ddef cblock; act2000_isa_download() local
409 if (copy_from_user(&cblock, cb, sizeof(cblock))) act2000_isa_download()
411 length = cblock.length; act2000_isa_download()
412 p = cblock.buffer; act2000_isa_download()
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_btree.c2609 struct xfs_buf *cbp; /* buffer for cblock */ xfs_btree_new_iroot()
2611 struct xfs_btree_block *cblock; /* child btree block */ xfs_btree_new_iroot() local
2644 error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp); xfs_btree_new_iroot()
2652 memcpy(cblock, block, xfs_btree_block_len(cur)); xfs_btree_new_iroot()
2655 cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn); xfs_btree_new_iroot()
2657 cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn); xfs_btree_new_iroot()
2666 ckp = xfs_btree_key_addr(cur, 1, cblock); xfs_btree_new_iroot()
2667 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock)); xfs_btree_new_iroot()
2669 cpp = xfs_btree_ptr_addr(cur, 1, cblock); xfs_btree_new_iroot()
2671 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { xfs_btree_new_iroot()
2677 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); xfs_btree_new_iroot()
2687 1 - xfs_btree_get_numrecs(cblock), xfs_btree_new_iroot()
2697 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); xfs_btree_new_iroot()
2698 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); xfs_btree_new_iroot()
3203 struct xfs_btree_block *cblock; xfs_btree_kill_iroot() local
3237 cblock = xfs_btree_get_block(cur, level - 1, &cbp); xfs_btree_kill_iroot()
3238 numrecs = xfs_btree_get_numrecs(cblock); xfs_btree_kill_iroot()
3265 ASSERT(block->bb_numrecs == cblock->bb_numrecs); xfs_btree_kill_iroot()
3268 ckp = xfs_btree_key_addr(cur, 1, cblock); xfs_btree_kill_iroot()
3272 cpp = xfs_btree_ptr_addr(cur, 1, cblock); xfs_btree_kill_iroot()
H A Dxfs_bmap.c668 struct xfs_btree_block *cblock;/* child btree block */ xfs_bmap_btree_to_extents() local
696 cblock = XFS_BUF_TO_BLOCK(cbp); xfs_bmap_btree_to_extents()
697 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) xfs_bmap_btree_to_extents()

Completed in 250 milliseconds