Lines matching refs: c (struct dm_bufio_client * usages in dm-bufio.c)
148 struct dm_bufio_client *c; member
159 static inline int dm_bufio_cache_index(struct dm_bufio_client *c) in dm_bufio_cache_index() argument
161 unsigned ret = c->blocks_per_page_bits - 1; in dm_bufio_cache_index()
168 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)]) argument
169 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)]) argument
173 static void dm_bufio_lock(struct dm_bufio_client *c) in dm_bufio_lock() argument
175 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
178 static int dm_bufio_trylock(struct dm_bufio_client *c) in dm_bufio_trylock() argument
180 return mutex_trylock(&c->lock); in dm_bufio_trylock()
183 static void dm_bufio_unlock(struct dm_bufio_client *c) in dm_bufio_unlock() argument
185 mutex_unlock(&c->lock); in dm_bufio_unlock()
259 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) in __find() argument
261 struct rb_node *n = c->buffer_tree.rb_node; in __find()
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
278 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL; in __insert()
295 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
300 rb_erase(&b->node, &c->buffer_tree); in __remove()
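The __find()/__insert()/__remove() entries above show only the helpers' entry points. A minimal sketch of the lookup walk they imply, keyed by b->block (the member name follows mainline dm-bufio and is an assumption here), not a verbatim copy of the file:

static struct dm_buffer *find_sketch(struct dm_bufio_client *c, sector_t block)
{
	/* Walk the per-client red-black tree of cached buffers. */
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;		/* cache hit */

		/* The comparison direction must mirror the one used in __insert(). */
		n = (block < b->block) ? n->rb_left : n->rb_right;
	}

	return NULL;				/* not cached */
}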
369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument
375 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { in alloc_buffer_data()
377 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); in alloc_buffer_data()
380 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT && in alloc_buffer_data()
384 c->pages_per_block_bits); in alloc_buffer_data()
402 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); in alloc_buffer_data()
413 static void free_buffer_data(struct dm_bufio_client *c, in free_buffer_data() argument
418 kmem_cache_free(DM_BUFIO_CACHE(c), data); in free_buffer_data()
422 free_pages((unsigned long)data, c->pages_per_block_bits); in free_buffer_data()
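The alloc_buffer_data()/free_buffer_data() lines above pick between three backing stores based on c->block_size. A hedged sketch of that decision; the DATA_MODE_* constants and the data_mode parameter type are assumptions taken from mainline dm-bufio, not from this listing:

static void *alloc_buffer_data_sketch(struct dm_bufio_client *c, gfp_t gfp_mask,
				      unsigned char *data_mode)
{
	/* Small blocks come from a per-block-size slab cache. */
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	/* Page-sized blocks can use whole pages when the caller allows a
	 * cheap, non-retrying allocation. */
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	/* Large blocks fall back to vmalloc. */
	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
}

free_buffer_data() then dispatches on the recorded data_mode, as the kmem_cache_free()/free_pages() lines above show.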
439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument
441 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer()
447 b->c = c; in alloc_buffer()
449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
455 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
465 struct dm_bufio_client *c = b->c; in free_buffer() local
467 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
469 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
478 struct dm_bufio_client *c = b->c; in __link_buffer() local
480 c->n_buffers[dirty]++; in __link_buffer()
483 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
484 __insert(b->c, b); in __link_buffer()
493 struct dm_bufio_client *c = b->c; in __unlink_buffer() local
495 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
497 c->n_buffers[b->list_mode]--; in __unlink_buffer()
498 __remove(b->c, b); in __unlink_buffer()
507 struct dm_bufio_client *c = b->c; in __relink_lru() local
509 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
511 c->n_buffers[b->list_mode]--; in __relink_lru()
512 c->n_buffers[dirty]++; in __relink_lru()
514 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
559 .client = b->c->dm_io, in use_dmio()
562 .bdev = b->c->bdev, in use_dmio()
563 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
564 .count = b->c->block_size >> SECTOR_SHIFT, in use_dmio()
604 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
605 b->bio.bi_bdev = b->c->bdev; in use_inline_bio()
618 len = b->c->block_size; in use_inline_bio()
629 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
644 if (rw == WRITE && b->c->write_callback) in submit_io()
645 b->c->write_callback(b); in submit_io()
647 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && in submit_io()
670 struct dm_bufio_client *c = b->c; in write_endio() local
671 (void)cmpxchg(&c->async_write_error, 0, error); in write_endio()
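The use_dmio() lines set up an asynchronous dm-io transfer covering the whole block. A hedged sketch of how those fields fit together; the _sketch-suffixed names are mine, and the two-argument end_io convention matches the older bio completion signature this listing appears to use:

static void dmio_complete_sketch(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	/* Route the dm-io completion into the buffer's bio end_io handler. */
	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio_sketch(struct dm_buffer *b, int rw, sector_t block,
			    bio_end_io_t *end_io)
{
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete_sketch,
		.notify.context = b,
		.client = b->c->dm_io,
		.mem.type = DM_IO_VMA,		/* DM_IO_KMEM for slab/page-backed data */
		.mem.ptr.vma = b->data,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};
	int r;

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);		/* submission failed synchronously */
}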
742 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) in __get_unclaimed_buffer() argument
746 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
758 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
779 static void __wait_for_free_buffer(struct dm_bufio_client *c) in __wait_for_free_buffer() argument
783 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
785 dm_bufio_unlock(c); in __wait_for_free_buffer()
789 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
791 dm_bufio_lock(c); in __wait_for_free_buffer()
807 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag n… in __alloc_buffer_wait_no_callback() argument
825 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
833 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
834 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
837 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
842 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
846 __wait_for_free_buffer(c); in __alloc_buffer_wait_no_callback()
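Taken together, the __alloc_buffer_wait_no_callback() fragments describe a fallback ladder for obtaining a buffer under memory pressure. A simplified, hedged sketch of that order (the prefetch early-exit and cache-size special cases are omitted):

static struct dm_buffer *alloc_buffer_wait_sketch(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	while (1) {
		/* 1) Cheap, non-blocking allocation attempt. */
		b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN);
		if (b)
			return b;

		/* 2) Dip into the client's reserved buffers. */
		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;
			return b;
		}

		/* 3) Evict a clean, unheld buffer from the cache. */
		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		/* 4) Sleep until another holder releases a buffer
		 *    (__wait_for_free_buffer() drops and retakes c->lock). */
		__wait_for_free_buffer(c);
	}
}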
850 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait() argument
852 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait()
857 if (c->alloc_callback) in __alloc_buffer_wait()
858 c->alloc_callback(b); in __alloc_buffer_wait()
868 struct dm_bufio_client *c = b->c; in __free_buffer_wake() local
870 if (!c->need_reserved_buffers) in __free_buffer_wake()
873 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
874 c->need_reserved_buffers--; in __free_buffer_wake()
877 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
880 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, in __write_dirty_buffers_async() argument
885 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
905 static void __get_memory_limit(struct dm_bufio_client *c, in __get_memory_limit() argument
918 (c->sectors_per_block_bits + SECTOR_SHIFT); in __get_memory_limit()
920 if (buffers < c->minimum_buffers) in __get_memory_limit()
921 buffers = c->minimum_buffers; in __get_memory_limit()
932 static void __check_watermark(struct dm_bufio_client *c, in __check_watermark() argument
937 __get_memory_limit(c, &threshold_buffers, &limit_buffers); in __check_watermark()
939 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] > in __check_watermark()
942 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark()
951 if (c->n_buffers[LIST_DIRTY] > threshold_buffers) in __check_watermark()
952 __write_dirty_buffers_async(c, 1, write_list); in __check_watermark()
959 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
967 b = __find(c, block); in __bufio_new()
974 new_b = __alloc_buffer_wait(c, nf); in __bufio_new()
982 b = __find(c, block); in __bufio_new()
988 __check_watermark(c, write_list); in __bufio_new()
1050 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1058 dm_bufio_lock(c); in new_read()
1059 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1060 dm_bufio_unlock(c); in new_read()
1085 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1088 return new_read(c, block, NF_GET, bp); in dm_bufio_get()
1092 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1097 return new_read(c, block, NF_READ, bp); in dm_bufio_read()
1101 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1106 return new_read(c, block, NF_FRESH, bp); in dm_bufio_new()
1110 void dm_bufio_prefetch(struct dm_bufio_client *c, in dm_bufio_prefetch() argument
1120 dm_bufio_lock(c); in dm_bufio_prefetch()
1125 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1128 dm_bufio_unlock(c); in dm_bufio_prefetch()
1132 dm_bufio_lock(c); in dm_bufio_prefetch()
1135 dm_bufio_unlock(c); in dm_bufio_prefetch()
1145 dm_bufio_lock(c); in dm_bufio_prefetch()
1149 dm_bufio_unlock(c); in dm_bufio_prefetch()
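dm_bufio_get()/dm_bufio_read()/dm_bufio_new()/dm_bufio_prefetch() are the client-facing read path built on new_read(). A hedged usage sketch from a hypothetical caller (my_read_block() is illustrative, not from the listing):

static int my_read_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *bp;
	void *data;

	/* Optionally warm the cache for blocks expected to be needed soon. */
	dm_bufio_prefetch(c, block + 1, 8);

	data = dm_bufio_read(c, block, &bp);	/* reads from disk if not cached */
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... use the c->block_size bytes at 'data' while the buffer is held ... */

	dm_bufio_release(bp);			/* drop the hold; data stays cached */
	return 0;
}

dm_bufio_get() is the non-blocking variant: it returns the data only if the block is already cached, otherwise NULL.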
1158 struct dm_bufio_client *c = b->c; in dm_bufio_release() local
1160 dm_bufio_lock(c); in dm_bufio_release()
1166 wake_up(&c->free_buffer_wait); in dm_bufio_release()
1182 dm_bufio_unlock(c); in dm_bufio_release()
1188 struct dm_bufio_client *c = b->c; in dm_bufio_mark_buffer_dirty() local
1190 dm_bufio_lock(c); in dm_bufio_mark_buffer_dirty()
1197 dm_bufio_unlock(c); in dm_bufio_mark_buffer_dirty()
1201 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers_async() argument
1207 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers_async()
1208 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers_async()
1209 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers_async()
1221 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers() argument
1229 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1230 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers()
1231 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1233 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1236 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1239 if (buffers_processed < c->n_buffers[LIST_DIRTY]) in dm_bufio_write_dirty_buffers()
1245 if (buffers_processed < c->n_buffers[LIST_DIRTY]) { in dm_bufio_write_dirty_buffers()
1248 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1251 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1281 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
1282 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1284 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
1285 f = dm_bufio_issue_flush(c); in dm_bufio_write_dirty_buffers()
1296 int dm_bufio_issue_flush(struct dm_bufio_client *c) in dm_bufio_issue_flush() argument
1302 .client = c->dm_io, in dm_bufio_issue_flush()
1305 .bdev = c->bdev, in dm_bufio_issue_flush()
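The mark-dirty/write-back/flush entries above combine into the usual commit pattern for a bufio client. A hedged sketch (my_update_block() is illustrative):

static int my_update_block(struct dm_bufio_client *c, sector_t block, u8 fill)
{
	struct dm_buffer *bp;
	u8 *data;

	data = dm_bufio_new(c, block, &bp);	/* no read: block will be overwritten */
	if (IS_ERR(data))
		return PTR_ERR(data);

	memset(data, fill, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(bp);
	dm_bufio_release(bp);

	/* Waits for all dirty buffers, returns any async write error and, as the
	 * xchg()/dm_bufio_issue_flush() lines above show, issues a disk flush. */
	return dm_bufio_write_dirty_buffers(c);
}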
1330 struct dm_bufio_client *c = b->c; in dm_bufio_release_move() local
1335 dm_bufio_lock(c); in dm_bufio_release_move()
1338 new = __find(c, new_block); in dm_bufio_release_move()
1341 __wait_for_free_buffer(c); in dm_bufio_release_move()
1385 dm_bufio_unlock(c); in dm_bufio_release_move()
1396 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
1400 dm_bufio_lock(c); in dm_bufio_forget()
1402 b = __find(c, block); in dm_bufio_forget()
1408 dm_bufio_unlock(c); in dm_bufio_forget()
1412 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) in dm_bufio_set_minimum_buffers() argument
1414 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
1418 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c) in dm_bufio_get_block_size() argument
1420 return c->block_size; in dm_bufio_get_block_size()
1424 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) in dm_bufio_get_device_size() argument
1426 return i_size_read(c->bdev->bd_inode) >> in dm_bufio_get_device_size()
1427 (SECTOR_SHIFT + c->sectors_per_block_bits); in dm_bufio_get_device_size()
1451 return b->c; in dm_bufio_get_client()
1455 static void drop_buffers(struct dm_bufio_client *c) in drop_buffers() argument
1465 dm_bufio_write_dirty_buffers_async(c); in drop_buffers()
1467 dm_bufio_lock(c); in drop_buffers()
1469 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1473 list_for_each_entry(b, &c->lru[i], lru_list) in drop_buffers()
1478 BUG_ON(!list_empty(&c->lru[i])); in drop_buffers()
1480 dm_bufio_unlock(c); in drop_buffers()
1510 static unsigned get_retain_buffers(struct dm_bufio_client *c) in get_retain_buffers() argument
1513 return retain_bytes / c->block_size; in get_retain_buffers()
1516 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, in __scan() argument
1523 unsigned retain_target = get_retain_buffers(c); in __scan()
1526 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1540 struct dm_bufio_client *c; in dm_bufio_shrink_scan() local
1543 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_scan()
1545 dm_bufio_lock(c); in dm_bufio_shrink_scan()
1546 else if (!dm_bufio_trylock(c)) in dm_bufio_shrink_scan()
1549 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); in dm_bufio_shrink_scan()
1550 dm_bufio_unlock(c); in dm_bufio_shrink_scan()
1557 struct dm_bufio_client *c; in dm_bufio_shrink_count() local
1560 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_count()
1562 dm_bufio_lock(c); in dm_bufio_shrink_count()
1563 else if (!dm_bufio_trylock(c)) in dm_bufio_shrink_count()
1566 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; in dm_bufio_shrink_count()
1567 dm_bufio_unlock(c); in dm_bufio_shrink_count()
1580 struct dm_bufio_client *c; in dm_bufio_client_create() local
1586 c = kzalloc(sizeof(*c), GFP_KERNEL); in dm_bufio_client_create()
1587 if (!c) { in dm_bufio_client_create()
1591 c->buffer_tree = RB_ROOT; in dm_bufio_client_create()
1593 c->bdev = bdev; in dm_bufio_client_create()
1594 c->block_size = block_size; in dm_bufio_client_create()
1595 c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT; in dm_bufio_client_create()
1596 c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ? in dm_bufio_client_create()
1598 c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ? in dm_bufio_client_create()
1601 c->aux_size = aux_size; in dm_bufio_client_create()
1602 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
1603 c->write_callback = write_callback; in dm_bufio_client_create()
1606 INIT_LIST_HEAD(&c->lru[i]); in dm_bufio_client_create()
1607 c->n_buffers[i] = 0; in dm_bufio_client_create()
1610 mutex_init(&c->lock); in dm_bufio_client_create()
1611 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
1612 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
1614 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS; in dm_bufio_client_create()
1616 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
1617 c->async_write_error = 0; in dm_bufio_client_create()
1619 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
1620 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
1621 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
1626 if (c->blocks_per_page_bits) { in dm_bufio_client_create()
1627 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1628 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size); in dm_bufio_client_create()
1629 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1636 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1637 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c), in dm_bufio_client_create()
1638 c->block_size, in dm_bufio_client_create()
1639 c->block_size, 0, NULL); in dm_bufio_client_create()
1640 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1649 while (c->need_reserved_buffers) { in dm_bufio_client_create()
1650 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create()
1661 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
1665 c->shrinker.count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
1666 c->shrinker.scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
1667 c->shrinker.seeks = 1; in dm_bufio_client_create()
1668 c->shrinker.batch = 0; in dm_bufio_client_create()
1669 register_shrinker(&c->shrinker); in dm_bufio_client_create()
1671 return c; in dm_bufio_client_create()
1675 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
1676 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create()
1681 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
1683 kfree(c); in dm_bufio_client_create()
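dm_bufio_client_create() sets up everything the rest of the listing operates on: the rb-tree, the clean/dirty LRUs, the dm-io client, an optional per-block-size slab, the reserved buffers and the shrinker. A hedged create/teardown pairing for a hypothetical target; the six-argument signature is the one implied by this kernel version, and my_target/my_target_open()/my_target_close() are illustrative:

struct my_target {
	struct dm_bufio_client *bufio;
};

static int my_target_open(struct my_target *t, struct block_device *bdev)
{
	/* 4 KiB blocks, 1 reserved buffer, no per-buffer aux data or callbacks. */
	t->bufio = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(t->bufio))
		return PTR_ERR(t->bufio);

	/* Keep at least this many buffers cached despite shrinker pressure. */
	dm_bufio_set_minimum_buffers(t->bufio, 16);
	return 0;
}

static void my_target_close(struct my_target *t)
{
	/* All buffers must have been released before the client is destroyed. */
	dm_bufio_client_destroy(t->bufio);
}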
1693 void dm_bufio_client_destroy(struct dm_bufio_client *c) in dm_bufio_client_destroy() argument
1697 drop_buffers(c); in dm_bufio_client_destroy()
1699 unregister_shrinker(&c->shrinker); in dm_bufio_client_destroy()
1703 list_del(&c->client_list); in dm_bufio_client_destroy()
1709 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree)); in dm_bufio_client_destroy()
1710 BUG_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
1712 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
1713 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy()
1720 if (c->n_buffers[i]) in dm_bufio_client_destroy()
1721 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]); in dm_bufio_client_destroy()
1724 BUG_ON(c->n_buffers[i]); in dm_bufio_client_destroy()
1726 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
1727 kfree(c); in dm_bufio_client_destroy()
1746 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) in __evict_old_buffers() argument
1749 unsigned retain_target = get_retain_buffers(c); in __evict_old_buffers()
1752 dm_bufio_lock(c); in __evict_old_buffers()
1754 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; in __evict_old_buffers()
1755 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1768 dm_bufio_unlock(c); in __evict_old_buffers()
1774 struct dm_bufio_client *c; in cleanup_old_buffers() local
1778 list_for_each_entry(c, &dm_bufio_all_clients, client_list) in cleanup_old_buffers()
1779 __evict_old_buffers(c, max_age_hz); in cleanup_old_buffers()