Lines matching refs: c (struct dm_bufio_client *, drivers/md/dm-bufio.c)
148 struct dm_bufio_client *c; member
159 static inline int dm_bufio_cache_index(struct dm_bufio_client *c) in dm_bufio_cache_index() argument
161 unsigned ret = c->blocks_per_page_bits - 1; in dm_bufio_cache_index()
168 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)]) argument
169 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)]) argument
173 static void dm_bufio_lock(struct dm_bufio_client *c) in dm_bufio_lock() argument
175 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
178 static int dm_bufio_trylock(struct dm_bufio_client *c) in dm_bufio_trylock() argument
180 return mutex_trylock(&c->lock); in dm_bufio_trylock()
183 static void dm_bufio_unlock(struct dm_bufio_client *c) in dm_bufio_unlock() argument
185 mutex_unlock(&c->lock); in dm_bufio_unlock()
259 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) in __find() argument
261 struct rb_node *n = c->buffer_tree.rb_node; in __find()
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
278 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL; in __insert()
295 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
300 rb_erase(&b->node, &c->buffer_tree); in __remove()
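The entries above show the client keeping all cached buffers in a red-black tree keyed by block number (c->buffer_tree), with __find()/__insert()/__remove() as the access points. As a minimal sketch, assuming struct dm_buffer carries the "struct rb_node node" and "sector_t block" members used elsewhere in this file, the lookup walks the tree like this:

	/* Sketch of an rbtree lookup keyed by block number, in the style of
	 * __find() above; the node/block members are assumed from context. */
	static struct dm_buffer *find_buffer_sketch(struct dm_bufio_client *c,
						    sector_t block)
	{
		struct rb_node *n = c->buffer_tree.rb_node;
		struct dm_buffer *b;

		while (n) {
			b = container_of(n, struct dm_buffer, node);

			if (b->block == block)
				return b;

			n = (block < b->block) ? n->rb_left : n->rb_right;
		}

		return NULL;	/* no buffer cached for this block */
	}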
369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument
375 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { in alloc_buffer_data()
377 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); in alloc_buffer_data()
380 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT && in alloc_buffer_data()
384 c->pages_per_block_bits); in alloc_buffer_data()
402 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); in alloc_buffer_data()
413 static void free_buffer_data(struct dm_bufio_client *c, in free_buffer_data() argument
418 kmem_cache_free(DM_BUFIO_CACHE(c), data); in free_buffer_data()
422 free_pages((unsigned long)data, c->pages_per_block_bits); in free_buffer_data()
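alloc_buffer_data()/free_buffer_data() above pick a data mode by block size: a slab cache for small blocks, __get_free_pages() for page-sized blocks when the caller passes __GFP_NORETRY, and __vmalloc() as the fallback for large blocks. A condensed sketch of that branch structure, assuming the DATA_MODE_* values and DM_BUFIO_*_LIMIT constants defined earlier in dm-bufio.c:

	/* Condensed sketch of the three-tier data allocation shown above;
	 * constants and DATA_MODE_* values are assumed from this file. */
	static void *alloc_buffer_data_sketch(struct dm_bufio_client *c,
					      gfp_t gfp_mask,
					      enum data_mode *data_mode)
	{
		if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
			*data_mode = DATA_MODE_SLAB;	/* small blocks: slab */
			return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
		}

		if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
		    gfp_mask & __GFP_NORETRY) {
			*data_mode = DATA_MODE_GET_FREE_PAGES;	/* whole pages */
			return (void *)__get_free_pages(gfp_mask,
							c->pages_per_block_bits);
		}

		*data_mode = DATA_MODE_VMALLOC;		/* large blocks: vmalloc */
		return __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM,
				 PAGE_KERNEL);
	}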
439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument
441 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer()
447 b->c = c; in alloc_buffer()
449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
455 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
465 struct dm_bufio_client *c = b->c; in free_buffer() local
467 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
469 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
478 struct dm_bufio_client *c = b->c; in __link_buffer() local
480 c->n_buffers[dirty]++; in __link_buffer()
483 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
484 __insert(b->c, b); in __link_buffer()
493 struct dm_bufio_client *c = b->c; in __unlink_buffer() local
495 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
497 c->n_buffers[b->list_mode]--; in __unlink_buffer()
498 __remove(b->c, b); in __unlink_buffer()
507 struct dm_bufio_client *c = b->c; in __relink_lru() local
509 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
511 c->n_buffers[b->list_mode]--; in __relink_lru()
512 c->n_buffers[dirty]++; in __relink_lru()
514 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
560 .client = b->c->dm_io, in use_dmio()
563 .bdev = b->c->bdev, in use_dmio()
564 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
565 .count = b->c->block_size >> SECTOR_SHIFT, in use_dmio()
609 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
610 b->bio.bi_bdev = b->c->bdev; in use_inline_bio()
623 len = b->c->block_size; in use_inline_bio()
634 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
649 if (rw == WRITE && b->c->write_callback) in submit_io()
650 b->c->write_callback(b); in submit_io()
652 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && in submit_io()
675 struct dm_bufio_client *c = b->c; in write_endio() local
677 (void)cmpxchg(&c->async_write_error, 0, error); in write_endio()
748 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) in __get_unclaimed_buffer() argument
752 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
764 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
785 static void __wait_for_free_buffer(struct dm_bufio_client *c) in __wait_for_free_buffer() argument
789 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
791 dm_bufio_unlock(c); in __wait_for_free_buffer()
795 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
797 dm_bufio_lock(c); in __wait_for_free_buffer()
813 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait_no_callback() argument
831 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
839 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
840 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
843 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
848 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
852 __wait_for_free_buffer(c); in __alloc_buffer_wait_no_callback()
856 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait() argument
858 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait()
863 if (c->alloc_callback) in __alloc_buffer_wait()
864 c->alloc_callback(b); in __alloc_buffer_wait()
874 struct dm_bufio_client *c = b->c; in __free_buffer_wake() local
876 if (!c->need_reserved_buffers) in __free_buffer_wake()
879 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
880 c->need_reserved_buffers--; in __free_buffer_wake()
883 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
886 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, in __write_dirty_buffers_async() argument
891 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
911 static void __get_memory_limit(struct dm_bufio_client *c, in __get_memory_limit() argument
924 (c->sectors_per_block_bits + SECTOR_SHIFT); in __get_memory_limit()
926 if (buffers < c->minimum_buffers) in __get_memory_limit()
927 buffers = c->minimum_buffers; in __get_memory_limit()
938 static void __check_watermark(struct dm_bufio_client *c, in __check_watermark() argument
943 __get_memory_limit(c, &threshold_buffers, &limit_buffers); in __check_watermark()
945 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] > in __check_watermark()
948 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark()
957 if (c->n_buffers[LIST_DIRTY] > threshold_buffers) in __check_watermark()
958 __write_dirty_buffers_async(c, 1, write_list); in __check_watermark()
965 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
973 b = __find(c, block); in __bufio_new()
980 new_b = __alloc_buffer_wait(c, nf); in __bufio_new()
988 b = __find(c, block); in __bufio_new()
994 __check_watermark(c, write_list); in __bufio_new()
1056 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1064 dm_bufio_lock(c); in new_read()
1065 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1066 dm_bufio_unlock(c); in new_read()
1091 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1094 return new_read(c, block, NF_GET, bp); in dm_bufio_get()
1098 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1103 return new_read(c, block, NF_READ, bp); in dm_bufio_read()
1107 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1112 return new_read(c, block, NF_FRESH, bp); in dm_bufio_new()
1116 void dm_bufio_prefetch(struct dm_bufio_client *c, in dm_bufio_prefetch() argument
1126 dm_bufio_lock(c); in dm_bufio_prefetch()
1131 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1134 dm_bufio_unlock(c); in dm_bufio_prefetch()
1138 dm_bufio_lock(c); in dm_bufio_prefetch()
1141 dm_bufio_unlock(c); in dm_bufio_prefetch()
1151 dm_bufio_lock(c); in dm_bufio_prefetch()
1155 dm_bufio_unlock(c); in dm_bufio_prefetch()
1164 struct dm_bufio_client *c = b->c; in dm_bufio_release() local
1166 dm_bufio_lock(c); in dm_bufio_release()
1172 wake_up(&c->free_buffer_wait); in dm_bufio_release()
1188 dm_bufio_unlock(c); in dm_bufio_release()
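Taken together, dm_bufio_get()/dm_bufio_read()/dm_bufio_new()/dm_bufio_prefetch() and dm_bufio_release() form the client-facing read path listed above. A minimal usage sketch for a hypothetical caller follows; MY_SUPERBLOCK and validate() are placeholders, and the header path ("dm-bufio.h" vs. <linux/dm-bufio.h>) depends on the kernel tree layout.

	/* Hypothetical caller: read one metadata block through dm-bufio and
	 * drop the reference when done. */
	#include "dm-bufio.h"	/* location depends on tree layout */

	static int read_superblock(struct dm_bufio_client *c)
	{
		struct dm_buffer *bp;
		void *data;

		dm_bufio_prefetch(c, MY_SUPERBLOCK, 1);	/* optional read-ahead */

		data = dm_bufio_read(c, MY_SUPERBLOCK, &bp);	/* waits for I/O */
		if (IS_ERR(data))
			return PTR_ERR(data);

		if (!validate(data)) {		/* placeholder sanity check */
			dm_bufio_release(bp);
			return -EILSEQ;
		}

		dm_bufio_release(bp);	/* buffer may stay cached for reuse */
		return 0;
	}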
1194 struct dm_bufio_client *c = b->c; in dm_bufio_mark_buffer_dirty() local
1196 dm_bufio_lock(c); in dm_bufio_mark_buffer_dirty()
1203 dm_bufio_unlock(c); in dm_bufio_mark_buffer_dirty()
1207 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers_async() argument
1213 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers_async()
1214 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers_async()
1215 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers_async()
1227 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers() argument
1235 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1236 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers()
1237 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1239 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1242 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1245 if (buffers_processed < c->n_buffers[LIST_DIRTY]) in dm_bufio_write_dirty_buffers()
1251 if (buffers_processed < c->n_buffers[LIST_DIRTY]) { in dm_bufio_write_dirty_buffers()
1254 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1257 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1287 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
1288 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1290 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
1291 f = dm_bufio_issue_flush(c); in dm_bufio_write_dirty_buffers()
1302 int dm_bufio_issue_flush(struct dm_bufio_client *c) in dm_bufio_issue_flush() argument
1308 .client = c->dm_io, in dm_bufio_issue_flush()
1311 .bdev = c->bdev, in dm_bufio_issue_flush()
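The write path pairs dm_bufio_mark_buffer_dirty() with dm_bufio_write_dirty_buffers(), which per the lines above also picks up any earlier async write error and issues a flush via dm_bufio_issue_flush(). A hedged sketch of rewriting one block; MY_BLOCK is a placeholder.

	/* Hypothetical caller: overwrite one block and push it to disk. */
	static int update_block(struct dm_bufio_client *c, const void *src)
	{
		struct dm_buffer *bp;
		void *data;

		data = dm_bufio_new(c, MY_BLOCK, &bp);	/* NF_FRESH: skip reading old data */
		if (IS_ERR(data))
			return PTR_ERR(data);

		memcpy(data, src, dm_bufio_get_block_size(c));
		dm_bufio_mark_buffer_dirty(bp);		/* move to the dirty LRU */
		dm_bufio_release(bp);

		/* write all dirty buffers, wait, then flush the device cache */
		return dm_bufio_write_dirty_buffers(c);
	}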
1336 struct dm_bufio_client *c = b->c; in dm_bufio_release_move() local
1341 dm_bufio_lock(c); in dm_bufio_release_move()
1344 new = __find(c, new_block); in dm_bufio_release_move()
1347 __wait_for_free_buffer(c); in dm_bufio_release_move()
1391 dm_bufio_unlock(c); in dm_bufio_release_move()
1402 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
1406 dm_bufio_lock(c); in dm_bufio_forget()
1408 b = __find(c, block); in dm_bufio_forget()
1414 dm_bufio_unlock(c); in dm_bufio_forget()
1418 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) in dm_bufio_set_minimum_buffers() argument
1420 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
1424 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c) in dm_bufio_get_block_size() argument
1426 return c->block_size; in dm_bufio_get_block_size()
1430 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) in dm_bufio_get_device_size() argument
1432 return i_size_read(c->bdev->bd_inode) >> in dm_bufio_get_device_size()
1433 (SECTOR_SHIFT + c->sectors_per_block_bits); in dm_bufio_get_device_size()
1457 return b->c; in dm_bufio_get_client()
1461 static void drop_buffers(struct dm_bufio_client *c) in drop_buffers() argument
1471 dm_bufio_write_dirty_buffers_async(c); in drop_buffers()
1473 dm_bufio_lock(c); in drop_buffers()
1475 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1479 list_for_each_entry(b, &c->lru[i], lru_list) in drop_buffers()
1484 BUG_ON(!list_empty(&c->lru[i])); in drop_buffers()
1486 dm_bufio_unlock(c); in drop_buffers()
1516 static unsigned get_retain_buffers(struct dm_bufio_client *c) in get_retain_buffers() argument
1519 return retain_bytes / c->block_size; in get_retain_buffers()
1522 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, in __scan() argument
1529 unsigned retain_target = get_retain_buffers(c); in __scan()
1532 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1546 struct dm_bufio_client *c; in dm_bufio_shrink_scan() local
1549 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_scan()
1551 dm_bufio_lock(c); in dm_bufio_shrink_scan()
1552 else if (!dm_bufio_trylock(c)) in dm_bufio_shrink_scan()
1555 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); in dm_bufio_shrink_scan()
1556 dm_bufio_unlock(c); in dm_bufio_shrink_scan()
1563 struct dm_bufio_client *c; in dm_bufio_shrink_count() local
1566 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_count()
1568 dm_bufio_lock(c); in dm_bufio_shrink_count()
1569 else if (!dm_bufio_trylock(c)) in dm_bufio_shrink_count()
1572 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; in dm_bufio_shrink_count()
1573 dm_bufio_unlock(c); in dm_bufio_shrink_count()
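The two shrinker callbacks only hint at their locking here: the full client lock is taken when the reclaim context can block, and a trylock is used otherwise. A reconstructed sketch of the scan callback, assuming the sleepable-context test is on __GFP_FS in sc->gfp_mask:

	/* Reconstructed sketch of the shrinker scan path: lock when the caller
	 * may sleep, otherwise trylock and give up if contended. */
	static unsigned long shrink_scan_sketch(struct shrinker *shrink,
						struct shrink_control *sc)
	{
		struct dm_bufio_client *c =
			container_of(shrink, struct dm_bufio_client, shrinker);
		unsigned long freed;

		if (sc->gfp_mask & __GFP_FS)
			dm_bufio_lock(c);
		else if (!dm_bufio_trylock(c))
			return SHRINK_STOP;	/* cannot sleep, lock contended */

		freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
		dm_bufio_unlock(c);
		return freed;
	}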
1586 struct dm_bufio_client *c; in dm_bufio_client_create() local
1592 c = kzalloc(sizeof(*c), GFP_KERNEL); in dm_bufio_client_create()
1593 if (!c) { in dm_bufio_client_create()
1597 c->buffer_tree = RB_ROOT; in dm_bufio_client_create()
1599 c->bdev = bdev; in dm_bufio_client_create()
1600 c->block_size = block_size; in dm_bufio_client_create()
1601 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
1602 c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ? in dm_bufio_client_create()
1604 c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ? in dm_bufio_client_create()
1607 c->aux_size = aux_size; in dm_bufio_client_create()
1608 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
1609 c->write_callback = write_callback; in dm_bufio_client_create()
1612 INIT_LIST_HEAD(&c->lru[i]); in dm_bufio_client_create()
1613 c->n_buffers[i] = 0; in dm_bufio_client_create()
1616 mutex_init(&c->lock); in dm_bufio_client_create()
1617 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
1618 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
1620 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS; in dm_bufio_client_create()
1622 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
1623 c->async_write_error = 0; in dm_bufio_client_create()
1625 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
1626 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
1627 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
1632 if (c->blocks_per_page_bits) { in dm_bufio_client_create()
1633 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1634 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size); in dm_bufio_client_create()
1635 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1642 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1643 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c), in dm_bufio_client_create()
1644 c->block_size, in dm_bufio_client_create()
1645 c->block_size, 0, NULL); in dm_bufio_client_create()
1646 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1655 while (c->need_reserved_buffers) { in dm_bufio_client_create()
1656 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create()
1667 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
1671 c->shrinker.count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
1672 c->shrinker.scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
1673 c->shrinker.seeks = 1; in dm_bufio_client_create()
1674 c->shrinker.batch = 0; in dm_bufio_client_create()
1675 register_shrinker(&c->shrinker); in dm_bufio_client_create()
1677 return c; in dm_bufio_client_create()
1681 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
1682 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create()
1687 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
1689 kfree(c); in dm_bufio_client_create()
1699 void dm_bufio_client_destroy(struct dm_bufio_client *c) in dm_bufio_client_destroy() argument
1703 drop_buffers(c); in dm_bufio_client_destroy()
1705 unregister_shrinker(&c->shrinker); in dm_bufio_client_destroy()
1709 list_del(&c->client_list); in dm_bufio_client_destroy()
1715 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree)); in dm_bufio_client_destroy()
1716 BUG_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
1718 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
1719 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy()
1726 if (c->n_buffers[i]) in dm_bufio_client_destroy()
1727 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]); in dm_bufio_client_destroy()
1730 BUG_ON(c->n_buffers[i]); in dm_bufio_client_destroy()
1732 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
1733 kfree(c); in dm_bufio_client_destroy()
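dm_bufio_client_create() and dm_bufio_client_destroy() bracket the lifetime of a client, as listed above. A sketch of the pairing; the 4096-byte block size, one reserved buffer, zero aux_size, NULL callbacks, and the 16-buffer minimum are example values only, not requirements of the API.

	/* Hypothetical lifecycle: one bufio client per underlying block device. */
	static struct dm_bufio_client *attach_bufio(struct block_device *bdev)
	{
		struct dm_bufio_client *c;

		c = dm_bufio_client_create(bdev, 4096 /* block_size */,
					   1 /* reserved_buffers */,
					   0 /* aux_size */,
					   NULL /* alloc_callback */,
					   NULL /* write_callback */);
		if (IS_ERR(c))
			return c;

		/* keep some buffers resident regardless of memory pressure */
		dm_bufio_set_minimum_buffers(c, 16);
		return c;
	}

	static void detach_bufio(struct dm_bufio_client *c)
	{
		/* all buffers must already be released; destroy unregisters the
		 * shrinker, frees reserved buffers, and BUG()s on leaks */
		dm_bufio_client_destroy(c);
	}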
1752 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) in __evict_old_buffers() argument
1755 unsigned retain_target = get_retain_buffers(c); in __evict_old_buffers()
1758 dm_bufio_lock(c); in __evict_old_buffers()
1760 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; in __evict_old_buffers()
1761 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1774 dm_bufio_unlock(c); in __evict_old_buffers()
1780 struct dm_bufio_client *c; in cleanup_old_buffers() local
1784 list_for_each_entry(c, &dm_bufio_all_clients, client_list) in cleanup_old_buffers()
1785 __evict_old_buffers(c, max_age_hz); in cleanup_old_buffers()