Lines matching refs: b (identifier cross-reference, drivers/md/dm-bufio.c)
262 struct dm_buffer *b; in __find() local
265 b = container_of(n, struct dm_buffer, node); in __find()
267 if (b->block == block) in __find()
268 return b; in __find()
270 n = (b->block < block) ? n->rb_right : n->rb_left; in __find()
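
The ref listing shows only lines that mention b, so the loop header and the fall-through return of __find() are absent. A plausible reconstruction of the whole lookup (the while loop and the NULL return are inferred from the standard rbtree idiom, not lines shown above):

    static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
    {
            struct rb_node *n = c->buffer_tree.rb_node;
            struct dm_buffer *b;

            while (n) {
                    b = container_of(n, struct dm_buffer, node);

                    if (b->block == block)
                            return b;               /* exact hit */

                    /* larger block numbers live in the right subtree */
                    n = (b->block < block) ? n->rb_right : n->rb_left;
            }

            return NULL;                            /* block not cached */
    }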
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
284 if (found->block == b->block) { in __insert()
285 BUG_ON(found != b); in __insert()
290 new = (found->block < b->block) ? in __insert()
294 rb_link_node(&b->node, parent, new); in __insert()
295 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
300 rb_erase(&b->node, &c->buffer_tree); in __remove()
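
__insert() descends the same way; the listing drops the declarations, the while (*new) loop, and the second half of the ternary because none of those lines reference b alone. A sketch of the omitted structure (the descent must mirror __find(), sending larger blocks right):

    struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
    struct dm_buffer *found;

    while (*new) {
            found = container_of(*new, struct dm_buffer, node);

            if (found->block == b->block) {
                    BUG_ON(found != b);     /* same key must be the same node */
                    return;
            }

            parent = *new;
            new = (found->block < b->block) ?
                    &((*new)->rb_right) : &((*new)->rb_left);
    }

    rb_link_node(&b->node, parent, new);
    rb_insert_color(&b->node, &c->buffer_tree);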
441 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer() local
444 if (!b) in alloc_buffer()
447 b->c = c; in alloc_buffer()
449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
450 if (!b->data) { in alloc_buffer()
451 kfree(b); in alloc_buffer()
455 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
457 return b; in alloc_buffer()
463 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
465 struct dm_bufio_client *c = b->c; in free_buffer()
467 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
469 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
470 kfree(b); in free_buffer()
476 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
478 struct dm_bufio_client *c = b->c; in __link_buffer()
481 b->block = block; in __link_buffer()
482 b->list_mode = dirty; in __link_buffer()
483 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
484 __insert(b->c, b); in __link_buffer()
485 b->last_accessed = jiffies; in __link_buffer()
491 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
493 struct dm_bufio_client *c = b->c; in __unlink_buffer()
495 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
497 c->n_buffers[b->list_mode]--; in __unlink_buffer()
498 __remove(b->c, b); in __unlink_buffer()
499 list_del(&b->lru_list); in __unlink_buffer()
505 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
507 struct dm_bufio_client *c = b->c; in __relink_lru()
509 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
511 c->n_buffers[b->list_mode]--; in __relink_lru()
513 b->list_mode = dirty; in __relink_lru()
514 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
515 b->last_accessed = jiffies; in __relink_lru()
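
Both __link_buffer() and __relink_lru() also bump the per-list counter with c->n_buffers[dirty]++; that line contains no reference to b and is therefore missing above. Assuming that is the omitted line, the invariant is that n_buffers[LIST_CLEAN] + n_buffers[LIST_DIRTY] always equals the number of linked buffers:

    c->n_buffers[b->list_mode]--;   /* leave the old list... */
    c->n_buffers[dirty]++;          /* ...and account for the new one */
    b->list_mode = dirty;
    list_move(&b->lru_list, &c->lru[dirty]);
    b->last_accessed = jiffies;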
546 struct dm_buffer *b = context; in dmio_complete() local
548 b->bio.bi_end_io(&b->bio, error ? -EIO : 0); in dmio_complete()
551 static void use_dmio(struct dm_buffer *b, int rw, sector_t block, in use_dmio() argument
558 .notify.context = b, in use_dmio()
559 .client = b->c->dm_io, in use_dmio()
562 .bdev = b->c->bdev, in use_dmio()
563 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
564 .count = b->c->block_size >> SECTOR_SHIFT, in use_dmio()
567 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
569 io_req.mem.ptr.addr = b->data; in use_dmio()
572 io_req.mem.ptr.vma = b->data; in use_dmio()
575 b->bio.bi_end_io = end_io; in use_dmio()
579 end_io(&b->bio, r); in use_dmio()
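
Filled out, use_dmio() builds a dm_io_request plus a one-extent dm_io_region and calls the completion directly if dm_io() fails synchronously. The initializers and the dm_io() call below are reconstructed from the dm-io API of this kernel generation and should be read as a sketch, not the verbatim source:

    struct dm_io_request io_req = {
            .bi_rw = rw,
            .notify.fn = dmio_complete,
            .notify.context = b,
            .client = b->c->dm_io,
    };
    struct dm_io_region region = {
            .bdev = b->c->bdev,
            .sector = block << b->c->sectors_per_block_bits,
            .count = b->c->block_size >> SECTOR_SHIFT,
    };
    int r;

    if (b->data_mode != DATA_MODE_VMALLOC) {
            io_req.mem.type = DM_IO_KMEM;   /* kmalloc'd or __get_free_pages */
            io_req.mem.ptr.addr = b->data;
    } else {
            io_req.mem.type = DM_IO_VMA;    /* vmalloc'd data needs VMA mode */
            io_req.mem.ptr.vma = b->data;
    }

    b->bio.bi_end_io = end_io;

    r = dm_io(&io_req, 1, &region, NULL);
    if (r)
            end_io(&b->bio, r);             /* synchronous failure path */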
595 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, in use_inline_bio() argument
601 bio_init(&b->bio); in use_inline_bio()
602 b->bio.bi_io_vec = b->bio_vec; in use_inline_bio()
603 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; in use_inline_bio()
604 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
605 b->bio.bi_bdev = b->c->bdev; in use_inline_bio()
606 b->bio.bi_end_io = inline_endio; in use_inline_bio()
611 b->bio.bi_private = end_io; in use_inline_bio()
617 ptr = b->data; in use_inline_bio()
618 len = b->c->block_size; in use_inline_bio()
626 if (!bio_add_page(&b->bio, virt_to_page(ptr), in use_inline_bio()
629 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
630 use_dmio(b, rw, block, end_io); in use_inline_bio()
638 submit_bio(rw, &b->bio); in use_inline_bio()
641 static void submit_io(struct dm_buffer *b, int rw, sector_t block, in submit_io() argument
644 if (rw == WRITE && b->c->write_callback) in submit_io()
645 b->c->write_callback(b); in submit_io()
647 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && in submit_io()
648 b->data_mode != DATA_MODE_VMALLOC) in submit_io()
649 use_inline_bio(b, rw, block, end_io); in submit_io()
651 use_dmio(b, rw, block, end_io); in submit_io()
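
The dispatch rule in submit_io(): a buffer may take the cheap embedded-bio path only if the whole block fits into the inline bio_vec array and the data is not vmalloc'ed (dm-io knows how to map vmalloc memory; a plain bio does not). With the else keyword restored (it carries no b reference):

    if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
        b->data_mode != DATA_MODE_VMALLOC)
            use_inline_bio(b, rw, block, end_io);
    else
            use_dmio(b, rw, block, end_io);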
666 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in write_endio() local
668 b->write_error = error; in write_endio()
670 struct dm_bufio_client *c = b->c; in write_endio()
674 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
677 clear_bit(B_WRITING, &b->state); in write_endio()
680 wake_up_bit(&b->state, B_WRITING); in write_endio()
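
write_endio() is the waker half of a bit-wait handshake: __write_dirty_buffer() takes the B_WRITING bit with wait_on_bit_lock_io(), and the completion clears it and wakes the sleepers. The barriers and the error bookkeeping shown here are a hedged reconstruction of the omitted lines:

    b->write_error = error;
    if (unlikely(error)) {
            struct dm_bufio_client *c = b->c;

            /* record the first async error for the write-dirty-buffers path */
            (void)cmpxchg(&c->async_write_error, 0, error);
    }

    BUG_ON(!test_bit(B_WRITING, &b->state));

    smp_mb__before_atomic();
    clear_bit(B_WRITING, &b->state);
    smp_mb__after_atomic();

    wake_up_bit(&b->state, B_WRITING);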
692 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
695 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
698 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
699 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
702 submit_io(b, WRITE, b->block, write_endio); in __write_dirty_buffer()
704 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
712 struct dm_buffer *b = in __flush_write_list() local
714 list_del(&b->write_list); in __flush_write_list()
715 submit_io(b, WRITE, b->block, write_endio); in __flush_write_list()
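
__write_dirty_buffer() either submits immediately or, when the caller passes a write_list, merely queues the buffer so the I/O can be issued after the client lock is dropped; __flush_write_list() then drains that list. The branch, restored (the if/else lines do not mention b):

    clear_bit(B_DIRTY, &b->state);
    wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

    if (!write_list)
            submit_io(b, WRITE, b->block, write_endio);     /* issue now */
    else
            list_add_tail(&b->write_list, write_list);      /* defer */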
726 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
728 BUG_ON(b->hold_count); in __make_buffer_clean()
730 if (!b->state) /* fast case */ in __make_buffer_clean()
733 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
734 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
735 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
744 struct dm_buffer *b; in __get_unclaimed_buffer() local
746 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
747 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
748 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
750 if (!b->hold_count) { in __get_unclaimed_buffer()
751 __make_buffer_clean(b); in __get_unclaimed_buffer()
752 __unlink_buffer(b); in __get_unclaimed_buffer()
753 return b; in __get_unclaimed_buffer()
758 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
759 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
761 if (!b->hold_count) { in __get_unclaimed_buffer()
762 __make_buffer_clean(b); in __get_unclaimed_buffer()
763 __unlink_buffer(b); in __get_unclaimed_buffer()
764 return b; in __get_unclaimed_buffer()
809 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
825 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
826 if (b) in __alloc_buffer_wait_no_callback()
827 return b; in __alloc_buffer_wait_no_callback()
834 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
836 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
839 return b; in __alloc_buffer_wait_no_callback()
842 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
843 if (b) in __alloc_buffer_wait_no_callback()
844 return b; in __alloc_buffer_wait_no_callback()
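
Read together, lines 825-844 form a fallback ladder: try a cheap kmalloc first, give up immediately for prefetch, then dip into the reserved pool, then steal an unclaimed buffer, and finally sleep and retry. A condensed paraphrase of the loop body; the NF_PREFETCH early-out, need_reserved_buffers accounting, and __wait_for_free_buffer() call sit on omitted lines and are assumptions based on this file's conventions:

    for (;;) {
            /* cheap attempt: no retries, no emergency reserves, no warnings */
            b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY |
                             __GFP_NOMEMALLOC | __GFP_NOWARN);
            if (b)
                    return b;

            if (nf == NF_PREFETCH)          /* prefetch never blocks */
                    return NULL;

            if (!list_empty(&c->reserved_buffers)) {
                    b = list_entry(c->reserved_buffers.next,
                                   struct dm_buffer, lru_list);
                    list_del(&b->lru_list);
                    c->need_reserved_buffers++;
                    return b;
            }

            b = __get_unclaimed_buffer(c);  /* evict something clean */
            if (b)
                    return b;

            __wait_for_free_buffer(c);      /* block until a release */
    }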
852 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
854 if (!b) in __alloc_buffer_wait()
858 c->alloc_callback(b); in __alloc_buffer_wait()
860 return b; in __alloc_buffer_wait()
866 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
868 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
871 free_buffer(b); in __free_buffer_wake()
873 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
883 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
885 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
886 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
888 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
889 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
890 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
894 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
897 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
942 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark() local
944 if (!b) in __check_watermark()
947 __free_buffer_wake(b); in __check_watermark()
963 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
967 b = __find(c, block); in __bufio_new()
968 if (b) in __bufio_new()
982 b = __find(c, block); in __bufio_new()
983 if (b) { in __bufio_new()
990 b = new_b; in __bufio_new()
991 b->hold_count = 1; in __bufio_new()
992 b->read_error = 0; in __bufio_new()
993 b->write_error = 0; in __bufio_new()
994 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
997 b->state = 0; in __bufio_new()
998 return b; in __bufio_new()
1001 b->state = 1 << B_READING; in __bufio_new()
1004 return b; in __bufio_new()
1016 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) in __bufio_new()
1019 b->hold_count++; in __bufio_new()
1020 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1021 test_bit(B_WRITING, &b->state)); in __bufio_new()
1022 return b; in __bufio_new()
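
The shape of __bufio_new() is a classic unlock-and-recheck: the first __find() runs under the client mutex, but __alloc_buffer_wait() may drop that mutex, so the lookup is repeated before linking (the second __find at line 982). A paraphrase of the control flow, with the goto label and NF_* early-outs restored as assumptions:

    b = __find(c, block);
    if (b)
            goto found_buffer;              /* cache hit */

    new_b = __alloc_buffer_wait(c, nf);     /* may drop + retake the mutex */

    b = __find(c, block);                   /* recheck: a racer may have won */
    if (b) {
            __free_buffer_wake(new_b);      /* lose the race gracefully */
            goto found_buffer;
    }

    b = new_b;
    b->hold_count = 1;
    __link_buffer(b, block, LIST_CLEAN);
    b->state = 1 << B_READING;              /* NF_FRESH instead gets state = 0 */
    *need_submit = 1;                       /* caller must issue the read */
    return b;

    found_buffer:
    if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
            return NULL;                    /* dm_bufio_get() must never sleep */
    b->hold_count++;
    __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                 test_bit(B_WRITING, &b->state));
    return b;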
1031 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in read_endio() local
1033 b->read_error = error; in read_endio()
1035 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1038 clear_bit(B_READING, &b->state); in read_endio()
1041 wake_up_bit(&b->state, B_READING); in read_endio()
1054 struct dm_buffer *b; in new_read() local
1059 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1064 if (!b) in new_read()
1065 return b; in new_read()
1068 submit_io(b, READ, b->block, read_endio); in new_read()
1070 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1072 if (b->read_error) { in new_read()
1073 int error = b->read_error; in new_read()
1075 dm_bufio_release(b); in new_read()
1080 *bp = b; in new_read()
1082 return b->data; in new_read()
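
new_read() is the synchronous wrapper around that machinery: build or find the buffer under the lock, flush any deferred writebacks once the lock is dropped, issue the read if __bufio_new() asked for it, then sleep on B_READING. A reconstruction with the locking and error-return lines filled in as assumptions:

    LIST_HEAD(write_list);

    dm_bufio_lock(c);
    b = __bufio_new(c, block, nf, &need_submit, &write_list);
    dm_bufio_unlock(c);

    __flush_write_list(&write_list);        /* issue deferred writebacks */

    if (!b)
            return b;                       /* NF_GET / NF_PREFETCH miss */

    if (need_submit)
            submit_io(b, READ, b->block, read_endio);

    wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

    if (b->read_error) {
            int error = b->read_error;

            dm_bufio_release(b);
            return ERR_PTR(error);          /* hand the error to the caller */
    }

    *bp = b;
    return b->data;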
1124 struct dm_buffer *b; in dm_bufio_prefetch() local
1125 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1134 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1138 submit_io(b, READ, b->block, read_endio); in dm_bufio_prefetch()
1139 dm_bufio_release(b); in dm_bufio_prefetch()
1156 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1158 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1162 BUG_ON(!b->hold_count); in dm_bufio_release()
1164 b->hold_count--; in dm_bufio_release()
1165 if (!b->hold_count) { in dm_bufio_release()
1173 if ((b->read_error || b->write_error) && in dm_bufio_release()
1174 !test_bit(B_READING, &b->state) && in dm_bufio_release()
1175 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1176 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1177 __unlink_buffer(b); in dm_bufio_release()
1178 __free_buffer_wake(b); in dm_bufio_release()
1186 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1188 struct dm_bufio_client *c = b->c; in dm_bufio_mark_buffer_dirty()
1192 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_buffer_dirty()
1194 if (!test_and_set_bit(B_DIRTY, &b->state)) in dm_bufio_mark_buffer_dirty()
1195 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_buffer_dirty()
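
From a client's point of view, the mark-dirty/release pair above is the whole write API. A hypothetical usage sketch (client, blk, payload, and len are illustrative names, not from this file); dm_bufio_read() returns the block's data or an ERR_PTR:

    struct dm_buffer *buf;
    void *data = dm_bufio_read(client, blk, &buf);

    if (!IS_ERR(data)) {
            memcpy(data, payload, len);             /* mutate the cached block */
            dm_bufio_mark_buffer_dirty(buf);        /* queue it for writeback */
            dm_bufio_release(buf);                  /* drop our hold_count */
    }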
1225 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1236 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1242 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1244 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1247 b->hold_count++; in dm_bufio_write_dirty_buffers()
1249 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1252 b->hold_count--; in dm_bufio_write_dirty_buffers()
1254 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1258 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1259 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1260 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
1328 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1330 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1354 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1355 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1357 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1358 if (b->hold_count == 1) { in dm_bufio_release_move()
1359 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1361 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1362 __unlink_buffer(b); in dm_bufio_release_move()
1363 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1366 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1375 old_block = b->block; in dm_bufio_release_move()
1376 __unlink_buffer(b); in dm_bufio_release_move()
1377 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1378 submit_io(b, WRITE, new_block, write_endio); in dm_bufio_release_move()
1379 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1381 __unlink_buffer(b); in dm_bufio_release_move()
1382 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1386 dm_bufio_release(b); in dm_bufio_release_move()
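
dm_bufio_release_move() splits on hold_count: a sole holder can simply be re-keyed and marked dirty (lines 1358-1363), while a shared buffer is written out at the new location and then linked back to its old block (lines 1375-1382) so concurrent holders never observe the block number changing. A skeleton with the else branch and the old_block declaration restored as assumptions:

    if (b->hold_count == 1) {
            /* sole holder: re-key in place, let normal writeback handle it */
            wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
            set_bit(B_DIRTY, &b->state);
            __unlink_buffer(b);
            __link_buffer(b, new_block, LIST_DIRTY);
    } else {
            sector_t old_block;

            wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
            /* re-key so the write goes to new_block, then restore the key */
            old_block = b->block;
            __unlink_buffer(b);
            __link_buffer(b, new_block, b->list_mode);
            submit_io(b, WRITE, new_block, write_endio);
            wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
            __unlink_buffer(b);
            __link_buffer(b, old_block, b->list_mode);
    }

    dm_bufio_release(b);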
1398 struct dm_buffer *b; in dm_bufio_forget() local
1402 b = __find(c, block); in dm_bufio_forget()
1403 if (b && likely(!b->hold_count) && likely(!b->state)) { in dm_bufio_forget()
1404 __unlink_buffer(b); in dm_bufio_forget()
1405 __free_buffer_wake(b); in dm_bufio_forget()
1431 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1433 return b->block; in dm_bufio_get_block_number()
1437 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1439 return b->data; in dm_bufio_get_block_data()
1443 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1445 return b + 1; in dm_bufio_get_aux_data()
1449 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1451 return b->c; in dm_bufio_get_client()
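
The return b + 1 at line 1445 works because alloc_buffer() (line 441) sizes the allocation as sizeof(struct dm_buffer) + c->aux_size: the client's auxiliary area lives immediately behind the struct, so its address is one-past-the-end pointer arithmetic:

    /* equivalent spellings of the aux pointer */
    void *aux  = b + 1;                             /* struct-sized step */
    void *aux2 = (char *)b + sizeof(struct dm_buffer);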
1457 struct dm_buffer *b; in drop_buffers() local
1469 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1470 __free_buffer_wake(b); in drop_buffers()
1473 list_for_each_entry(b, &c->lru[i], lru_list) in drop_buffers()
1475 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1491 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1494 if (test_bit(B_READING, &b->state) || in __try_evict_buffer()
1495 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1496 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1500 if (b->hold_count) in __try_evict_buffer()
1503 __make_buffer_clean(b); in __try_evict_buffer()
1504 __unlink_buffer(b); in __try_evict_buffer()
1505 __free_buffer_wake(b); in __try_evict_buffer()
1520 struct dm_buffer *b, *tmp; in __scan() local
1526 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1527 if (__try_evict_buffer(b, gfp_mask)) in __scan()
1650 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1652 if (!b) { in dm_bufio_client_create()
1656 __free_buffer_wake(b); in dm_bufio_client_create()
1676 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1678 list_del(&b->lru_list); in dm_bufio_client_create()
1679 free_buffer(b); in dm_bufio_client_create()
1713 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1715 list_del(&b->lru_list); in dm_bufio_client_destroy()
1716 free_buffer(b); in dm_bufio_client_destroy()
1741 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1743 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
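
older_than() uses time_after_eq() rather than a bare >= so that the comparison survives jiffies wrapping around. The essence of the macro (simplified from include/linux/jiffies.h, typechecks dropped):

    /* wraparound-safe: the unsigned difference is reinterpreted as signed,
     * correct while the two timestamps are less than LONG_MAX apart */
    #define time_after_eq(a, b)     ((long)((a) - (b)) >= 0)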
1748 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1755 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1759 if (!older_than(b, age_hz)) in __evict_old_buffers()
1762 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()