Lines matching refs:b in drivers/md/dm-bufio.c

(Only source lines that reference the identifier b are shown, which is why every function appears with gaps. The leading number is the line number in dm-bufio.c; the trailing annotation names the enclosing function and whether b is a local or an argument there.)

262 	struct dm_buffer *b;  in __find()  local
265 b = container_of(n, struct dm_buffer, node); in __find()
267 if (b->block == block) in __find()
268 return b; in __find()
270 n = (b->block < block) ? n->rb_right : n->rb_left; in __find()
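
The listing shows only the lines of __find() that reference b, so the loop scaffolding is elided. A minimal reconstruction of the whole lookup, assuming the usual kernel rbtree idiom (the rb_node is embedded in struct dm_buffer as the node member, per the container_of() on line 265):

	static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
	{
		struct rb_node *n = c->buffer_tree.rb_node;
		struct dm_buffer *b;

		while (n) {
			b = container_of(n, struct dm_buffer, node);

			if (b->block == block)
				return b;	/* cache hit */

			/* this node's key is below the target, so the target
			 * can only be in the right subtree, and vice versa */
			n = (b->block < block) ? n->rb_right : n->rb_left;
		}

		return NULL;	/* block is not cached */
	}
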
276 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
284 if (found->block == b->block) { in __insert()
285 BUG_ON(found != b); in __insert()
290 new = (found->block < b->block) ? in __insert()
294 rb_link_node(&b->node, parent, new); in __insert()
295 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
298 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
300 rb_erase(&b->node, &c->buffer_tree); in __remove()
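
__insert() walks the same tree top-down with a pointer-to-slot (new) and remembers the parent so the new node can be spliced in with rb_link_node() and recolored with rb_insert_color(). The continuation of line 290 does not reference b and is therefore missing above; in a reconstruction it must pick the child slot with the same ordering __find() uses:

	static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
	{
		struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
		struct dm_buffer *found;

		while (*new) {
			found = container_of(*new, struct dm_buffer, node);

			if (found->block == b->block) {
				/* re-inserting the same buffer is a no-op;
				 * two buffers for one block is a bug */
				BUG_ON(found != b);
				return;
			}

			parent = *new;
			new = (found->block < b->block) ?
				&(*new)->rb_right : &(*new)->rb_left;
		}

		rb_link_node(&b->node, parent, new);		/* splice in */
		rb_insert_color(&b->node, &c->buffer_tree);	/* rebalance */
	}

__remove() (line 300) is the trivial inverse: rb_erase() takes the node out and rebalances.
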
441 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer() local
444 if (!b) in alloc_buffer()
447 b->c = c; in alloc_buffer()
449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
450 if (!b->data) { in alloc_buffer()
451 kfree(b); in alloc_buffer()
455 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
457 return b; in alloc_buffer()
463 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
465 struct dm_bufio_client *c = b->c; in free_buffer()
467 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
469 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
470 kfree(b); in free_buffer()
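
alloc_buffer() over-allocates by c->aux_size so that per-buffer client data lives directly behind the descriptor. A sketch of the layout this single kmalloc() produces:

	/* one allocation: descriptor immediately followed by client aux data
	 *
	 *   +------------------+--------------------+
	 *   | struct dm_buffer |  aux_size bytes    |
	 *   +------------------+--------------------+
	 *   ^ b                ^ (void *)(b + 1)
	 */
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

dm_bufio_get_aux_data() (line 1451 below) exploits exactly this: b + 1 in pointer arithmetic lands one whole struct dm_buffer past b, i.e. at the aux area. free_buffer() reverses both steps, subtracting the block from the global accounting before releasing the data and the descriptor.
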
476 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
478 struct dm_bufio_client *c = b->c; in __link_buffer()
481 b->block = block; in __link_buffer()
482 b->list_mode = dirty; in __link_buffer()
483 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
484 __insert(b->c, b); in __link_buffer()
485 b->last_accessed = jiffies; in __link_buffer()
491 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
493 struct dm_bufio_client *c = b->c; in __unlink_buffer()
495 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
497 c->n_buffers[b->list_mode]--; in __unlink_buffer()
498 __remove(b->c, b); in __unlink_buffer()
499 list_del(&b->lru_list); in __unlink_buffer()
505 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
507 struct dm_bufio_client *c = b->c; in __relink_lru()
509 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
511 c->n_buffers[b->list_mode]--; in __relink_lru()
513 b->list_mode = dirty; in __relink_lru()
514 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
515 b->last_accessed = jiffies; in __relink_lru()
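
The three helpers above maintain two LRU lists per client, indexed by the dirty flag (LIST_CLEAN or LIST_DIRTY), plus per-list counters. The counter increments sit on lines that do not reference b and are missing from the listing; a reconstruction of __link_buffer() with them restored:

	static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
	{
		struct dm_bufio_client *c = b->c;

		c->n_buffers[dirty]++;			/* elided: no reference to b */
		b->block = block;
		b->list_mode = dirty;			/* doubles as the list index */
		list_add(&b->lru_list, &c->lru[dirty]);	/* head = most recently used */
		__insert(b->c, b);			/* make it findable by block */
		b->last_accessed = jiffies;
	}

__relink_lru() does the same counter hand-off (decrement the old list, increment the new one), and list_move() puts the buffer at the head of its list; this head-insertion is why all the eviction paths later walk the lists in reverse, coldest buffer first.
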
546 struct dm_buffer *b = context; in dmio_complete() local
548 b->bio.bi_error = error ? -EIO : 0; in dmio_complete()
549 b->bio.bi_end_io(&b->bio); in dmio_complete()
552 static void use_dmio(struct dm_buffer *b, int rw, sector_t block, in use_dmio() argument
559 .notify.context = b, in use_dmio()
560 .client = b->c->dm_io, in use_dmio()
563 .bdev = b->c->bdev, in use_dmio()
564 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
565 .count = b->c->block_size >> SECTOR_SHIFT, in use_dmio()
568 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
570 io_req.mem.ptr.addr = b->data; in use_dmio()
573 io_req.mem.ptr.vma = b->data; in use_dmio()
576 b->bio.bi_end_io = end_io; in use_dmio()
580 b->bio.bi_error = r; in use_dmio()
581 end_io(&b->bio); in use_dmio()
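
use_dmio() routes the transfer through the dm-io service. The struct initializers are split across lines, several of which do not reference b, so the request looks fragmented above. A condensed sketch with the elided fields filled in; the .bi_rw field and the raw rw flag match the pre-4.8 kernel this listing comes from, and the mem.type values are the standard dm-io ones (an assumption, since those lines are elided):

	static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
			     bio_end_io_t *end_io)
	{
		int r;
		struct dm_io_request io_req = {
			.bi_rw = rw,
			.notify.fn = dmio_complete,	/* async completion */
			.notify.context = b,
			.client = b->c->dm_io,
		};
		struct dm_io_region region = {
			.bdev = b->c->bdev,
			.sector = block << b->c->sectors_per_block_bits,
			.count = b->c->block_size >> SECTOR_SHIFT,
		};

		if (b->data_mode != DATA_MODE_VMALLOC) {
			io_req.mem.type = DM_IO_KMEM;	/* kmalloc'd/page data */
			io_req.mem.ptr.addr = b->data;
		} else {
			io_req.mem.type = DM_IO_VMA;	/* vmalloc'd data */
			io_req.mem.ptr.vma = b->data;
		}

		b->bio.bi_end_io = end_io;
		r = dm_io(&io_req, 1, &region, NULL);
		if (r) {
			/* submission failed: complete the bio by hand */
			b->bio.bi_error = r;
			end_io(&b->bio);
		}
	}

dmio_complete() (line 546) then folds dm-io's error word into bi_error and invokes the stashed bi_end_io.
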
600 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, in use_inline_bio() argument
606 bio_init(&b->bio); in use_inline_bio()
607 b->bio.bi_io_vec = b->bio_vec; in use_inline_bio()
608 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; in use_inline_bio()
609 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
610 b->bio.bi_bdev = b->c->bdev; in use_inline_bio()
611 b->bio.bi_end_io = inline_endio; in use_inline_bio()
616 b->bio.bi_private = end_io; in use_inline_bio()
622 ptr = b->data; in use_inline_bio()
623 len = b->c->block_size; in use_inline_bio()
631 if (!bio_add_page(&b->bio, virt_to_page(ptr), in use_inline_bio()
634 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
635 use_dmio(b, rw, block, end_io); in use_inline_bio()
643 submit_bio(rw, &b->bio); in use_inline_bio()
646 static void submit_io(struct dm_buffer *b, int rw, sector_t block, in submit_io() argument
649 if (rw == WRITE && b->c->write_callback) in submit_io()
650 b->c->write_callback(b); in submit_io()
652 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && in submit_io()
653 b->data_mode != DATA_MODE_VMALLOC) in submit_io()
654 use_inline_bio(b, rw, block, end_io); in submit_io()
656 use_dmio(b, rw, block, end_io); in submit_io()
671 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in write_endio() local
673 b->write_error = bio->bi_error; in write_endio()
675 struct dm_bufio_client *c = b->c; in write_endio()
680 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
683 clear_bit(B_WRITING, &b->state); in write_endio()
686 wake_up_bit(&b->state, B_WRITING); in write_endio()
698 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
701 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
704 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
705 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
708 submit_io(b, WRITE, b->block, write_endio); in __write_dirty_buffer()
710 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
718 struct dm_buffer *b = in __flush_write_list() local
720 list_del(&b->write_list); in __flush_write_list()
721 submit_io(b, WRITE, b->block, write_endio); in __flush_write_list()
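
These two functions implement the write-back handshake. B_DIRTY is cleared before the I/O is issued (a re-dirty during the write is caught by the next pass), and B_WRITING acts as a per-buffer I/O lock taken with wait_on_bit_lock_io(). A caller that still holds the client lock passes a write_list so submission can be batched and performed unlocked by __flush_write_list(). Reconstructed shape:

	static void __write_dirty_buffer(struct dm_buffer *b,
					 struct list_head *write_list)
	{
		if (!test_bit(B_DIRTY, &b->state))
			return;				/* nothing to write */

		clear_bit(B_DIRTY, &b->state);
		/* serialize against any write already in flight */
		wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

		if (!write_list)
			submit_io(b, WRITE, b->block, write_endio);
		else
			list_add_tail(&b->write_list, write_list);	/* defer */
	}

write_endio() (line 671) records bio->bi_error in b->write_error, clears B_WRITING and wakes waiters with wake_up_bit().
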
732 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
734 BUG_ON(b->hold_count); in __make_buffer_clean()
736 if (!b->state) /* fast case */ in __make_buffer_clean()
739 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
740 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
741 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
750 struct dm_buffer *b; in __get_unclaimed_buffer() local
752 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
753 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
754 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
756 if (!b->hold_count) { in __get_unclaimed_buffer()
757 __make_buffer_clean(b); in __get_unclaimed_buffer()
758 __unlink_buffer(b); in __get_unclaimed_buffer()
759 return b; in __get_unclaimed_buffer()
764 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
765 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
767 if (!b->hold_count) { in __get_unclaimed_buffer()
768 __make_buffer_clean(b); in __get_unclaimed_buffer()
769 __unlink_buffer(b); in __get_unclaimed_buffer()
770 return b; in __get_unclaimed_buffer()
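
__get_unclaimed_buffer() is the reclaim primitive: it scans the clean LRU from its cold end first, falls back to the dirty list, and skips anything still held. A compact reconstruction (the BUG_ON sanity checks from lines 753-754 and 765 are dropped for brevity):

	static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
	{
		struct dm_buffer *b;

		/* clean buffers first: reclaiming them costs no I/O */
		list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list)
			if (!b->hold_count) {
				__make_buffer_clean(b);	/* waits out stray I/O */
				__unlink_buffer(b);
				return b;
			}

		/* dirty buffers must be written back synchronously first,
		 * which __make_buffer_clean() takes care of */
		list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list)
			if (!b->hold_count) {
				__make_buffer_clean(b);
				__unlink_buffer(b);
				return b;
			}

		return NULL;
	}
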
815 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
831 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
832 if (b) in __alloc_buffer_wait_no_callback()
833 return b; in __alloc_buffer_wait_no_callback()
840 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
842 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
845 return b; in __alloc_buffer_wait_no_callback()
848 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
849 if (b) in __alloc_buffer_wait_no_callback()
850 return b; in __alloc_buffer_wait_no_callback()
858 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
860 if (!b) in __alloc_buffer_wait()
864 c->alloc_callback(b); in __alloc_buffer_wait()
866 return b; in __alloc_buffer_wait()
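
Allocation tries three sources in order, looping until one succeeds: an opportunistic kmalloc that is forbidden to trigger I/O or retry hard, the client's reserved_buffers pool (which guarantees forward progress), and reclaim via __get_unclaimed_buffer(). NF_PREFETCH callers bail out rather than sleep. A simplified sketch; the lock/unlock dance and the wait primitive between rounds are elided in the listing, so the __wait_for_free_buffer() name here is an assumption:

	for (;;) {
		b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY |
				    __GFP_NOMEMALLOC | __GFP_NOWARN);
		if (b)
			return b;		/* fresh allocation */

		if (nf == NF_PREFETCH)
			return NULL;		/* prefetch never blocks here */

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);	/* take from the reserve */
			return b;
		}

		b = __get_unclaimed_buffer(c);	/* evict someone else's buffer */
		if (b)
			return b;

		__wait_for_free_buffer(c);	/* assumed: sleep, then retry */
	}

__alloc_buffer_wait() then gives the client's alloc_callback a chance to initialize the aux area before the buffer is handed out.
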
872 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
874 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
877 free_buffer(b); in __free_buffer_wake()
879 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
889 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
891 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
892 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
894 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
895 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
896 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
900 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
903 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
948 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark() local
950 if (!b) in __check_watermark()
953 __free_buffer_wake(b); in __check_watermark()
969 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
973 b = __find(c, block); in __bufio_new()
974 if (b) in __bufio_new()
988 b = __find(c, block); in __bufio_new()
989 if (b) { in __bufio_new()
996 b = new_b; in __bufio_new()
997 b->hold_count = 1; in __bufio_new()
998 b->read_error = 0; in __bufio_new()
999 b->write_error = 0; in __bufio_new()
1000 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
1003 b->state = 0; in __bufio_new()
1004 return b; in __bufio_new()
1007 b->state = 1 << B_READING; in __bufio_new()
1010 return b; in __bufio_new()
1022 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) in __bufio_new()
1025 b->hold_count++; in __bufio_new()
1026 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1027 test_bit(B_WRITING, &b->state)); in __bufio_new()
1028 return b; in __bufio_new()
1037 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in read_endio() local
1039 b->read_error = bio->bi_error; in read_endio()
1041 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1044 clear_bit(B_READING, &b->state); in read_endio()
1047 wake_up_bit(&b->state, B_READING); in read_endio()
1060 struct dm_buffer *b; in new_read() local
1065 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1070 if (!b) in new_read()
1071 return b; in new_read()
1074 submit_io(b, READ, b->block, read_endio); in new_read()
1076 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1078 if (b->read_error) { in new_read()
1079 int error = b->read_error; in new_read()
1081 dm_bufio_release(b); in new_read()
1086 *bp = b; in new_read()
1088 return b->data; in new_read()
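
new_read() is the common backend for the exported get/read/prefetch entry points. __bufio_new() returns the buffer plus a need_submit flag so the actual READ can be issued after the client lock is dropped; the caller then sleeps on B_READING, which read_endio() (line 1037, below) clears. Condensed:

	b = __bufio_new(c, block, nf, &need_submit, &write_list);
	/* ...unlock the client, flush write_list... */
	if (!b)
		return NULL;		/* NF_GET miss or NF_PREFETCH backoff */

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);	/* never hand out bad data */
		return ERR_PTR(error);	/* assumed: the return is elided above */
	}

	*bp = b;
	return b->data;
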
1130 struct dm_buffer *b; in dm_bufio_prefetch() local
1131 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1140 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1144 submit_io(b, READ, b->block, read_endio); in dm_bufio_prefetch()
1145 dm_bufio_release(b); in dm_bufio_prefetch()
1162 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1164 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1168 BUG_ON(!b->hold_count); in dm_bufio_release()
1170 b->hold_count--; in dm_bufio_release()
1171 if (!b->hold_count) { in dm_bufio_release()
1179 if ((b->read_error || b->write_error) && in dm_bufio_release()
1180 !test_bit(B_READING, &b->state) && in dm_bufio_release()
1181 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1182 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1183 __unlink_buffer(b); in dm_bufio_release()
1184 __free_buffer_wake(b); in dm_bufio_release()
1192 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1194 struct dm_bufio_client *c = b->c; in dm_bufio_mark_buffer_dirty()
1198 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_buffer_dirty()
1200 if (!test_and_set_bit(B_DIRTY, &b->state)) in dm_bufio_mark_buffer_dirty()
1201 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_buffer_dirty()
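
Together these exported calls form the usual client pattern: read takes a hold, the caller modifies the data, marks the buffer dirty (test_and_set_bit() keeps the LIST_DIRTY relink idempotent) and drops its hold; the buffer stays cached. A minimal usage sketch, in which offset, payload and len are hypothetical:

	struct dm_buffer *b;
	void *data = dm_bufio_read(c, block, &b);	/* hold_count++ */

	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy(data + offset, payload, len);		/* hypothetical edit */
	dm_bufio_mark_buffer_dirty(b);			/* -> LIST_DIRTY */
	dm_bufio_release(b);				/* hold_count--; stays cached */

	return dm_bufio_write_dirty_buffers(c);		/* force it to disk */
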
1231 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1242 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1248 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1250 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1253 b->hold_count++; in dm_bufio_write_dirty_buffers()
1255 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1258 b->hold_count--; in dm_bufio_write_dirty_buffers()
1260 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1264 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1265 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1266 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
1334 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1336 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1360 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1361 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1363 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1364 if (b->hold_count == 1) { in dm_bufio_release_move()
1365 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1367 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1368 __unlink_buffer(b); in dm_bufio_release_move()
1369 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1372 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1381 old_block = b->block; in dm_bufio_release_move()
1382 __unlink_buffer(b); in dm_bufio_release_move()
1383 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1384 submit_io(b, WRITE, new_block, write_endio); in dm_bufio_release_move()
1385 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1387 __unlink_buffer(b); in dm_bufio_release_move()
1388 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1392 dm_bufio_release(b); in dm_bufio_release_move()
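
dm_bufio_release_move() distinguishes a sole holder from a shared buffer. With hold_count == 1 it simply re-dirties the buffer under its new block number and lets normal write-back land it there. Otherwise it writes a copy to new_block synchronously: the temporary relink to new_block before submit_io() makes the client's write_callback (invoked from submit_io(), line 649) observe the new block number, after which the buffer is relinked to old_block for the remaining holders. Condensed:

	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);	/* flushed later */
	} else {
		sector_t old_block;

		wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);	/* callback sees new_block */
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);	/* restore identity */
	}
	dm_bufio_release(b);
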
1404 struct dm_buffer *b; in dm_bufio_forget() local
1408 b = __find(c, block); in dm_bufio_forget()
1409 if (b && likely(!b->hold_count) && likely(!b->state)) { in dm_bufio_forget()
1410 __unlink_buffer(b); in dm_bufio_forget()
1411 __free_buffer_wake(b); in dm_bufio_forget()
1437 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1439 return b->block; in dm_bufio_get_block_number()
1443 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1445 return b->data; in dm_bufio_get_block_data()
1449 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1451 return b + 1; in dm_bufio_get_aux_data()
1455 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1457 return b->c; in dm_bufio_get_client()
1463 struct dm_buffer *b; in drop_buffers() local
1475 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1476 __free_buffer_wake(b); in drop_buffers()
1479 list_for_each_entry(b, &c->lru[i], lru_list) in drop_buffers()
1481 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1497 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1500 if (test_bit(B_READING, &b->state) || in __try_evict_buffer()
1501 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1502 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1506 if (b->hold_count) in __try_evict_buffer()
1509 __make_buffer_clean(b); in __try_evict_buffer()
1510 __unlink_buffer(b); in __try_evict_buffer()
1511 __free_buffer_wake(b); in __try_evict_buffer()
1526 struct dm_buffer *b, *tmp; in __scan() local
1532 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1533 if (__try_evict_buffer(b, gfp_mask)) in __scan()
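
__scan() implements the shrinker callback: it walks both LRU lists from the cold end and offers each entry to __try_evict_buffer(), which refuses buffers with I/O in flight or a nonzero hold_count. The gfp_mask decides whether the shrinker may block to clean a busy buffer; the exact gfp test sits on lines the listing elides. A sketch of the scan loop, where the freed counter and nr_to_scan budget are assumed names:

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;		/* assumed accounting */
			if (!--nr_to_scan)		/* assumed scan budget */
				return freed;
		}
	}
	return freed;
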
1656 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1658 if (!b) { in dm_bufio_client_create()
1662 __free_buffer_wake(b); in dm_bufio_client_create()
1682 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1684 list_del(&b->lru_list); in dm_bufio_client_create()
1685 free_buffer(b); in dm_bufio_client_create()
1719 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1721 list_del(&b->lru_list); in dm_bufio_client_destroy()
1722 free_buffer(b); in dm_bufio_client_destroy()
1747 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1749 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
1754 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1761 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1765 if (!older_than(b, age_hz)) in __evict_old_buffers()
1768 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()
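
Time-based eviction relies on the LRU invariant: every access moves a buffer to the head of its list, so the reverse walk in __evict_old_buffers() visits buffers oldest-first and can stop at the first one not yet older than age_hz. older_than() uses time_after_eq() so the comparison survives jiffies wrap-around. The loop's shape, with the stop condition made explicit (the body of the line-1765 check is elided; break is assumed here because of the LRU ordering, and the retained-buffer budget is an assumed name):

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (!older_than(b, age_hz))
			break;		/* everything further up is younger */

		if (__try_evict_buffer(b, 0))
			count--;	/* assumed: budget of buffers to keep */
	}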