Lines Matching refs:tree

85 #define btrfs_debug_check_extent_io_range(tree, start, end)		\  argument
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
88 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
93 if (!tree->mapping) in __btrfs_debug_check_extent_io_range()
96 inode = tree->mapping->host; in __btrfs_debug_check_extent_io_range()
121 struct extent_io_tree *tree; member
136 tree_fs_info(struct extent_io_tree *tree) in tree_fs_info() argument
138 if (!tree->mapping) in tree_fs_info()
140 return btrfs_sb(tree->mapping->host->i_sb); in tree_fs_info()
198 void extent_io_tree_init(struct extent_io_tree *tree, in extent_io_tree_init() argument
201 tree->state = RB_ROOT; in extent_io_tree_init()
202 tree->ops = NULL; in extent_io_tree_init()
203 tree->dirty_bytes = 0; in extent_io_tree_init()
204 spin_lock_init(&tree->lock); in extent_io_tree_init()
205 tree->mapping = mapping; in extent_io_tree_init()
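Lines 198-205 show the whole constructor: the state tree starts as an empty
RB_ROOT, the ops table is NULL, and the backing address_space is recorded in
tree->mapping. A minimal usage sketch, assuming the two-argument signature
implied by the body (the inode is a hypothetical caller-side stand-in):

	/* Sketch: set up a per-inode extent state tree. The second
	 * argument becomes tree->mapping (line 205). */
	struct extent_io_tree io_tree;

	extent_io_tree_init(&io_tree, inode->i_mapping);
	/* io_tree.state is an empty rbtree and io_tree.ops is NULL, so
	 * every "tree->ops && tree->ops->..." hook test below is skipped. */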
273 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, in __etree_search() argument
279 struct rb_root *root = &tree->state; in __etree_search()
326 tree_search_for_insert(struct extent_io_tree *tree, in tree_search_for_insert() argument
334 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret); in tree_search_for_insert()
340 static inline struct rb_node *tree_search(struct extent_io_tree *tree, in tree_search() argument
343 return tree_search_for_insert(tree, offset, NULL, NULL); in tree_search()
346 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, in merge_cb() argument
349 if (tree->ops && tree->ops->merge_extent_hook) in merge_cb()
350 tree->ops->merge_extent_hook(tree->mapping->host, new, in merge_cb()
363 static void merge_state(struct extent_io_tree *tree, in merge_state() argument
377 merge_cb(tree, state, other); in merge_state()
379 rb_erase(&other->rb_node, &tree->state); in merge_state()
389 merge_cb(tree, state, other); in merge_state()
391 rb_erase(&other->rb_node, &tree->state); in merge_state()
398 static void set_state_cb(struct extent_io_tree *tree, in set_state_cb() argument
401 if (tree->ops && tree->ops->set_bit_hook) in set_state_cb()
402 tree->ops->set_bit_hook(tree->mapping->host, state, bits); in set_state_cb()
405 static void clear_state_cb(struct extent_io_tree *tree, in clear_state_cb() argument
408 if (tree->ops && tree->ops->clear_bit_hook) in clear_state_cb()
409 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); in clear_state_cb()
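set_state_cb() and clear_state_cb(), like merge_cb() and split_cb() around
them, use the same null-checked dispatch idiom: test the ops table, then the
individual hook, then call with the owning inode taken from tree->mapping.
A sketch of the idiom with a hypothetical hook name:

	/* some_hook is hypothetical; the real hooks visible here are
	 * set_bit_hook, clear_bit_hook, merge_extent_hook and
	 * split_extent_hook. Trees with ops == NULL skip the call. */
	if (tree->ops && tree->ops->some_hook)
		tree->ops->some_hook(tree->mapping->host, state, bits);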
412 static void set_state_bits(struct extent_io_tree *tree,
425 static int insert_state(struct extent_io_tree *tree, in insert_state() argument
439 set_state_bits(tree, state, bits); in insert_state()
441 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); in insert_state()
450 merge_state(tree, state); in insert_state()
454 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig, in split_cb() argument
457 if (tree->ops && tree->ops->split_extent_hook) in split_cb()
458 tree->ops->split_extent_hook(tree->mapping->host, orig, split); in split_cb()
475 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, in split_state() argument
480 split_cb(tree, orig, split); in split_state()
487 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, in split_state()
512 static struct extent_state *clear_state_bit(struct extent_io_tree *tree, in clear_state_bit() argument
521 WARN_ON(range > tree->dirty_bytes); in clear_state_bit()
522 tree->dirty_bytes -= range; in clear_state_bit()
524 clear_state_cb(tree, state, bits); in clear_state_bit()
531 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
538 merge_state(tree, state); in clear_state_bit()
553 static void extent_io_tree_panic(struct extent_io_tree *tree, int err) in extent_io_tree_panic() argument
555 btrfs_panic(tree_fs_info(tree), err, "Locking error: " in extent_io_tree_panic()
572 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
585 btrfs_debug_check_extent_io_range(tree, start, end); in clear_extent_bit()
608 spin_lock(&tree->lock); in clear_extent_bit()
631 node = tree_search(tree, start); in clear_extent_bit()
666 err = split_state(tree, state, prealloc, start); in clear_extent_bit()
668 extent_io_tree_panic(tree, err); in clear_extent_bit()
674 state = clear_state_bit(tree, state, &bits, wake); in clear_extent_bit()
688 err = split_state(tree, state, prealloc, end + 1); in clear_extent_bit()
690 extent_io_tree_panic(tree, err); in clear_extent_bit()
695 clear_state_bit(tree, prealloc, &bits, wake); in clear_extent_bit()
701 state = clear_state_bit(tree, state, &bits, wake); in clear_extent_bit()
711 spin_unlock(&tree->lock); in clear_extent_bit()
720 spin_unlock(&tree->lock); in clear_extent_bit()
726 static void wait_on_state(struct extent_io_tree *tree, in wait_on_state() argument
728 __releases(tree->lock) in wait_on_state()
729 __acquires(tree->lock) in wait_on_state()
733 spin_unlock(&tree->lock); in wait_on_state()
735 spin_lock(&tree->lock); in wait_on_state()
744 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
750 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
752 spin_lock(&tree->lock); in wait_extent_bit()
759 node = tree_search(tree, start); in wait_extent_bit()
772 wait_on_state(tree, state); in wait_extent_bit()
781 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
787 spin_unlock(&tree->lock); in wait_extent_bit()
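wait_on_state() carries the sparse annotations __releases/__acquires (lines
728-729) because it drops tree->lock around the sleep and retakes it, while
wait_extent_bit() additionally yields with cond_resched_lock() (line 781)
between states. A plausible reconstruction of the elided wait_on_state()
body, assuming extent_state embeds a wait queue head (state->wq):

	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);	/* line 733 */
	schedule();
	spin_lock(&tree->lock);		/* line 735 */
	finish_wait(&state->wq, &wait);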
790 static void set_state_bits(struct extent_io_tree *tree, in set_state_bits() argument
796 set_state_cb(tree, state, bits); in set_state_bits()
799 tree->dirty_bytes += range; in set_state_bits()
835 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
849 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
858 spin_lock(&tree->lock); in __set_extent_bit()
871 node = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
875 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
878 extent_io_tree_panic(tree, err); in __set_extent_bit()
902 set_state_bits(tree, state, &bits); in __set_extent_bit()
904 merge_state(tree, state); in __set_extent_bit()
940 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
942 extent_io_tree_panic(tree, err); in __set_extent_bit()
948 set_state_bits(tree, state, &bits); in __set_extent_bit()
950 merge_state(tree, state); in __set_extent_bit()
982 err = insert_state(tree, prealloc, start, this_end, in __set_extent_bit()
985 extent_io_tree_panic(tree, err); in __set_extent_bit()
1007 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1009 extent_io_tree_panic(tree, err); in __set_extent_bit()
1011 set_state_bits(tree, prealloc, &bits); in __set_extent_bit()
1013 merge_state(tree, prealloc); in __set_extent_bit()
1021 spin_unlock(&tree->lock); in __set_extent_bit()
1030 spin_unlock(&tree->lock); in __set_extent_bit()
1036 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1040 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1062 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1076 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1092 spin_lock(&tree->lock); in convert_extent_bit()
1106 node = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1113 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1116 extent_io_tree_panic(tree, err); in convert_extent_bit()
1133 set_state_bits(tree, state, &bits); in convert_extent_bit()
1135 state = clear_state_bit(tree, state, &clear_bits, 0); in convert_extent_bit()
1167 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1169 extent_io_tree_panic(tree, err); in convert_extent_bit()
1174 set_state_bits(tree, state, &bits); in convert_extent_bit()
1176 state = clear_state_bit(tree, state, &clear_bits, 0); in convert_extent_bit()
1210 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1213 extent_io_tree_panic(tree, err); in convert_extent_bit()
1232 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1234 extent_io_tree_panic(tree, err); in convert_extent_bit()
1236 set_state_bits(tree, prealloc, &bits); in convert_extent_bit()
1238 clear_state_bit(tree, prealloc, &clear_bits, 0); in convert_extent_bit()
1246 spin_unlock(&tree->lock); in convert_extent_bit()
1255 spin_unlock(&tree->lock); in convert_extent_bit()
1263 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1266 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, in set_extent_dirty()
1270 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1273 return set_extent_bit(tree, start, end, bits, NULL, in set_extent_bits()
1277 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1280 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); in clear_extent_bits()
1283 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1286 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1291 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_defrag() argument
1294 return set_extent_bit(tree, start, end, in set_extent_defrag()
1299 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1302 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1307 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1310 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, in set_extent_new()
1314 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1317 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, in set_extent_uptodate()
1321 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_uptodate() argument
1324 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
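Lines 1263-1324 are thin wrappers that pin down the bit mask and the
wake/delete flags before forwarding to set_extent_bit() or clear_extent_bit().
A sketch of the same shape for a hypothetical EXTENT_EXAMPLE bit, assuming the
trailing arguments mirror set_extent_new() (NULL failed_start, NULL cached
state) and clear_extent_bits() at line 1280 (wake = 0, delete = 0):

	static int set_extent_example(struct extent_io_tree *tree, u64 start,
				      u64 end, gfp_t mask)
	{
		return set_extent_bit(tree, start, end, EXTENT_EXAMPLE, NULL,
				      NULL, mask);
	}

	static int clear_extent_example(struct extent_io_tree *tree, u64 start,
					u64 end, gfp_t mask)
	{
		return clear_extent_bit(tree, start, end, EXTENT_EXAMPLE,
					0, 0, NULL, mask);
	}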
1332 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1339 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1343 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); in lock_extent_bits()
1352 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
1354 return lock_extent_bits(tree, start, end, 0, NULL); in lock_extent()
1357 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1362 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1366 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1373 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1376 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1380 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) in unlock_extent() argument
1382 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
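lock_extent()/unlock_extent() (lines 1352, 1380) bracket an inclusive byte
range with EXTENT_LOCKED, and try_lock_extent() (line 1357) backs out its
partial lock up to failed_start - 1 when it loses the race. A usage sketch;
do_work() is a hypothetical stand-in:

	/* Blocking form: waits until [start, end] is exclusively ours. */
	lock_extent(tree, start, end);
	do_work(tree, start, end);
	unlock_extent(tree, start, end);

	/* Non-blocking form: try_lock_extent() returns nonzero only when
	 * the whole range was locked, per the cleanup at line 1366. */
	if (try_lock_extent(tree, start, end)) {
		do_work(tree, start, end);
		unlock_extent(tree, start, end);
	}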
1422 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1429 page = find_get_page(tree->mapping, index); in set_range_writeback()
1443 find_first_extent_bit_state(struct extent_io_tree *tree, in find_first_extent_bit_state() argument
1453 node = tree_search(tree, start); in find_first_extent_bit_state()
1477 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1485 spin_lock(&tree->lock); in find_first_extent_bit()
1505 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1514 spin_unlock(&tree->lock); in find_first_extent_bit()
1524 static noinline u64 find_delalloc_range(struct extent_io_tree *tree, in find_delalloc_range() argument
1534 spin_lock(&tree->lock); in find_delalloc_range()
1540 node = tree_search(tree, cur_start); in find_delalloc_range()
1574 spin_unlock(&tree->lock); in find_delalloc_range()
1676 struct extent_io_tree *tree, in find_lock_delalloc_range() argument
1691 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, in find_lock_delalloc_range()
1735 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state); in find_lock_delalloc_range()
1738 ret = test_range_bit(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1741 unlock_extent_cached(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1760 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in extent_clear_unlock_delalloc() local
1768 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1812 u64 count_range_bits(struct extent_io_tree *tree, in count_range_bits() argument
1826 spin_lock(&tree->lock); in count_range_bits()
1828 total_bytes = tree->dirty_bytes; in count_range_bits()
1835 node = tree_search(tree, cur_start); in count_range_bits()
1863 spin_unlock(&tree->lock); in count_range_bits()
1871 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) in set_state_private() argument
1877 spin_lock(&tree->lock); in set_state_private()
1882 node = tree_search(tree, start); in set_state_private()
1894 spin_unlock(&tree->lock); in set_state_private()
1898 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) in get_state_private() argument
1904 spin_lock(&tree->lock); in get_state_private()
1909 node = tree_search(tree, start); in get_state_private()
1921 spin_unlock(&tree->lock); in get_state_private()
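set_state_private()/get_state_private() (lines 1871, 1898) stash and fetch one
u64 in the extent_state record whose range begins exactly at start; both walk
tree_search() under tree->lock and fail when no state starts at that offset.
A round-trip sketch, assuming 0 on success and nonzero for a missing state:

	u64 private;

	/* Only works if some extent_state begins exactly at `start`. */
	if (!set_state_private(tree, start, 0xdeadbeefULL) &&
	    !get_state_private(tree, start, &private))
		WARN_ON(private != 0xdeadbeefULL);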
1931 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
1938 spin_lock(&tree->lock); in test_range_bit()
1943 node = tree_search(tree, start); in test_range_bit()
1977 spin_unlock(&tree->lock); in test_range_bit()
1985 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) in check_page_uptodate() argument
1989 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
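check_page_uptodate() (line 1985) is the canonical test_range_bit() caller:
the literal 1 asks whether EXTENT_UPTODATE covers the entire range rather
than any part of it, and the trailing NULL is the optional cached state. A
plausible reconstruction of the elided body, assuming this kernel era's
PAGE_CACHE_SIZE page-range arithmetic:

	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);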
2210 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_get_io_failure_record() local
2268 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in btrfs_get_io_failure_record()
2403 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in bio_readpage_error() local
2438 ret = tree->ops->submit_bio_hook(inode, read_mode, bio, in bio_readpage_error()
2454 struct extent_io_tree *tree; in end_extent_writepage() local
2457 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_extent_writepage()
2459 if (tree->ops && tree->ops->writepage_end_io_hook) { in end_extent_writepage()
2460 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2524 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, in endio_readpage_release_extent() argument
2530 if (uptodate && tree->track_uptodate) in endio_readpage_release_extent()
2531 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2532 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2551 struct extent_io_tree *tree; in end_bio_extent_readpage() local
2572 tree = &BTRFS_I(inode)->io_tree; in end_bio_extent_readpage()
2596 if (likely(uptodate && tree->ops && in end_bio_extent_readpage()
2597 tree->ops->readpage_end_io_hook)) { in end_bio_extent_readpage()
2598 ret = tree->ops->readpage_end_io_hook(io_bio, offset, in end_bio_extent_readpage()
2610 if (tree->ops && tree->ops->readpage_io_failed_hook) { in end_bio_extent_readpage()
2611 ret = tree->ops->readpage_io_failed_hook(page, mirror); in end_bio_extent_readpage()
2657 endio_readpage_release_extent(tree, in end_bio_extent_readpage()
2663 endio_readpage_release_extent(tree, start, in end_bio_extent_readpage()
2671 endio_readpage_release_extent(tree, extent_start, in end_bio_extent_readpage()
2679 endio_readpage_release_extent(tree, extent_start, extent_len, in end_bio_extent_readpage()
2755 struct extent_io_tree *tree = bio->bi_private; in submit_one_bio() local
2764 if (tree->ops && tree->ops->submit_bio_hook) in submit_one_bio()
2765 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, in submit_one_bio()
2776 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page, in merge_bio() argument
2781 if (tree->ops && tree->ops->merge_bio_hook) in merge_bio()
2782 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, in merge_bio()
2789 static int submit_extent_page(int rw, struct extent_io_tree *tree, in submit_extent_page() argument
2818 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || in submit_extent_page()
2842 bio->bi_private = tree; in submit_extent_page()
2906 static int __do_readpage(struct extent_io_tree *tree, in __do_readpage() argument
2941 unlock_extent(tree, start, end); in __do_readpage()
2971 set_extent_uptodate(tree, cur, cur + iosize - 1, in __do_readpage()
2974 unlock_extent_cached(tree, cur, in __do_readpage()
2984 unlock_extent(tree, cur, end); in __do_readpage()
3067 set_extent_uptodate(tree, cur, cur + iosize - 1, in __do_readpage()
3069 unlock_extent_cached(tree, cur, cur + iosize - 1, in __do_readpage()
3076 if (test_range_bit(tree, cur, cur_end, in __do_readpage()
3078 check_page_uptodate(tree, page); in __do_readpage()
3080 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3091 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3098 ret = submit_extent_page(rw, tree, page, in __do_readpage()
3111 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3125 static inline void __do_contiguous_readpages(struct extent_io_tree *tree, in __do_contiguous_readpages() argument
3140 lock_extent(tree, start, end); in __do_contiguous_readpages()
3145 unlock_extent(tree, start, end); in __do_contiguous_readpages()
3151 __do_readpage(tree, pages[index], get_extent, em_cached, bio, in __do_contiguous_readpages()
3157 static void __extent_readpages(struct extent_io_tree *tree, in __extent_readpages() argument
3180 __do_contiguous_readpages(tree, &pages[first_index], in __extent_readpages()
3192 __do_contiguous_readpages(tree, &pages[first_index], in __extent_readpages()
3199 static int __extent_read_full_page(struct extent_io_tree *tree, in __extent_read_full_page() argument
3212 lock_extent(tree, start, end); in __extent_read_full_page()
3216 unlock_extent(tree, start, end); in __extent_read_full_page()
3221 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, in __extent_read_full_page()
3226 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, in extent_read_full_page() argument
3233 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, in extent_read_full_page()
3240 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, in extent_read_full_page_nolock() argument
3247 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, in extent_read_full_page_nolock()
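extent_read_full_page() (line 3226) locks the range and delegates to
__do_readpage(), while the _nolock variant (line 3240) skips the extent lock
for callers that already hold it. A sketch of wiring the locking variant into
an address_space ->readpage handler; my_readpage and my_get_extent are
hypothetical, and the io_tree lookup copies line 2457:

	static int my_readpage(struct file *file, struct page *page)
	{
		struct extent_io_tree *tree;

		tree = &BTRFS_I(page->mapping->host)->io_tree;
		return extent_read_full_page(tree, page, my_get_extent, 0);
	}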
3280 struct extent_io_tree *tree = epd->tree; in writepage_delalloc() local
3288 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc) in writepage_delalloc()
3292 nr_delalloc = find_lock_delalloc_range(inode, tree, in writepage_delalloc()
3301 ret = tree->ops->fill_delalloc(inode, page, in writepage_delalloc()
3371 struct extent_io_tree *tree = epd->tree; in __extent_writepage_io() local
3389 if (tree->ops && tree->ops->writepage_start_hook) { in __extent_writepage_io()
3390 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage_io()
3414 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3415 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage_io()
3425 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3426 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3461 if (!compressed && tree->ops && in __extent_writepage_io()
3462 tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3463 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3479 if (tree->ops && tree->ops->writepage_io_hook) { in __extent_writepage_io()
3480 ret = tree->ops->writepage_io_hook(page, cur, in __extent_writepage_io()
3490 set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage_io()
3497 ret = submit_extent_page(write_flags, tree, page, in __extent_writepage_io()
3782 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; in write_one_eb() local
3800 ret = submit_extent_page(rw, tree, p, offset >> 9, in write_one_eb()
3832 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree; in btree_write_cache_pages() local
3837 .tree = tree, in btree_write_cache_pages()
3966 static int extent_write_cache_pages(struct extent_io_tree *tree, in extent_write_cache_pages() argument
4109 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, in extent_write_full_page() argument
4116 .tree = tree, in extent_write_full_page()
4129 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, in extent_write_locked_range() argument
4141 .tree = tree, in extent_write_locked_range()
4159 if (tree->ops && tree->ops->writepage_end_io_hook) in extent_write_locked_range()
4160 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
4173 int extent_writepages(struct extent_io_tree *tree, in extent_writepages() argument
4181 .tree = tree, in extent_writepages()
4188 ret = extent_write_cache_pages(tree, mapping, wbc, in extent_writepages()
4195 int extent_readpages(struct extent_io_tree *tree, in extent_readpages() argument
4223 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, in extent_readpages()
4228 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, in extent_readpages()
4245 int extent_invalidatepage(struct extent_io_tree *tree, in extent_invalidatepage() argument
4257 lock_extent_bits(tree, start, end, 0, &cached_state); in extent_invalidatepage()
4259 clear_extent_bit(tree, start, end, in extent_invalidatepage()
4272 struct extent_io_tree *tree, in try_release_extent_state() argument
4279 if (test_range_bit(tree, start, end, in try_release_extent_state()
4289 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
4310 struct extent_io_tree *tree, struct page *page, in try_release_extent_mapping() argument
4334 if (!test_range_bit(tree, em->start, in try_release_extent_mapping()
4349 return try_release_extent_state(map, tree, page, mask); in try_release_extent_mapping()
5194 int read_extent_buffer_pages(struct extent_io_tree *tree, in read_extent_buffer_pages() argument
5249 err = __extent_read_full_page(tree, page, in read_extent_buffer_pages()