Lines Matching refs:tree — cross-reference hits for the identifier tree in fs/btrfs/extent_io.c (each hit lists the source line number, the matching code, the enclosing function, and the symbol kind: argument, member, or local)

85 #define btrfs_debug_check_extent_io_range(tree, start, end)		\  argument
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
88 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
93 if (!tree->mapping) in __btrfs_debug_check_extent_io_range()
96 inode = tree->mapping->host; in __btrfs_debug_check_extent_io_range()
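
The three hits above (lines 85-96) are the debug-only range checker: a macro that forwards __func__ into __btrfs_debug_check_extent_io_range(). A minimal sketch of what the matched lines imply, assuming the usual CONFIG_BTRFS_DEBUG guard; only the !tree->mapping bail-out (line 93) and the inode lookup (line 96) come from the hits, the sanity predicate and the message are assumptions:

static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode;
	u64 isize;

	/* Trees without a backing address_space are skipped (line 93). */
	if (!tree->mapping)
		return;

	inode = tree->mapping->host;			/* line 96 */
	isize = i_size_read(inode);

	/* Assumed predicate: flag inclusive ranges whose end looks
	 * misaligned relative to i_size; the real check may differ. */
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1)
		pr_debug("%s: odd range [%llu,%llu], inode isize %llu\n",
			 caller, start, end, isize);
}
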
121 struct extent_io_tree *tree; member
155 tree_fs_info(struct extent_io_tree *tree) in tree_fs_info() argument
157 if (!tree->mapping) in tree_fs_info()
159 return btrfs_sb(tree->mapping->host->i_sb); in tree_fs_info()
217 void extent_io_tree_init(struct extent_io_tree *tree, in extent_io_tree_init() argument
220 tree->state = RB_ROOT; in extent_io_tree_init()
221 tree->ops = NULL; in extent_io_tree_init()
222 tree->dirty_bytes = 0; in extent_io_tree_init()
223 spin_lock_init(&tree->lock); in extent_io_tree_init()
224 tree->mapping = mapping; in extent_io_tree_init()
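
The hits at lines 220-224 happen to cover every statement of extent_io_tree_init() (each one touches tree), so the function reconstructs almost verbatim; only the name of the second parameter, mapping, is inferred from line 224:

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;		/* empty rbtree of extent_state */
	tree->ops = NULL;		/* hooks are installed by callers */
	tree->dirty_bytes = 0;		/* running EXTENT_DIRTY byte count */
	spin_lock_init(&tree->lock);	/* guards all range operations */
	tree->mapping = mapping;	/* NULL for trees with no pagecache */
}
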
292 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, in __etree_search() argument
298 struct rb_root *root = &tree->state; in __etree_search()
345 tree_search_for_insert(struct extent_io_tree *tree, in tree_search_for_insert() argument
353 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret); in tree_search_for_insert()
359 static inline struct rb_node *tree_search(struct extent_io_tree *tree, in tree_search() argument
362 return tree_search_for_insert(tree, offset, NULL, NULL); in tree_search()
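
Lines 292-362 are the lookup layer: tree_search() is a thin wrapper over tree_search_for_insert(), which delegates to __etree_search() on &tree->state (line 298) and also hands back the p/parent slots needed for a later rb_link_node(). A simplified sketch of the descent, under a hypothetical name; the real function additionally returns the next node when no state contains the offset, and threads prev/next bookkeeping through its out-parameters:

static struct rb_node *etree_search_sketch(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_node *n = tree->state.rb_node;

	while (n) {
		struct extent_state *entry =
			rb_entry(n, struct extent_state, rb_node);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;	/* offset inside [start, end] */
	}
	return NULL;
}
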
365 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, in merge_cb() argument
368 if (tree->ops && tree->ops->merge_extent_hook) in merge_cb()
369 tree->ops->merge_extent_hook(tree->mapping->host, new, in merge_cb()
382 static void merge_state(struct extent_io_tree *tree, in merge_state() argument
396 merge_cb(tree, state, other); in merge_state()
398 rb_erase(&other->rb_node, &tree->state); in merge_state()
408 merge_cb(tree, state, other); in merge_state()
410 rb_erase(&other->rb_node, &tree->state); in merge_state()
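
The two rb_erase() hits (lines 398 and 410) are the two halves of state coalescing: merge_state() absorbs the rb_prev() neighbor, then the rb_next() neighbor, whenever the neighbor is byte-adjacent and carries identical bits, firing merge_cb() first so the hooks at lines 368-369 can adjust per-inode accounting. A sketch of the first half, under a hypothetical name; the locked/boundary-state exemptions and refcount handling are elided:

static void merge_state_sketch(struct extent_io_tree *tree,
			       struct extent_state *state)
{
	struct rb_node *prev = rb_prev(&state->rb_node);

	if (prev) {
		struct extent_state *other =
			rb_entry(prev, struct extent_state, rb_node);

		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);		 /* line 396 */
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state); /* line 398 */
			free_extent_state(other);
		}
	}
	/* ...mirror-image check against rb_next(), lines 408-410... */
}
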
417 static void set_state_cb(struct extent_io_tree *tree, in set_state_cb() argument
420 if (tree->ops && tree->ops->set_bit_hook) in set_state_cb()
421 tree->ops->set_bit_hook(tree->mapping->host, state, bits); in set_state_cb()
424 static void clear_state_cb(struct extent_io_tree *tree, in clear_state_cb() argument
427 if (tree->ops && tree->ops->clear_bit_hook) in clear_state_cb()
428 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); in clear_state_cb()
431 static void set_state_bits(struct extent_io_tree *tree,
445 static int insert_state(struct extent_io_tree *tree, in insert_state() argument
459 set_state_bits(tree, state, bits, changeset); in insert_state()
461 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); in insert_state()
470 merge_state(tree, state); in insert_state()
474 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig, in split_cb() argument
477 if (tree->ops && tree->ops->split_extent_hook) in split_cb()
478 tree->ops->split_extent_hook(tree->mapping->host, orig, split); in split_cb()
495 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, in split_state() argument
500 split_cb(tree, orig, split); in split_state()
507 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, in split_state()
532 static struct extent_state *clear_state_bit(struct extent_io_tree *tree, in clear_state_bit() argument
542 WARN_ON(range > tree->dirty_bytes); in clear_state_bit()
543 tree->dirty_bytes -= range; in clear_state_bit()
545 clear_state_cb(tree, state, bits); in clear_state_bit()
553 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
560 merge_state(tree, state); in clear_state_bit()
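
Lines 532-560 show what clearing bits on a single state involves: keep the tree-wide tree->dirty_bytes counter in step when EXTENT_DIRTY goes away (lines 542-543), fire the clear hook (line 545), then either erase a now-empty state (line 553) or try to re-merge it with its neighbors (line 560). A simplified sketch under a hypothetical name; the wake/changeset plumbing and the returned next-state pointer are omitted:

static void clear_state_bit_sketch(struct extent_io_tree *tree,
				   struct extent_state *state,
				   unsigned bits)
{
	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;

		WARN_ON(range > tree->dirty_bytes);	/* line 542 */
		tree->dirty_bytes -= range;		/* line 543 */
	}

	clear_state_cb(tree, state, &bits);		/* line 545 */
	state->state &= ~bits;

	if (state->state == 0) {
		rb_erase(&state->rb_node, &tree->state); /* line 553 */
		free_extent_state(state);
	} else {
		merge_state(tree, state);		/* line 560 */
	}
}
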
575 static void extent_io_tree_panic(struct extent_io_tree *tree, int err) in extent_io_tree_panic() argument
577 btrfs_panic(tree_fs_info(tree), err, "Locking error: " in extent_io_tree_panic()
594 static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
607 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
630 spin_lock(&tree->lock); in __clear_extent_bit()
653 node = tree_search(tree, start); in __clear_extent_bit()
688 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
690 extent_io_tree_panic(tree, err); in __clear_extent_bit()
696 state = clear_state_bit(tree, state, &bits, wake, in __clear_extent_bit()
711 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
713 extent_io_tree_panic(tree, err); in __clear_extent_bit()
718 clear_state_bit(tree, prealloc, &bits, wake, changeset); in __clear_extent_bit()
724 state = clear_state_bit(tree, state, &bits, wake, changeset); in __clear_extent_bit()
734 spin_unlock(&tree->lock); in __clear_extent_bit()
743 spin_unlock(&tree->lock); in __clear_extent_bit()
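
The __clear_extent_bit() hits trace the pattern every range operation in this file follows: take tree->lock (line 630), find the first state overlapping start (line 653), split states that straddle either boundary (lines 688 and 711), clear bits on whatever lies fully inside the range (lines 696, 718, 724), and panic if a split fails under the spinlock (lines 690/713), which cannot be an allocation failure because prealloc was set up beforehand. A condensed sketch of just the boundary handling, under a hypothetical name:

static void clear_boundaries_sketch(struct extent_io_tree *tree,
				    struct extent_state *state,
				    struct extent_state *prealloc,
				    u64 start, u64 end,
				    unsigned *bits, int wake)
{
	int err;

	/* State begins before the range: split off the head so only
	 * [start, state->end] keeps getting processed. */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);   /* 688 */
		if (err)
			extent_io_tree_panic(tree, err);	   /* 690 */
	}

	/* State runs past the range: split at end + 1 and clear bits
	 * on the front half only. */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1); /* 711 */
		if (err)
			extent_io_tree_panic(tree, err);	   /* 713 */
		clear_state_bit(tree, prealloc, bits, wake, NULL); /* 718 */
	}
}
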
749 static void wait_on_state(struct extent_io_tree *tree, in wait_on_state() argument
751 __releases(tree->lock) in wait_on_state()
752 __acquires(tree->lock) in wait_on_state()
756 spin_unlock(&tree->lock); in wait_on_state()
758 spin_lock(&tree->lock); in wait_on_state()
767 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
773 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
775 spin_lock(&tree->lock); in wait_extent_bit()
782 node = tree_search(tree, start); in wait_extent_bit()
795 wait_on_state(tree, state); in wait_extent_bit()
804 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
810 spin_unlock(&tree->lock); in wait_extent_bit()
813 static void set_state_bits(struct extent_io_tree *tree, in set_state_bits() argument
819 set_state_cb(tree, state, bits); in set_state_bits()
822 tree->dirty_bytes += range; in set_state_bits()
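
set_state_bits() (lines 813-822) is the mirror image of clear_state_bit()'s accounting: the set hook fires first (line 819), then tree->dirty_bytes grows by the state's length when EXTENT_DIRTY is newly set (line 822). A simplified sketch under a hypothetical name; the real function takes the bits by pointer and masks off control bits:

static void set_state_bits_sketch(struct extent_io_tree *tree,
				  struct extent_state *state,
				  unsigned bits)
{
	set_state_cb(tree, state, &bits);		/* line 819 */

	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;

		tree->dirty_bytes += range;		/* line 822 */
	}
	state->state |= bits;
}
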
859 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
873 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
882 spin_lock(&tree->lock); in __set_extent_bit()
895 node = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
899 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
902 extent_io_tree_panic(tree, err); in __set_extent_bit()
926 set_state_bits(tree, state, &bits, changeset); in __set_extent_bit()
928 merge_state(tree, state); in __set_extent_bit()
964 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
966 extent_io_tree_panic(tree, err); in __set_extent_bit()
972 set_state_bits(tree, state, &bits, changeset); in __set_extent_bit()
974 merge_state(tree, state); in __set_extent_bit()
1006 err = insert_state(tree, prealloc, start, this_end, in __set_extent_bit()
1009 extent_io_tree_panic(tree, err); in __set_extent_bit()
1031 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1033 extent_io_tree_panic(tree, err); in __set_extent_bit()
1035 set_state_bits(tree, prealloc, &bits, changeset); in __set_extent_bit()
1037 merge_state(tree, prealloc); in __set_extent_bit()
1045 spin_unlock(&tree->lock); in __set_extent_bit()
1054 spin_unlock(&tree->lock); in __set_extent_bit()
1060 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1064 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1086 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1100 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1116 spin_lock(&tree->lock); in convert_extent_bit()
1130 node = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1137 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1140 extent_io_tree_panic(tree, err); in convert_extent_bit()
1157 set_state_bits(tree, state, &bits, NULL); in convert_extent_bit()
1159 state = clear_state_bit(tree, state, &clear_bits, 0, NULL); in convert_extent_bit()
1191 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1193 extent_io_tree_panic(tree, err); in convert_extent_bit()
1198 set_state_bits(tree, state, &bits, NULL); in convert_extent_bit()
1200 state = clear_state_bit(tree, state, &clear_bits, 0, in convert_extent_bit()
1235 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1238 extent_io_tree_panic(tree, err); in convert_extent_bit()
1257 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1259 extent_io_tree_panic(tree, err); in convert_extent_bit()
1261 set_state_bits(tree, prealloc, &bits, NULL); in convert_extent_bit()
1263 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL); in convert_extent_bit()
1271 spin_unlock(&tree->lock); in convert_extent_bit()
1280 spin_unlock(&tree->lock); in convert_extent_bit()
1288 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1291 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, in set_extent_dirty()
1295 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1298 return set_extent_bit(tree, start, end, bits, NULL, in set_extent_bits()
1302 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1314 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask, in set_record_extent_bits()
1318 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
1322 return __clear_extent_bit(tree, start, end, bits, wake, delete, in clear_extent_bit()
1326 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1334 return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask); in clear_extent_bits()
1337 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1347 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask, in clear_record_extent_bits()
1351 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1354 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1359 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_defrag() argument
1362 return set_extent_bit(tree, start, end, in set_extent_defrag()
1367 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1370 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1375 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1378 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, in set_extent_new()
1382 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1385 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, in set_extent_uptodate()
1389 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_uptodate() argument
1392 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
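
Lines 1288-1392 are one-line convenience wrappers that bake a fixed bit combination into set_extent_bit()/clear_extent_bit(). A representative reconstruction from the set_extent_delalloc() hits (lines 1351-1354); the exact bit combination shown (EXTENT_DELALLOC | EXTENT_UPTODATE) is an assumption for this kernel vintage, since the continuation lines did not match the search:

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE, /* assumed */
			      NULL, cached_state, mask);
}
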
1400 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1407 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1411 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); in lock_extent_bits()
1420 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
1422 return lock_extent_bits(tree, start, end, 0, NULL); in lock_extent()
1425 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1430 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1434 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1441 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1444 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1448 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) in unlock_extent() argument
1450 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
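
Lines 1400-1450 spell out the range-locking protocol: EXTENT_LOCKED acts as the exclusive bit, __set_extent_bit() reports where it collided via failed_start, lock_extent_bits() sleeps on that spot and retries (line 1411), while try_lock_extent() instead rolls back the partial lock (line 1434) and returns. A sketch of the blocking variant, following the matched call at line 1407; the trailing arguments of __set_extent_bit() are inferred from the other hits:

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, struct extent_state **cached_state)
{
	u64 failed_start;
	int err;

	while (1) {
		err = __set_extent_bit(tree, start, end,
				       EXTENT_LOCKED | bits,	/* line 1407 */
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS, NULL);
		if (err != -EEXIST)
			break;
		/* Part of the range is already locked: wait for the
		 * conflicting EXTENT_LOCKED to clear, resume there. */
		wait_extent_bit(tree, failed_start, end,
				EXTENT_LOCKED);			/* line 1411 */
		start = failed_start;
		WARN_ON(start > end);
	}
	return err;
}
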
1490 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1497 page = find_get_page(tree->mapping, index); in set_range_writeback()
1511 find_first_extent_bit_state(struct extent_io_tree *tree, in find_first_extent_bit_state() argument
1521 node = tree_search(tree, start); in find_first_extent_bit_state()
1545 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1553 spin_lock(&tree->lock); in find_first_extent_bit()
1573 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1582 spin_unlock(&tree->lock); in find_first_extent_bit()
1592 static noinline u64 find_delalloc_range(struct extent_io_tree *tree, in find_delalloc_range() argument
1602 spin_lock(&tree->lock); in find_delalloc_range()
1608 node = tree_search(tree, cur_start); in find_delalloc_range()
1642 spin_unlock(&tree->lock); in find_delalloc_range()
1744 struct extent_io_tree *tree, in find_lock_delalloc_range() argument
1759 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, in find_lock_delalloc_range()
1803 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state); in find_lock_delalloc_range()
1806 ret = test_range_bit(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1809 unlock_extent_cached(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1828 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in extent_clear_unlock_delalloc() local
1836 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1880 u64 count_range_bits(struct extent_io_tree *tree, in count_range_bits() argument
1894 spin_lock(&tree->lock); in count_range_bits()
1896 total_bytes = tree->dirty_bytes; in count_range_bits()
1903 node = tree_search(tree, cur_start); in count_range_bits()
1931 spin_unlock(&tree->lock); in count_range_bits()
1939 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) in set_state_private() argument
1945 spin_lock(&tree->lock); in set_state_private()
1950 node = tree_search(tree, start); in set_state_private()
1962 spin_unlock(&tree->lock); in set_state_private()
1966 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) in get_state_private() argument
1972 spin_lock(&tree->lock); in get_state_private()
1977 node = tree_search(tree, start); in get_state_private()
1989 spin_unlock(&tree->lock); in get_state_private()
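
set_state_private()/get_state_private() (lines 1939-1989) share one locked lookup: tree_search() must land on a state whose start matches exactly, otherwise -ENOENT; btrfs uses the private slot to stash per-extent data such as checksum results. A reconstruction of the setter; the exact-match test between the matched lines is inferred, not shown by the hits:

static int set_state_private(struct extent_io_tree *tree, u64 start,
			     u64 private)
{
	struct extent_state *state;
	struct rb_node *node;
	int ret = 0;

	spin_lock(&tree->lock);			/* line 1945 */
	node = tree_search(tree, start);	/* line 1950 */
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {		/* inferred exact match */
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);		/* line 1962 */
	return ret;
}
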
1999 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
2006 spin_lock(&tree->lock); in test_range_bit()
2011 node = tree_search(tree, start); in test_range_bit()
2045 spin_unlock(&tree->lock); in test_range_bit()
2053 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) in check_page_uptodate() argument
2057 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
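
The single hit at line 2057 is the whole decision in check_page_uptodate(): the page is marked uptodate only once every byte it covers carries EXTENT_UPTODATE in the io tree. Reconstruction; the start/end derivation from page_offset() and the era-appropriate PAGE_CACHE_SIZE are assumptions:

static void check_page_uptodate(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = page_offset(page);			/* assumed */
	u64 end = start + PAGE_CACHE_SIZE - 1;		/* inclusive range */

	if (test_range_bit(tree, start, end,
			   EXTENT_UPTODATE, 1, NULL))	/* line 2057 */
		SetPageUptodate(page);
}
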
2278 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_get_io_failure_record() local
2336 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in btrfs_get_io_failure_record()
2471 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in bio_readpage_error() local
2506 ret = tree->ops->submit_bio_hook(inode, read_mode, bio, in bio_readpage_error()
2522 struct extent_io_tree *tree; in end_extent_writepage() local
2525 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_extent_writepage()
2527 if (tree->ops && tree->ops->writepage_end_io_hook) { in end_extent_writepage()
2528 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2592 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, in endio_readpage_release_extent() argument
2598 if (uptodate && tree->track_uptodate) in endio_readpage_release_extent()
2599 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2600 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2619 struct extent_io_tree *tree; in end_bio_extent_readpage() local
2637 tree = &BTRFS_I(inode)->io_tree; in end_bio_extent_readpage()
2661 if (likely(uptodate && tree->ops && in end_bio_extent_readpage()
2662 tree->ops->readpage_end_io_hook)) { in end_bio_extent_readpage()
2663 ret = tree->ops->readpage_end_io_hook(io_bio, offset, in end_bio_extent_readpage()
2675 if (tree->ops && tree->ops->readpage_io_failed_hook) { in end_bio_extent_readpage()
2676 ret = tree->ops->readpage_io_failed_hook(page, mirror); in end_bio_extent_readpage()
2718 endio_readpage_release_extent(tree, in end_bio_extent_readpage()
2724 endio_readpage_release_extent(tree, start, in end_bio_extent_readpage()
2732 endio_readpage_release_extent(tree, extent_start, in end_bio_extent_readpage()
2740 endio_readpage_release_extent(tree, extent_start, extent_len, in end_bio_extent_readpage()
2822 struct extent_io_tree *tree = bio->bi_private; in submit_one_bio() local
2831 if (tree->ops && tree->ops->submit_bio_hook) in submit_one_bio()
2832 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, in submit_one_bio()
2841 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page, in merge_bio() argument
2846 if (tree->ops && tree->ops->merge_bio_hook) in merge_bio()
2847 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, in merge_bio()
2854 static int submit_extent_page(int rw, struct extent_io_tree *tree, in submit_extent_page() argument
2882 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || in submit_extent_page()
2905 bio->bi_private = tree; in submit_extent_page()
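
Lines 2822-2905 show how bios are threaded through the tree: submit_extent_page() stores the tree in bio->bi_private (line 2905), and submit_one_bio() pulls it back out (line 2822) to dispatch through ops->submit_bio_hook when one is registered (lines 2831-2832). A sketch under a hypothetical name; the last-bvec bookkeeping and the plain submit_bio() fallback are era-specific assumptions:

static int submit_one_bio_sketch(int rw, struct bio *bio, int mirror_num,
				 unsigned long bio_flags)
{
	struct extent_io_tree *tree = bio->bi_private;	   /* line 2822 */
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	u64 start = page_offset(page) + bvec->bv_offset;
	int ret = 0;

	bio->bi_private = NULL;
	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)	   /* line 2831 */
		ret = tree->ops->submit_bio_hook(page->mapping->host, rw,
						 bio, mirror_num, bio_flags,
						 start);
	else
		submit_bio(rw, bio);	/* assumed fallback path */

	bio_put(bio);
	return ret;
}
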
2973 static int __do_readpage(struct extent_io_tree *tree, in __do_readpage() argument
3008 unlock_extent(tree, start, end); in __do_readpage()
3038 set_extent_uptodate(tree, cur, cur + iosize - 1, in __do_readpage()
3041 unlock_extent_cached(tree, cur, in __do_readpage()
3051 unlock_extent(tree, cur, end); in __do_readpage()
3134 set_extent_uptodate(tree, cur, cur + iosize - 1, in __do_readpage()
3139 unlock_extent_cached(tree, cur, in __do_readpage()
3147 if (test_range_bit(tree, cur, cur_end, in __do_readpage()
3149 check_page_uptodate(tree, page); in __do_readpage()
3151 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3162 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3169 ret = submit_extent_page(rw, tree, NULL, page, in __do_readpage()
3182 unlock_extent(tree, cur, cur + iosize - 1); in __do_readpage()
3196 static inline void __do_contiguous_readpages(struct extent_io_tree *tree, in __do_contiguous_readpages() argument
3211 lock_extent(tree, start, end); in __do_contiguous_readpages()
3216 unlock_extent(tree, start, end); in __do_contiguous_readpages()
3222 __do_readpage(tree, pages[index], get_extent, em_cached, bio, in __do_contiguous_readpages()
3228 static void __extent_readpages(struct extent_io_tree *tree, in __extent_readpages() argument
3251 __do_contiguous_readpages(tree, &pages[first_index], in __extent_readpages()
3263 __do_contiguous_readpages(tree, &pages[first_index], in __extent_readpages()
3270 static int __extent_read_full_page(struct extent_io_tree *tree, in __extent_read_full_page() argument
3283 lock_extent(tree, start, end); in __extent_read_full_page()
3287 unlock_extent(tree, start, end); in __extent_read_full_page()
3292 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, in __extent_read_full_page()
3297 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, in extent_read_full_page() argument
3304 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, in extent_read_full_page()
3311 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, in extent_read_full_page_nolock() argument
3318 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, in extent_read_full_page_nolock()
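
Lines 3270-3318 cover the read entry points. __extent_read_full_page() locks the range covering the page (line 3283); if an ordered extent (pending dirty data) still overlaps it, the lock is dropped (line 3287) and the wait-retry loop runs before __do_readpage() does the real work (line 3292); the _nolock variant at line 3311 skips straight to __do_readpage(). A sketch of the locking loop, under a hypothetical name; the ordered-extent helpers are the ones btrfs uses for this elsewhere and are assumed here:

static int read_full_page_sketch(struct extent_io_tree *tree,
				 struct page *page,
				 get_extent_t *get_extent,
				 struct bio **bio, int mirror_num,
				 unsigned long *bio_flags, int rw)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_ordered_extent *ordered;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;

	while (1) {
		lock_extent(tree, start, end);		/* line 3283 */
		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (!ordered)
			break;
		/* Writeback for this range is still in flight: drop the
		 * lock, wait it out, and take the lock again. */
		unlock_extent(tree, start, end);	/* line 3287 */
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	return __do_readpage(tree, page, get_extent, NULL, bio,
			     mirror_num, bio_flags, rw, NULL); /* line 3292 */
}
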
3351 struct extent_io_tree *tree = epd->tree; in writepage_delalloc() local
3359 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc) in writepage_delalloc()
3363 nr_delalloc = find_lock_delalloc_range(inode, tree, in writepage_delalloc()
3372 ret = tree->ops->fill_delalloc(inode, page, in writepage_delalloc()
3442 struct extent_io_tree *tree = epd->tree; in __extent_writepage_io() local
3460 if (tree->ops && tree->ops->writepage_start_hook) { in __extent_writepage_io()
3461 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage_io()
3485 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3486 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage_io()
3496 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3497 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3532 if (!compressed && tree->ops && in __extent_writepage_io()
3533 tree->ops->writepage_end_io_hook) in __extent_writepage_io()
3534 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3550 if (tree->ops && tree->ops->writepage_io_hook) { in __extent_writepage_io()
3551 ret = tree->ops->writepage_io_hook(page, cur, in __extent_writepage_io()
3561 set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage_io()
3568 ret = submit_extent_page(write_flags, tree, wbc, page, in __extent_writepage_io()
3854 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; in write_one_eb() local
3872 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, in write_one_eb()
3904 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree; in btree_write_cache_pages() local
3909 .tree = tree, in btree_write_cache_pages()
4038 static int extent_write_cache_pages(struct extent_io_tree *tree, in extent_write_cache_pages() argument
4181 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, in extent_write_full_page() argument
4188 .tree = tree, in extent_write_full_page()
4201 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, in extent_write_locked_range() argument
4213 .tree = tree, in extent_write_locked_range()
4231 if (tree->ops && tree->ops->writepage_end_io_hook) in extent_write_locked_range()
4232 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
4245 int extent_writepages(struct extent_io_tree *tree, in extent_writepages() argument
4253 .tree = tree, in extent_writepages()
4260 ret = extent_write_cache_pages(tree, mapping, wbc, in extent_writepages()
4267 int extent_readpages(struct extent_io_tree *tree, in extent_readpages() argument
4295 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, in extent_readpages()
4300 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, in extent_readpages()
4317 int extent_invalidatepage(struct extent_io_tree *tree, in extent_invalidatepage() argument
4329 lock_extent_bits(tree, start, end, 0, &cached_state); in extent_invalidatepage()
4331 clear_extent_bit(tree, start, end, in extent_invalidatepage()
4344 struct extent_io_tree *tree, in try_release_extent_state() argument
4351 if (test_range_bit(tree, start, end, in try_release_extent_state()
4361 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
4382 struct extent_io_tree *tree, struct page *page, in try_release_extent_mapping() argument
4406 if (!test_range_bit(tree, em->start, in try_release_extent_mapping()
4421 return try_release_extent_state(map, tree, page, mask); in try_release_extent_mapping()
5266 int read_extent_buffer_pages(struct extent_io_tree *tree, in read_extent_buffer_pages() argument
5321 err = __extent_read_full_page(tree, page, in read_extent_buffer_pages()