Lines matching refs:end, i.e. every indexed line that references the identifier 'end'. The function names (btrfs_leak_debug_check, __set_extent_bit, extent_fiemap, ...) place these in fs/btrfs/extent_io.c from a pre-4.6 kernel tree, which still uses PAGE_CACHE_SHIFT and bio->bi_error. The leading number on each line is the source line; the trailing 'in function()' names the enclosing function, and 'argument', 'member' and 'local' classify the reference.
68 state->start, state->end, state->state, in btrfs_leak_debug_check()
85 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
88 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
98 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
101 caller, btrfs_ino(inode), isize, start, end); in __btrfs_debug_check_extent_io_range()
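Source lines 85-101 above belong to a debug-only range sanity check: for an inclusive byte range [start, end], a plausible end offset is either odd (the last byte of a block, e.g. 4095) or the last byte of the file. A minimal user-space model of the condition on source line 98; MODEL_PAGE_SIZE and the fprintf() are stand-ins for PAGE_SIZE and the ratelimited kernel warning, and the message text is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_PAGE_SIZE 4096ULL   /* stand-in for PAGE_SIZE */

    /* Warn when an inclusive extent range [start, end] looks misaligned:
     * a sane 'end' is odd (last byte of a block) or equals isize - 1.
     */
    static void check_extent_io_range(const char *caller, uint64_t isize,
                                      uint64_t start, uint64_t end)
    {
        if (end >= MODEL_PAGE_SIZE && (end % 2) == 0 && end != isize - 1)
            fprintf(stderr, "%s: isize %llu odd range [%llu,%llu]\n",
                    caller, (unsigned long long)isize,
                    (unsigned long long)start, (unsigned long long)end);
    }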
115 u64 end; member
146 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
147 ret = ulist_add(changeset->range_changed, state->start, state->end, in add_extent_changeset()
280 else if (offset > entry->end) in tree_insert()
312 else if (offset > entry->end) in __etree_search()
325 while (prev && offset > prev_entry->end) { in __etree_search()
394 if (other->end == state->start - 1 && in merge_state()
406 if (other->start == state->end + 1 && in merge_state()
409 state->end = other->end; in merge_state()
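Source lines 394-409 are the adjacency tests in merge_state(). Because extent ranges are inclusive, a neighbour ending at state->start - 1 abuts on the left and one starting at state->end + 1 abuts on the right, and absorbing the right neighbour is a single assignment to state->end. A sketch of both tests, assuming states merge only when their bit masks match exactly (the real helper also declines to merge certain states, e.g. locked ones):

    #include <stdint.h>
    #include <stdbool.h>

    struct state { uint64_t start, end; unsigned bits; };  /* end is inclusive */

    /* 'other' sits immediately left of 'state' with identical bits */
    static bool abuts_left(const struct state *other, const struct state *state)
    {
        return other->end == state->start - 1 && other->bits == state->bits;
    }

    /* 'other' sits immediately right: absorb it by extending state->end */
    static void merge_right(struct state *state, const struct state *other)
    {
        if (other->start == state->end + 1 && other->bits == state->bits)
            state->end = other->end;    /* mirrors source line 409 */
    }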
446 struct extent_state *state, u64 start, u64 end, in insert_state() argument
453 if (end < start) in insert_state()
455 end, start); in insert_state()
457 state->end = end; in insert_state()
461 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); in insert_state()
467 found->start, found->end, start, end); in insert_state()
503 prealloc->end = split - 1; in split_state()
507 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, in split_state()
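Source lines 503-507 fix the split convention: splitting a state at offset split leaves the preallocated node covering [start, split - 1] and the original covering [split, end], and the new node is then inserted keyed by its inclusive end. The bookkeeping, modelled on the same struct state sketch as above:

    #include <stdint.h>

    struct state { uint64_t start, end; unsigned bits; };  /* end is inclusive */

    /* Model of split_state(); the caller guarantees
     * orig->start < split <= orig->end.
     */
    static void split_range(struct state *orig, struct state *prealloc,
                            uint64_t split)
    {
        prealloc->start = orig->start;
        prealloc->end = split - 1;    /* left half, source line 503 */
        prealloc->bits = orig->bits;
        orig->start = split;          /* orig keeps [split, orig->end] */
    }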
541 u64 range = state->end - state->start + 1; in clear_state_bit()
594 static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
607 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
640 cached->start <= start && cached->end > start) { in __clear_extent_bit()
658 if (state->start > end) in __clear_extent_bit()
660 WARN_ON(state->end < start); in __clear_extent_bit()
661 last_end = state->end; in __clear_extent_bit()
695 if (state->end <= end) { in __clear_extent_bit()
708 if (state->start <= end && state->end > end) { in __clear_extent_bit()
711 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
729 if (start <= end && state && !need_resched()) in __clear_extent_bit()
741 if (start > end) in __clear_extent_bit()
767 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
773 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
789 if (state->start > end) in wait_extent_bit()
799 start = state->end + 1; in wait_extent_bit()
801 if (start > end) in wait_extent_bit()
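The __clear_extent_bit() and wait_extent_bit() matches (source lines 594-741 and 767-801) share one loop shape: locate the state overlapping start, act on it, advance with start = state->end + 1, and stop once start > end or the next state begins past end. A user-space model of that walk over a sorted array:

    #include <stdint.h>
    #include <stddef.h>

    struct state { uint64_t start, end; };  /* end is inclusive */

    /* Visit every state overlapping the inclusive range [start, end];
     * mirrors the loop shape at source lines 658-661 and 789-801.
     */
    static void walk_range(struct state *states, size_t n,
                           uint64_t start, uint64_t end,
                           void (*visit)(struct state *))
    {
        for (size_t i = 0; i < n; i++) {
            struct state *s = &states[i];

            if (s->end < start)
                continue;            /* entirely before the range */
            if (s->start > end)
                break;               /* entirely after it: done */
            visit(s);
            start = s->end + 1;      /* step past this state */
            if (start > end)
                break;
        }
    }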
821 u64 range = state->end - state->start + 1; in set_state_bits()
859 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
873 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
885 if (state->start <= start && state->end > start && in __set_extent_bit()
899 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
911 last_end = state->end; in __set_extent_bit()
919 if (state->start == start && state->end <= end) { in __set_extent_bit()
933 if (start < end && state && state->start == start && in __set_extent_bit()
971 if (state->end <= end) { in __set_extent_bit()
979 if (start < end && state && state->start == start && in __set_extent_bit()
994 if (end < last_start) in __set_extent_bit()
995 this_end = end; in __set_extent_bit()
1022 if (state->start <= end && state->end > end) { in __set_extent_bit()
1031 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1052 if (start > end) in __set_extent_bit()
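__set_extent_bit() (source lines 859-1052) adds two boundary moves on top of that walk: when there is a hole before the next existing state at last_start, it inserts a fresh state covering only [start, min(end, last_start - 1)] (lines 994-995), and when a state straddles end it splits at end + 1 (lines 1022-1031) so the bits land strictly inside the requested range. The hole bound, modelled:

    #include <stdint.h>

    /* Upper bound of a state inserted into a hole: never past the
     * caller's 'end', never overlapping the next state at last_start.
     */
    static uint64_t hole_end(uint64_t end, uint64_t last_start)
    {
        return end < last_start ? end : last_start - 1;
    }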
1060 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1064 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1086 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1100 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1119 if (state->start <= start && state->end > start && in convert_extent_bit()
1137 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1148 last_end = state->end; in convert_extent_bit()
1156 if (state->start == start && state->end <= end) { in convert_extent_bit()
1163 if (start < end && state && state->start == start && in convert_extent_bit()
1197 if (state->end <= end) { in convert_extent_bit()
1205 if (start < end && state && state->start == start && in convert_extent_bit()
1220 if (end < last_start) in convert_extent_bit()
1221 this_end = end; in convert_extent_bit()
1250 if (state->start <= end && state->end > end) { in convert_extent_bit()
1257 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1278 if (start > end) in convert_extent_bit()
1288 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1291 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, in set_extent_dirty()
1295 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1298 return set_extent_bit(tree, start, end, bits, NULL, in set_extent_bits()
1302 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1314 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask, in set_record_extent_bits()
1318 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
1322 return __clear_extent_bit(tree, start, end, bits, wake, delete, in clear_extent_bit()
1326 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1334 return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask); in clear_extent_bits()
1337 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1347 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask, in clear_record_extent_bits()
1351 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1354 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1359 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_defrag() argument
1362 return set_extent_bit(tree, start, end, in set_extent_defrag()
1367 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1370 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1375 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1378 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, in set_extent_new()
1382 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1385 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, in set_extent_uptodate()
1389 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_uptodate() argument
1392 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
1400 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1407 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1411 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); in lock_extent_bits()
1415 WARN_ON(start > end); in lock_extent_bits()
1420 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
1422 return lock_extent_bits(tree, start, end, 0, NULL); in lock_extent()
1425 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1430 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1441 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1444 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1448 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) in unlock_extent() argument
1450 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
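Source lines 1400-1450 show that extent locking is just another bit: lock_extent_bits() tries to set EXTENT_LOCKED, and on collision the failed_start out-parameter reports where an existing lock begins, so the caller waits on that sub-range and retries from there; unlocking is a clear with wake = 1. A model of the retry loop, where try_set_locked() and wait_unlocked() are hypothetical stand-ins for __set_extent_bit() and wait_extent_bit():

    #include <stdint.h>
    #include <errno.h>

    int try_set_locked(uint64_t start, uint64_t end, uint64_t *failed_start);
    void wait_unlocked(uint64_t start, uint64_t end);

    /* Model of lock_extent_bits(): -EEXIST means a sub-range starting at
     * *failed_start was already locked; sleep on it, then retry there.
     */
    static int lock_range(uint64_t start, uint64_t end)
    {
        uint64_t failed_start;
        int err;

        while (1) {
            err = try_set_locked(start, end, &failed_start);
            if (err != -EEXIST)
                break;                     /* whole range locked, or hard error */
            wait_unlocked(failed_start, end);
            start = failed_start;          /* resume at the contended offset */
        }
        return err;
    }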
1454 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
1457 unsigned long end_index = end >> PAGE_CACHE_SHIFT; in extent_range_clear_dirty_for_io()
1470 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
1473 unsigned long end_index = end >> PAGE_CACHE_SHIFT; in extent_range_redirty_for_io()
1490 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1493 unsigned long end_index = end >> PAGE_CACHE_SHIFT; in set_range_writeback()
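Source lines 1454-1493 convert an inclusive byte range into an inclusive page-index range by shifting both ends (PAGE_CACHE_SHIFT was the pre-4.6 spelling of PAGE_SHIFT). Shifting end only works because it is the last byte of the range, not one past it:

    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_PAGE_SHIFT 12   /* 4 KiB pages; stand-in for PAGE_CACHE_SHIFT */

    int main(void)
    {
        uint64_t start = 4096, end = 12287;  /* inclusive byte range */
        unsigned long index = start >> MODEL_PAGE_SHIFT;    /* page 1 */
        unsigned long end_index = end >> MODEL_PAGE_SHIFT;  /* page 2 */

        /* covers pages [1, 2]; an exclusive end of 12288 shifted the
         * same way would wrongly name page 3 */
        printf("pages [%lu, %lu]\n", index, end_index);
        return 0;
    }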
1527 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1556 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1578 *end_ret = state->end; in find_first_extent_bit()
1593 u64 *start, u64 *end, u64 max_bytes, in find_delalloc_range() argument
1611 *end = (u64)-1; in find_delalloc_range()
1623 *end = state->end; in find_delalloc_range()
1632 *end = state->end; in find_delalloc_range()
1633 cur_start = state->end + 1; in find_delalloc_range()
1635 total_bytes += state->end - state->start + 1; in find_delalloc_range()
1648 u64 start, u64 end) in __unlock_for_delalloc() argument
1653 unsigned long end_index = end >> PAGE_CACHE_SHIFT; in __unlock_for_delalloc()
1746 u64 *end, u64 max_bytes) in find_lock_delalloc_range() argument
1763 *end = delalloc_end; in find_lock_delalloc_range()
1818 *end = delalloc_end; in find_lock_delalloc_range()
1823 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, in extent_clear_unlock_delalloc() argument
1832 unsigned long end_index = end >> PAGE_CACHE_SHIFT; in extent_clear_unlock_delalloc()
1836 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1913 if (state->end >= cur_start && (state->state & bits) == bits) { in count_range_bits()
1914 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
1922 last = state->end; in count_range_bits()
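Source line 1914 (count_range_bits()) is truncated by the listing, but it applies the inclusive-overlap formula: the bytes a state [s, e] contributes to a query [qs, qe] are min(e, qe) + 1 - max(s, qs), where the max() half is inferred from the visible min() half. A self-contained check of the arithmetic:

    #include <stdint.h>
    #include <assert.h>

    /* Bytes the inclusive range [s, e] contributes to the inclusive
     * query [qs, qe]; zero when they do not overlap.
     */
    static uint64_t overlap_bytes(uint64_t s, uint64_t e,
                                  uint64_t qs, uint64_t qe)
    {
        uint64_t lo = s > qs ? s : qs;   /* max(s, qs) */
        uint64_t hi = e < qe ? e : qe;   /* min(e, qe) */

        return hi >= lo ? hi + 1 - lo : 0;
    }

    int main(void)
    {
        assert(overlap_bytes(0, 4095, 1024, 8191) == 3072);
        assert(overlap_bytes(0, 4095, 8192, 12287) == 0);
        return 0;
    }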
1999 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
2008 cached->end > start) in test_range_bit()
2012 while (node && start <= end) { in test_range_bit()
2020 if (state->start > end) in test_range_bit()
2032 if (state->end == (u64)-1) in test_range_bit()
2035 start = state->end + 1; in test_range_bit()
2036 if (start > end) in test_range_bit()
2056 u64 end = start + PAGE_CACHE_SIZE - 1; in check_page_uptodate() local
2057 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2221 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2243 void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) in btrfs_free_io_failure_record() argument
2255 if (state->start > end) in btrfs_free_io_failure_record()
2258 ASSERT(state->end <= end); in btrfs_free_io_failure_record()
2271 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, in btrfs_get_io_failure_record() argument
2290 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2329 ret = set_extent_bits(failure_tree, start, end, in btrfs_get_io_failure_record()
2336 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in btrfs_get_io_failure_record()
2466 struct page *page, u64 start, u64 end, in bio_readpage_error() argument
2478 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); in bio_readpage_error()
2519 int end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2529 end, NULL, uptodate); in end_extent_writepage()
2556 u64 end; in end_bio_extent_writepage() local
2580 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2582 if (end_extent_writepage(page, bio->bi_error, start, end)) in end_bio_extent_writepage()
2596 u64 end = start + len - 1; in endio_readpage_release_extent() local
2599 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2600 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2622 u64 end; in end_bio_extent_readpage() local
2657 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2664 page, start, end, in end_bio_extent_readpage()
2690 ret = bio_readpage_error(bio, offset, page, start, end, in end_bio_extent_readpage()
2725 end - start + 1, 0); in end_bio_extent_readpage()
2728 extent_len = end + 1 - start; in end_bio_extent_readpage()
2730 extent_len += end + 1 - start; in end_bio_extent_readpage()
2735 extent_len = end + 1 - start; in end_bio_extent_readpage()
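Source lines 2596-2735 show the read-completion path coalescing contiguous bio segments: endio_readpage_release_extent() marks [start, start + len - 1] uptodate and unlocks it in one call, and end_bio_extent_readpage() grows extent_len across adjacent segments before flushing. A model of the coalescing, with release() as a stand-in for endio_readpage_release_extent():

    #include <stdint.h>

    /* Merge contiguous inclusive segments [start, end] into one
     * (extent_start, extent_len) run; mirrors source lines 2728-2735.
     * A zero extent_len means no run is open yet.
     */
    static void account_segment(uint64_t start, uint64_t end,
                                uint64_t *extent_start, uint64_t *extent_len,
                                void (*release)(uint64_t start, uint64_t len))
    {
        if (*extent_len == 0) {                       /* first segment */
            *extent_start = start;
            *extent_len = end + 1 - start;
        } else if (*extent_start + *extent_len == start) {
            *extent_len += end + 1 - start;           /* contiguous: extend */
        } else {
            release(*extent_start, *extent_len);      /* gap: flush the run */
            *extent_start = start;
            *extent_len = end + 1 - start;
        }
    }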
2984 u64 end; in __do_readpage() local
3004 end = page_end; in __do_readpage()
3008 unlock_extent(tree, start, end); in __do_readpage()
3025 while (cur <= end) { in __do_readpage()
3047 end - cur + 1, get_extent, em_cached); in __do_readpage()
3051 unlock_extent(tree, cur, end); in __do_readpage()
3056 BUG_ON(end < cur); in __do_readpage()
3064 iosize = min(extent_map_end(em) - cur, end - cur + 1); in __do_readpage()
3065 cur_end = min(extent_map_end(em) - 1, end); in __do_readpage()
3198 u64 start, u64 end, in __do_contiguous_readpages() argument
3211 lock_extent(tree, start, end); in __do_contiguous_readpages()
3213 end - start + 1); in __do_contiguous_readpages()
3216 unlock_extent(tree, start, end); in __do_contiguous_readpages()
3237 u64 end = 0; in __extent_readpages() local
3244 if (!end) { in __extent_readpages()
3246 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3248 } else if (end + 1 == page_start) { in __extent_readpages()
3249 end += PAGE_CACHE_SIZE; in __extent_readpages()
3253 end, get_extent, em_cached, in __extent_readpages()
3257 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3262 if (end) in __extent_readpages()
3265 end, get_extent, em_cached, bio, in __extent_readpages()
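__extent_readpages() (source lines 3237-3265) plays the same trick at page granularity: it extends end while the next page starts at end + 1, flushes the batch on a gap, and flushes the tail batch after the loop (the final 'if (end)'). A model, with submit() standing in for __do_contiguous_readpages():

    #include <stdint.h>
    #include <stddef.h>

    #define MODEL_PAGE_SIZE 4096ULL

    /* Group byte-contiguous pages into one [start, end] range per batch.
     * page_start[] holds each page's byte offset in ascending order.
     */
    static void batch_pages(const uint64_t *page_start, size_t nr,
                            void (*submit)(uint64_t start, uint64_t end))
    {
        uint64_t start = 0, end = 0;   /* end == 0 means no open batch */

        for (size_t i = 0; i < nr; i++) {
            if (!end) {                               /* open a new batch */
                start = page_start[i];
                end = start + MODEL_PAGE_SIZE - 1;
            } else if (end + 1 == page_start[i]) {    /* contiguous page */
                end += MODEL_PAGE_SIZE;
            } else {                                  /* gap: flush batch */
                submit(start, end);
                start = page_start[i];
                end = start + MODEL_PAGE_SIZE - 1;
            }
        }
        if (end)                                      /* tail batch */
            submit(start, end);
    }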
3279 u64 end = start + PAGE_CACHE_SIZE - 1; in __extent_read_full_page() local
3283 lock_extent(tree, start, end); in __extent_read_full_page()
3287 unlock_extent(tree, start, end); in __extent_read_full_page()
3445 u64 end; in __extent_writepage_io() local
3483 end = page_end; in __extent_writepage_io()
3493 while (cur <= end) { in __extent_writepage_io()
3502 end - cur + 1, 1); in __extent_writepage_io()
3512 BUG_ON(end < cur); in __extent_writepage_io()
3513 iosize = min(em_end - cur, end - cur + 1); in __extent_writepage_io()
3565 page->index, cur, end); in __extent_writepage_io()
3920 pgoff_t end; /* Inclusive */ in btree_write_cache_pages() local
3927 end = -1; in btree_write_cache_pages()
3930 end = wbc->range_end >> PAGE_CACHE_SHIFT; in btree_write_cache_pages()
3939 tag_pages_for_writeback(mapping, index, end); in btree_write_cache_pages()
3940 while (!done && !nr_to_write_done && (index <= end) && in btree_write_cache_pages()
3942 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { in btree_write_cache_pages()
3952 if (!wbc->range_cyclic && page->index > end) { in btree_write_cache_pages()
4052 pgoff_t end; /* Inclusive */ in extent_write_cache_pages() local
4071 end = -1; in extent_write_cache_pages()
4074 end = wbc->range_end >> PAGE_CACHE_SHIFT; in extent_write_cache_pages()
4083 tag_pages_for_writeback(mapping, index, end); in extent_write_cache_pages()
4084 while (!done && !nr_to_write_done && (index <= end) && in extent_write_cache_pages()
4086 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { in extent_write_cache_pages()
4110 if (!wbc->range_cyclic && page->index > end) { in extent_write_cache_pages()
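Both write_cache_pages loops (source lines 3920-3952 and 4052-4110) bound their scan the same way: end = -1 on a pgoff_t is the all-ones 'no upper bound' sentinel (index <= end is then always true), otherwise end = wbc->range_end >> PAGE_CACHE_SHIFT, and each pagevec lookup is clipped to the remaining inclusive window. The clip from source lines 3942 and 4086, modelled (the value of PAGEVEC_SIZE is an assumption):

    #define MODEL_PAGEVEC_SIZE 14UL   /* stand-in for PAGEVEC_SIZE */

    /* Pages to request this pass over the inclusive index window
     * [index, end]: min(end - index, PAGEVEC_SIZE - 1) + 1.
     */
    static unsigned long pages_this_pass(unsigned long index, unsigned long end)
    {
        unsigned long want = end - index;   /* huge (but safe) when end = ~0UL */

        if (want > MODEL_PAGEVEC_SIZE - 1)
            want = MODEL_PAGEVEC_SIZE - 1;
        return want + 1;
    }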
4202 u64 start, u64 end, get_extent_t *get_extent, in extent_write_locked_range() argument
4208 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> in extent_write_locked_range()
4223 .range_end = end + 1, in extent_write_locked_range()
4226 while (start <= end) { in extent_write_locked_range()
4322 u64 end = start + PAGE_CACHE_SIZE - 1; in extent_invalidatepage() local
4326 if (start > end) in extent_invalidatepage()
4329 lock_extent_bits(tree, start, end, 0, &cached_state); in extent_invalidatepage()
4331 clear_extent_bit(tree, start, end, in extent_invalidatepage()
4348 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_state() local
4351 if (test_range_bit(tree, start, end, in try_release_extent_state()
4361 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
4387 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_mapping() local
4392 while (start <= end) { in try_release_extent_mapping()
4393 len = end - start + 1; in try_release_extent_mapping()
4481 int end = 0; in extent_fiemap() local
4551 while (!end) { in extent_fiemap()
4584 end = 1; in extent_fiemap()
4587 end = 1; in extent_fiemap()
4627 end = 1; in extent_fiemap()
4639 end = 1; in extent_fiemap()
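Note the shift in meaning at the end of the listing: in extent_fiemap() (source lines 4481-4639), end is a plain int termination flag for the extent-emitting loop, not a byte offset like everywhere above. The loop shape, sketched with a hypothetical emit_one() step:

    #include <stdbool.h>

    bool emit_one(void);   /* hypothetical: emit next extent, true when done */

    static void fiemap_loop(void)
    {
        int end = 0;       /* termination flag, not an offset */

        while (!end) {
            if (emit_one())
                end = 1;
        }
    }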