Lines Matching refs:start

Cross-reference listing: each entry below gives the source line number, the line of code that references the identifier start, and the enclosing function; trailing markers ("argument", "member", "local") record how start is declared at that site. The function names and the PAGE_CACHE_* constants place the file as fs/btrfs/extent_io.c from a pre-4.6 Linux kernel.

68 state->start, state->end, state->state, in btrfs_leak_debug_check()
79 eb->start, eb->len, atomic_read(&eb->refs)); in btrfs_leak_debug_check()
85 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
88 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
101 caller, btrfs_ino(inode), isize, start, end); in __btrfs_debug_check_extent_io_range()
114 u64 start; member
146 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
147 ret = ulist_add(changeset->range_changed, state->start, state->end, in add_extent_changeset()
278 if (offset < entry->start) in tree_insert()
310 if (offset < entry->start) in __etree_search()
335 while (prev && offset < prev_entry->start) { in __etree_search()
394 if (other->end == state->start - 1 && in merge_state()
397 state->start = other->start; in merge_state()
406 if (other->start == state->end + 1 && in merge_state()
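
The two merge_state tests above are the backward and forward adjacency checks: a neighbor that ends at state->start - 1, or starts at state->end + 1, and carries identical bits is absorbed, so the tree only ever stores maximal ranges. Hedged sketch of the left-hand merge, pieced together from the lines listed:

	if (other->end == state->start - 1 &&
	    other->state == state->state) {
		state->start = other->start;	/* grow this state leftward */
		/* ...detach and free `other`... */
	}
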
446 struct extent_state *state, u64 start, u64 end, in insert_state() argument
453 if (end < start) in insert_state()
455 end, start); in insert_state()
456 state->start = start; in insert_state()
467 found->start, found->end, start, end); in insert_state()
502 prealloc->start = orig->start; in split_state()
505 orig->start = split; in split_state()
541 u64 range = state->end - state->start + 1; in clear_state_bit()
594 static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
607 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
640 cached->start <= start && cached->end > start) { in __clear_extent_bit()
653 node = tree_search(tree, start); in __clear_extent_bit()
658 if (state->start > end) in __clear_extent_bit()
660 WARN_ON(state->end < start); in __clear_extent_bit()
685 if (state->start < start) { in __clear_extent_bit()
688 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
708 if (state->start <= end && state->end > end) { in __clear_extent_bit()
728 start = last_end + 1; in __clear_extent_bit()
729 if (start <= end && state && !need_resched()) in __clear_extent_bit()
741 if (start > end) in __clear_extent_bit()
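
The __clear_extent_bit lines above trace the loop every range operation in this file shares: find the extent_state covering start, split it where the requested range cuts into it, apply the bit change, advance, repeat. A hedged reconstruction of that skeleton (illustrative only; error paths, prealloc refill and tree locking omitted, clear_state_bit signature assumed):

	while (start <= end) {
		node = tree_search(tree, start);	/* first state with end >= start */
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;				/* past the requested range */
		if (state->start < start)		/* range starts inside state */
			split_state(tree, state, prealloc, start);
		if (state->start <= end && state->end > end)
			split_state(tree, state, prealloc, end + 1);
		last_end = state->end;			/* state may be freed below */
		clear_state_bit(tree, state, &bits, wake);
		start = last_end + 1;			/* continue with the next state */
	}
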
767 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
773 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
782 node = tree_search(tree, start); in wait_extent_bit()
789 if (state->start > end) in wait_extent_bit()
793 start = state->start; in wait_extent_bit()
799 start = state->end + 1; in wait_extent_bit()
801 if (start > end) in wait_extent_bit()
821 u64 range = state->end - state->start + 1; in set_state_bits()
859 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
873 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
885 if (state->start <= start && state->end > start && in __set_extent_bit()
895 node = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
899 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
910 last_start = state->start; in __set_extent_bit()
919 if (state->start == start && state->end <= end) { in __set_extent_bit()
921 *failed_start = state->start; in __set_extent_bit()
931 start = last_end + 1; in __set_extent_bit()
933 if (start < end && state && state->start == start && in __set_extent_bit()
955 if (state->start < start) { in __set_extent_bit()
957 *failed_start = start; in __set_extent_bit()
964 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
977 start = last_end + 1; in __set_extent_bit()
979 if (start < end && state && state->start == start && in __set_extent_bit()
992 if (state->start > start) { in __set_extent_bit()
1006 err = insert_state(tree, prealloc, start, this_end, in __set_extent_bit()
1013 start = this_end + 1; in __set_extent_bit()
1022 if (state->start <= end && state->end > end) { in __set_extent_bit()
1024 *failed_start = start; in __set_extent_bit()
1052 if (start > end) in __set_extent_bit()
1060 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1064 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1086 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1100 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1119 if (state->start <= start && state->end > start && in convert_extent_bit()
1130 node = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1137 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1147 last_start = state->start; in convert_extent_bit()
1156 if (state->start == start && state->end <= end) { in convert_extent_bit()
1162 start = last_end + 1; in convert_extent_bit()
1163 if (start < end && state && state->start == start && in convert_extent_bit()
1185 if (state->start < start) { in convert_extent_bit()
1191 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1204 start = last_end + 1; in convert_extent_bit()
1205 if (start < end && state && state->start == start && in convert_extent_bit()
1218 if (state->start > start) { in convert_extent_bit()
1235 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1241 start = this_end + 1; in convert_extent_bit()
1250 if (state->start <= end && state->end > end) { in convert_extent_bit()
1278 if (start > end) in convert_extent_bit()
1288 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1291 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, in set_extent_dirty()
1295 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1298 return set_extent_bit(tree, start, end, bits, NULL, in set_extent_bits()
1302 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1314 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask, in set_record_extent_bits()
1318 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
1322 return __clear_extent_bit(tree, start, end, bits, wake, delete, in clear_extent_bit()
1326 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1334 return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask); in clear_extent_bits()
1337 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1347 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask, in clear_record_extent_bits()
1351 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1354 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1359 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_defrag() argument
1362 return set_extent_bit(tree, start, end, in set_extent_defrag()
1367 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1370 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1375 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1378 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, in set_extent_new()
1382 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1385 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, in set_extent_uptodate()
1389 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_uptodate() argument
1392 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
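
Each wrapper from set_extent_dirty down to clear_extent_uptodate above is a thin shim over set_extent_bit/clear_extent_bit with a fixed bit mask. A hypothetical call site, using the pre-4.6 signatures the listing shows (argument order for clear_extent_bit taken from the extent_clear_unlock_delalloc line further down):

	struct extent_state *cached = NULL;
	int ret;

	/* mark [start, end] delalloc, caching the state for the later clear */
	ret = set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				  &cached, GFP_NOFS);
	/* ... write the range back ... */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
			 EXTENT_DELALLOC, 1, 0, &cached, GFP_NOFS);
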
1400 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1407 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1412 start = failed_start; in lock_extent_bits()
1415 WARN_ON(start > end); in lock_extent_bits()
1420 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
1422 return lock_extent_bits(tree, start, end, 0, NULL); in lock_extent()
1425 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1430 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1433 if (failed_start > start) in try_lock_extent()
1434 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1441 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1444 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1448 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) in unlock_extent() argument
1450 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
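
lock_extent_bits through unlock_extent above build a byte-range lock out of EXTENT_LOCKED: __set_extent_bit reports an overlapping locked range through failed_start, and lock_extent_bits restarts from there (the start = failed_start line) until the whole range is held; try_lock_extent instead backs out whatever partial lock it managed to take. Typical (hypothetical) pairing:

	lock_extent(tree, start, end);		/* sleeps while [start, end] overlaps a locked range */
	/* ... operate on the locked byte range ... */
	unlock_extent(tree, start, end);	/* clears EXTENT_LOCKED with wake = 1, per the listing */
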
1454 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
1456 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_range_clear_dirty_for_io()
1470 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
1472 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_range_redirty_for_io()
1490 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1492 unsigned long index = start >> PAGE_CACHE_SHIFT; in set_range_writeback()
1512 u64 start, unsigned bits) in find_first_extent_bit_state() argument
1521 node = tree_search(tree, start); in find_first_extent_bit_state()
1527 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1545 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1556 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1573 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1577 *start_ret = state->start; in find_first_extent_bit()
1593 u64 *start, u64 *end, u64 max_bytes, in find_delalloc_range() argument
1598 u64 cur_start = *start; in find_delalloc_range()
1617 if (found && (state->start != cur_start || in find_delalloc_range()
1627 *start = state->start; in find_delalloc_range()
1635 total_bytes += state->end - state->start + 1; in find_delalloc_range()
1648 u64 start, u64 end) in __unlock_for_delalloc() argument
1652 unsigned long index = start >> PAGE_CACHE_SHIFT; in __unlock_for_delalloc()
1745 struct page *locked_page, u64 *start, in find_lock_delalloc_range() argument
1757 delalloc_start = *start; in find_lock_delalloc_range()
1761 if (!found || delalloc_end <= *start) { in find_lock_delalloc_range()
1762 *start = delalloc_start; in find_lock_delalloc_range()
1773 if (delalloc_start < *start) in find_lock_delalloc_range()
1774 delalloc_start = *start; in find_lock_delalloc_range()
1817 *start = delalloc_start; in find_lock_delalloc_range()
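
find_lock_delalloc_range above is the writepage-side wrapper: it asks find_delalloc_range for a run covering *start, clamps the run's head so it never reports delalloc before the caller's position, locks the run's pages, and hands the final range back through *start. The clamp is the pair of delalloc_start lines listed:

	if (delalloc_start < *start)
		delalloc_start = *start;	/* never extend before the caller's start */
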
1823 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, in extent_clear_unlock_delalloc() argument
1831 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_clear_unlock_delalloc()
1836 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1881 u64 *start, u64 search_end, u64 max_bytes, in count_range_bits() argument
1886 u64 cur_start = *start; in count_range_bits()
1909 if (state->start > search_end) in count_range_bits()
1911 if (contig && found && state->start > last + 1) in count_range_bits()
1915 max(cur_start, state->start); in count_range_bits()
1919 *start = max(cur_start, state->start); in count_range_bits()
1939 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) in set_state_private() argument
1950 node = tree_search(tree, start); in set_state_private()
1956 if (state->start != start) { in set_state_private()
1966 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) in get_state_private() argument
1977 node = tree_search(tree, start); in get_state_private()
1983 if (state->start != start) { in get_state_private()
1999 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
2007 if (cached && extent_state_in_tree(cached) && cached->start <= start && in test_range_bit()
2008 cached->end > start) in test_range_bit()
2011 node = tree_search(tree, start); in test_range_bit()
2012 while (node && start <= end) { in test_range_bit()
2015 if (filled && state->start > start) { in test_range_bit()
2020 if (state->start > end) in test_range_bit()
2035 start = state->end + 1; in test_range_bit()
2036 if (start > end) in test_range_bit()
2055 u64 start = page_offset(page); in check_page_uptodate() local
2056 u64 end = start + PAGE_CACHE_SIZE - 1; in check_page_uptodate()
2057 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2067 set_state_private(failure_tree, rec->start, 0); in free_io_failure()
2068 ret = clear_extent_bits(failure_tree, rec->start, in free_io_failure()
2069 rec->start + rec->len - 1, in free_io_failure()
2074 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, in free_io_failure()
2075 rec->start + rec->len - 1, in free_io_failure()
2094 int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, in repair_io_failure() argument
2146 btrfs_ino(inode), start, in repair_io_failure()
2155 u64 start = eb->start; in repair_eb_io_failure() local
2156 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); in repair_eb_io_failure()
2165 ret = repair_io_failure(root->fs_info->btree_inode, start, in repair_eb_io_failure()
2166 PAGE_CACHE_SIZE, start, p, in repair_eb_io_failure()
2167 start - page_offset(p), mirror_num); in repair_eb_io_failure()
2170 start += PAGE_CACHE_SIZE; in repair_eb_io_failure()
2180 int clean_io_failure(struct inode *inode, u64 start, struct page *page, in clean_io_failure() argument
2197 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, in clean_io_failure()
2208 failrec->start); in clean_io_failure()
2216 failrec->start, in clean_io_failure()
2220 if (state && state->start <= failrec->start && in clean_io_failure()
2221 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2225 repair_io_failure(inode, start, failrec->len, in clean_io_failure()
2243 void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) in btrfs_free_io_failure_record() argument
2253 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); in btrfs_free_io_failure_record()
2255 if (state->start > end) in btrfs_free_io_failure_record()
2271 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, in btrfs_get_io_failure_record() argument
2283 ret = get_state_private(failure_tree, start, &private); in btrfs_get_io_failure_record()
2289 failrec->start = start; in btrfs_get_io_failure_record()
2290 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2296 em = lookup_extent_mapping(em_tree, start, failrec->len); in btrfs_get_io_failure_record()
2303 if (em->start > start || em->start + em->len <= start) { in btrfs_get_io_failure_record()
2313 logical = start - em->start; in btrfs_get_io_failure_record()
2323 logical, start, failrec->len); in btrfs_get_io_failure_record()
2329 ret = set_extent_bits(failure_tree, start, end, in btrfs_get_io_failure_record()
2332 ret = set_state_private(failure_tree, start, in btrfs_get_io_failure_record()
2336 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in btrfs_get_io_failure_record()
2345 failrec->logical, failrec->start, failrec->len, in btrfs_get_io_failure_record()
2466 struct page *page, u64 start, u64 end, in bio_readpage_error() argument
2478 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); in bio_readpage_error()
2495 start - page_offset(page), in bio_readpage_error()
2519 int end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2528 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2555 u64 start; in end_bio_extent_writepage() local
2579 start = page_offset(page); in end_bio_extent_writepage()
2580 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2582 if (end_extent_writepage(page, bio->bi_error, start, end)) in end_bio_extent_writepage()
2592 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, in endio_readpage_release_extent() argument
2596 u64 end = start + len - 1; in endio_readpage_release_extent()
2599 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2600 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2621 u64 start; in end_bio_extent_readpage() local
2656 start = page_offset(page); in end_bio_extent_readpage()
2657 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2664 page, start, end, in end_bio_extent_readpage()
2669 clean_io_failure(inode, start, page, 0); in end_bio_extent_readpage()
2690 ret = bio_readpage_error(bio, offset, page, start, end, in end_bio_extent_readpage()
2724 endio_readpage_release_extent(tree, start, in end_bio_extent_readpage()
2725 end - start + 1, 0); in end_bio_extent_readpage()
2727 extent_start = start; in end_bio_extent_readpage()
2728 extent_len = end + 1 - start; in end_bio_extent_readpage()
2729 } else if (extent_start + extent_len == start) { in end_bio_extent_readpage()
2730 extent_len += end + 1 - start; in end_bio_extent_readpage()
2734 extent_start = start; in end_bio_extent_readpage()
2735 extent_len = end + 1 - start; in end_bio_extent_readpage()
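
The end_bio_extent_readpage lines above batch the post-read unlock work: a failed page is released on its own with uptodate = 0 (the end - start + 1, 0 call), successful pages whose range continues the current run just extend it, and only a discontiguous page flushes the accumulated span through endio_readpage_release_extent(), which per the listing sets EXTENT_UPTODATE and unlocks the whole range with one cached-state pair of calls. The contiguity branches, schematically:

	} else if (extent_start + extent_len == start) {
		extent_len += end + 1 - start;		/* page continues the run */
	} else {
		/* flush the previous run, then start a new one at this page */
		endio_readpage_release_extent(tree, extent_start, extent_len,
					      uptodate);
		extent_start = start;
		extent_len = end + 1 - start;
	}
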
2823 u64 start; in submit_one_bio() local
2825 start = page_offset(page) + bvec->bv_offset; in submit_one_bio()
2833 mirror_num, bio_flags, start); in submit_one_bio()
2942 u64 start, u64 len, get_extent_t *get_extent, in __get_extent_map() argument
2949 if (extent_map_in_tree(em) && start >= em->start && in __get_extent_map()
2950 start < extent_map_end(em)) { in __get_extent_map()
2959 em = get_extent(inode, page, pg_offset, start, len, 0); in __get_extent_map()
2982 u64 start = page_offset(page); in __do_readpage() local
2983 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __do_readpage()
2985 u64 cur = start; in __do_readpage()
3008 unlock_extent(tree, start, end); in __do_readpage()
3054 extent_offset = cur - em->start; in __do_readpage()
3198 u64 start, u64 end, in __do_contiguous_readpages() argument
3211 lock_extent(tree, start, end); in __do_contiguous_readpages()
3212 ordered = btrfs_lookup_ordered_range(inode, start, in __do_contiguous_readpages()
3213 end - start + 1); in __do_contiguous_readpages()
3216 unlock_extent(tree, start, end); in __do_contiguous_readpages()
3236 u64 start = 0; in __extent_readpages() local
3245 start = page_start; in __extent_readpages()
3246 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3252 index - first_index, start, in __extent_readpages()
3256 start = page_start; in __extent_readpages()
3257 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3264 index - first_index, start, in __extent_readpages()
3278 u64 start = page_offset(page); in __extent_read_full_page() local
3279 u64 end = start + PAGE_CACHE_SIZE - 1; in __extent_read_full_page()
3283 lock_extent(tree, start, end); in __extent_read_full_page()
3284 ordered = btrfs_lookup_ordered_extent(inode, start); in __extent_read_full_page()
3287 unlock_extent(tree, start, end); in __extent_read_full_page()
3443 u64 start = page_offset(page); in __extent_writepage_io() local
3444 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __extent_writepage_io()
3446 u64 cur = start; in __extent_writepage_io()
3461 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage_io()
3484 if (i_size <= start) { in __extent_writepage_io()
3486 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage_io()
3509 extent_offset = cur - em->start; in __extent_writepage_io()
3601 u64 start = page_offset(page); in __extent_writepage() local
3602 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __extent_writepage()
3644 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written); in __extent_writepage()
3663 end_extent_writepage(page, ret, start, page_end); in __extent_writepage()
3733 num_pages = num_extent_pages(eb->start, eb->len); in lock_extent_buffer_for_io()
3855 u64 offset = eb->start; in write_one_eb()
3862 num_pages = num_extent_pages(eb->start, eb->len); in write_one_eb()
4202 u64 start, u64 end, get_extent_t *get_extent, in extent_write_locked_range() argument
4208 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> in extent_write_locked_range()
4222 .range_start = start, in extent_write_locked_range()
4226 while (start <= end) { in extent_write_locked_range()
4227 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); in extent_write_locked_range()
4232 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
4233 start + PAGE_CACHE_SIZE - 1, in extent_write_locked_range()
4238 start += PAGE_CACHE_SIZE; in extent_write_locked_range()
4321 u64 start = page_offset(page); in extent_invalidatepage() local
4322 u64 end = start + PAGE_CACHE_SIZE - 1; in extent_invalidatepage()
4325 start += ALIGN(offset, blocksize); in extent_invalidatepage()
4326 if (start > end) in extent_invalidatepage()
4329 lock_extent_bits(tree, start, end, 0, &cached_state); in extent_invalidatepage()
4331 clear_extent_bit(tree, start, end, in extent_invalidatepage()
4347 u64 start = page_offset(page); in try_release_extent_state() local
4348 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_state()
4351 if (test_range_bit(tree, start, end, in try_release_extent_state()
4361 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
4386 u64 start = page_offset(page); in try_release_extent_mapping() local
4387 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_mapping()
4392 while (start <= end) { in try_release_extent_mapping()
4393 len = end - start + 1; in try_release_extent_mapping()
4395 em = lookup_extent_mapping(map, start, len); in try_release_extent_mapping()
4401 em->start != start) { in try_release_extent_mapping()
4406 if (!test_range_bit(tree, em->start, in try_release_extent_mapping()
4414 start = extent_map_end(em); in try_release_extent_mapping()
4465 __u64 start, __u64 len, get_extent_t *get_extent) in extent_fiemap() argument
4468 u64 off = start; in extent_fiemap()
4469 u64 max = start + len; in extent_fiemap()
4494 start = round_down(start, BTRFS_I(inode)->root->sectorsize); in extent_fiemap()
4495 len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start; in extent_fiemap()
4539 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0, in extent_fiemap()
4542 em = get_extent_skip_holes(inode, start, last_for_get_extent, in extent_fiemap()
4555 if (em->start >= max || extent_map_end(em) < off) in extent_fiemap()
4564 em_start = max(em->start, off); in extent_fiemap()
4573 offset_in_extent = em_start - em->start; in extent_fiemap()
4597 (em->start - em->orig_start); in extent_fiemap()
4653 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, in extent_fiemap()
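
extent_fiemap above first aligns the request to the filesystem sector size (the round_down/round_up pair), then locks and later unlocks exactly the aligned span. Worked example with hypothetical values, sectorsize = 4096: a request of start = 5000, len = 3000 has max = 8000, so start becomes round_down(5000, 4096) = 4096 and len becomes round_up(8000, 4096) - 4096 = 8192 - 4096 = 4096, i.e. the aligned range [4096, 8191] fully covers the original [5000, 7999].
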
4682 index = num_extent_pages(eb->start, eb->len); in btrfs_release_extent_buffer_page()
4733 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, in __alloc_extent_buffer() argument
4739 eb->start = start; in __alloc_extent_buffer()
4775 unsigned long num_pages = num_extent_pages(src->start, src->len); in btrfs_clone_extent_buffer()
4777 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); in btrfs_clone_extent_buffer()
4801 u64 start) in alloc_dummy_extent_buffer() argument
4819 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_dummy_extent_buffer()
4880 num_pages = num_extent_pages(eb->start, eb->len); in mark_extent_buffer_accessed()
4890 u64 start) in find_extent_buffer() argument
4896 start >> PAGE_CACHE_SHIFT); in find_extent_buffer()
4928 u64 start) in alloc_test_extent_buffer() argument
4933 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4936 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4946 start >> PAGE_CACHE_SHIFT, eb); in alloc_test_extent_buffer()
4950 exists = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4974 u64 start) in alloc_extent_buffer() argument
4977 unsigned long num_pages = num_extent_pages(start, len); in alloc_extent_buffer()
4979 unsigned long index = start >> PAGE_CACHE_SHIFT; in alloc_extent_buffer()
4987 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
4991 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
5048 start >> PAGE_CACHE_SHIFT, eb); in alloc_extent_buffer()
5052 exists = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
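
alloc_extent_buffer (and its test-only twin above) follows a find-or-insert pattern keyed by start >> PAGE_CACHE_SHIFT: probe the radix tree first, allocate on a miss, and if the later insert loses a race to another CPU, drop the new buffer and take the winner from a second find_extent_buffer(). Skeleton inferred from the lines listed (radix-tree field name assumed):

	eb = find_extent_buffer(fs_info, start);	/* fast path: already cached */
	if (eb)
		return eb;
	eb = __alloc_extent_buffer(fs_info, start, len);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> PAGE_CACHE_SHIFT, eb);
	if (ret == -EEXIST) {				/* raced with another insert */
		exists = find_extent_buffer(fs_info, start);
		/* ...free eb and return exists... */
	}
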
5111 eb->start >> PAGE_CACHE_SHIFT); in release_extent_buffer()
5187 num_pages = num_extent_pages(eb->start, eb->len); in clear_extent_buffer_dirty()
5221 num_pages = num_extent_pages(eb->start, eb->len); in set_extent_buffer_dirty()
5237 num_pages = num_extent_pages(eb->start, eb->len); in clear_extent_buffer_uptodate()
5253 num_pages = num_extent_pages(eb->start, eb->len); in set_extent_buffer_uptodate()
5267 struct extent_buffer *eb, u64 start, int wait, in read_extent_buffer_pages() argument
5285 if (start) { in read_extent_buffer_pages()
5286 WARN_ON(start < eb->start); in read_extent_buffer_pages()
5287 start_i = (start >> PAGE_CACHE_SHIFT) - in read_extent_buffer_pages()
5288 (eb->start >> PAGE_CACHE_SHIFT); in read_extent_buffer_pages()
5293 num_pages = num_extent_pages(eb->start, eb->len); in read_extent_buffer_pages()
5363 unsigned long start, in read_extent_buffer() argument
5371 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in read_extent_buffer()
5372 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in read_extent_buffer()
5374 WARN_ON(start > eb->len); in read_extent_buffer()
5375 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer()
5377 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in read_extent_buffer()
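
The start_offset / i / offset arithmetic above recurs in every extent-buffer accessor that follows (read_extent_buffer_to_user, map_private_extent_buffer, memcmp_extent_buffer, write_extent_buffer, memset_extent_buffer). Worked example with hypothetical values, PAGE_CACHE_SIZE = 4096: a buffer at eb->start = 0x1c00 has start_offset = 0x1c00 & 0xfff = 0xc00, so buffer offset start = 0x500 lands in page i = (0xc00 + 0x500) >> 12 = 1, at in-page offset (0xc00 + 0x500) & 0xfff = 0x100.
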
5394 unsigned long start, in read_extent_buffer_to_user() argument
5402 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in read_extent_buffer_to_user()
5403 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in read_extent_buffer_to_user()
5406 WARN_ON(start > eb->len); in read_extent_buffer_to_user()
5407 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user()
5409 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in read_extent_buffer_to_user()
5430 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, in map_private_extent_buffer() argument
5435 size_t offset = start & (PAGE_CACHE_SIZE - 1); in map_private_extent_buffer()
5438 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in map_private_extent_buffer()
5439 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in map_private_extent_buffer()
5440 unsigned long end_i = (start_offset + start + min_len - 1) >> in map_private_extent_buffer()
5454 if (start + min_len > eb->len) { in map_private_extent_buffer()
5457 eb->start, eb->len, start, min_len); in map_private_extent_buffer()
5469 unsigned long start, in memcmp_extent_buffer() argument
5477 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in memcmp_extent_buffer()
5478 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in memcmp_extent_buffer()
5481 WARN_ON(start > eb->len); in memcmp_extent_buffer()
5482 WARN_ON(start + len > eb->start + eb->len); in memcmp_extent_buffer()
5484 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in memcmp_extent_buffer()
5505 unsigned long start, unsigned long len) in write_extent_buffer() argument
5512 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in write_extent_buffer()
5513 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in write_extent_buffer()
5515 WARN_ON(start > eb->len); in write_extent_buffer()
5516 WARN_ON(start + len > eb->start + eb->len); in write_extent_buffer()
5518 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in write_extent_buffer()
5536 unsigned long start, unsigned long len) in memset_extent_buffer() argument
5542 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in memset_extent_buffer()
5543 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in memset_extent_buffer()
5545 WARN_ON(start > eb->len); in memset_extent_buffer()
5546 WARN_ON(start + len > eb->start + eb->len); in memset_extent_buffer()
5548 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in memset_extent_buffer()
5573 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in copy_extent_buffer()
5631 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in memcpy_extent_buffer()
5679 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in memmove_extent_buffer()