Lines matching references to 'start' (apparently fs/btrfs/extent_io.c from a pre-4.6 kernel, judging by the PAGE_CACHE_* macros). Each entry is: source line number, matching code fragment, enclosing function, and an optional tag (argument/local/member).

68 state->start, state->end, state->state, in btrfs_leak_debug_check()
79 eb->start, eb->len, atomic_read(&eb->refs)); in btrfs_leak_debug_check()
85 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
88 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
101 caller, btrfs_ino(inode), isize, start, end); in __btrfs_debug_check_extent_io_range()
114 u64 start; member
259 if (offset < entry->start) in tree_insert()
291 if (offset < entry->start) in __etree_search()
316 while (prev && offset < prev_entry->start) { in __etree_search()
375 if (other->end == state->start - 1 && in merge_state()
378 state->start = other->start; in merge_state()
387 if (other->start == state->end + 1 && in merge_state()
426 struct extent_state *state, u64 start, u64 end, in insert_state() argument
433 if (end < start) in insert_state()
435 end, start); in insert_state()
436 state->start = start; in insert_state()
447 found->start, found->end, start, end); in insert_state()
482 prealloc->start = orig->start; in split_state()
485 orig->start = split; in split_state()
520 u64 range = state->end - state->start + 1; in clear_state_bit()
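The helpers above (merge_state, insert_state, split_state, clear_state_bit) maintain the core invariant of the extent_io_tree: extent_state nodes are non-overlapping, byte-granular [start, end] ranges, merged with a neighbor when the ranges touch and the state bits match, and split when an operation covers only part of a node. A minimal userspace model of the merge and split arithmetic visible at lines 375-387 and 482-485 above (toy types invented for illustration, not the kernel structures):

    /* Toy stand-in for struct extent_state. */
    struct range { unsigned long long start, end; unsigned bits; };

    /* merge_state(): absorb a left neighbor that touches and has equal bits. */
    static void try_merge_left(struct range *state, struct range *other)
    {
        if (other->end == state->start - 1 && other->bits == state->bits)
            state->start = other->start; /* 'other' would then be freed */
    }

    /* split_state(): [orig->start, split_at - 1] goes to prealloc,
     * orig shrinks to [split_at, orig->end]. */
    static void split(struct range *orig, struct range *prealloc,
                      unsigned long long split_at)
    {
        prealloc->start = orig->start;
        prealloc->end = split_at - 1;
        prealloc->bits = orig->bits;
        orig->start = split_at;
    }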
572 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
585 btrfs_debug_check_extent_io_range(tree, start, end); in clear_extent_bit()
618 cached->start <= start && cached->end > start) { in clear_extent_bit()
631 node = tree_search(tree, start); in clear_extent_bit()
636 if (state->start > end) in clear_extent_bit()
638 WARN_ON(state->end < start); in clear_extent_bit()
663 if (state->start < start) { in clear_extent_bit()
666 err = split_state(tree, state, prealloc, start); in clear_extent_bit()
685 if (state->start <= end && state->end > end) { in clear_extent_bit()
705 start = last_end + 1; in clear_extent_bit()
706 if (start <= end && state && !need_resched()) in clear_extent_bit()
718 if (start > end) in clear_extent_bit()
744 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
750 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
759 node = tree_search(tree, start); in wait_extent_bit()
766 if (state->start > end) in wait_extent_bit()
770 start = state->start; in wait_extent_bit()
776 start = state->end + 1; in wait_extent_bit()
778 if (start > end) in wait_extent_bit()
798 u64 range = state->end - state->start + 1; in set_state_bits()
835 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
849 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
861 if (state->start <= start && state->end > start && in __set_extent_bit()
871 node = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
875 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
886 last_start = state->start; in __set_extent_bit()
895 if (state->start == start && state->end <= end) { in __set_extent_bit()
897 *failed_start = state->start; in __set_extent_bit()
907 start = last_end + 1; in __set_extent_bit()
909 if (start < end && state && state->start == start && in __set_extent_bit()
931 if (state->start < start) { in __set_extent_bit()
933 *failed_start = start; in __set_extent_bit()
940 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
953 start = last_end + 1; in __set_extent_bit()
955 if (start < end && state && state->start == start && in __set_extent_bit()
968 if (state->start > start) { in __set_extent_bit()
982 err = insert_state(tree, prealloc, start, this_end, in __set_extent_bit()
989 start = this_end + 1; in __set_extent_bit()
998 if (state->start <= end && state->end > end) { in __set_extent_bit()
1000 *failed_start = start; in __set_extent_bit()
1028 if (start > end) in __set_extent_bit()
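The branches of __set_extent_bit() visible above (lines 895, 931, 968 and 998) are a case analysis on how the node found at 'start' overlaps the requested [start, end]. A hypothetical userspace classifier restating those four conditions (names invented for illustration):

    enum set_case {
        CASE_EXACT_OR_WITHIN, /* state->start == start && state->end <= end */
        CASE_SPLIT_FRONT,     /* state->start < start: split, set the tail  */
        CASE_GAP_BEFORE,      /* state->start > start: insert the gap range */
        CASE_SPLIT_BACK,      /* state straddles end: split at end + 1      */
    };

    static enum set_case classify(unsigned long long node_start,
                                  unsigned long long node_end,
                                  unsigned long long start,
                                  unsigned long long end)
    {
        if (node_start == start && node_end <= end)
            return CASE_EXACT_OR_WITHIN;
        if (node_start < start)
            return CASE_SPLIT_FRONT;
        if (node_start > start)
            return CASE_GAP_BEFORE;
        return CASE_SPLIT_BACK;
    }

After each case the loop advances start past the range just handled (start = last_end + 1 at lines 907 and 953) and repeats until start > end.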
1036 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1040 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1062 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1076 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1095 if (state->start <= start && state->end > start && in convert_extent_bit()
1106 node = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1113 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1123 last_start = state->start; in convert_extent_bit()
1132 if (state->start == start && state->end <= end) { in convert_extent_bit()
1138 start = last_end + 1; in convert_extent_bit()
1139 if (start < end && state && state->start == start && in convert_extent_bit()
1161 if (state->start < start) { in convert_extent_bit()
1167 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1179 start = last_end + 1; in convert_extent_bit()
1180 if (start < end && state && state->start == start && in convert_extent_bit()
1193 if (state->start > start) { in convert_extent_bit()
1210 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1216 start = this_end + 1; in convert_extent_bit()
1225 if (state->start <= end && state->end > end) { in convert_extent_bit()
1253 if (start > end) in convert_extent_bit()
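convert_extent_bit() performs the same walk as __set_extent_bit() but sets one bit mask while clearing another in a single pass. A hedged usage sketch, assuming this era's signature (bits to set, then bits to clear, then cached state and a GFP mask), modeled on the transaction code's dirty-to-need-wait conversion:

    struct extent_state *cached = NULL;
    int ret = convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
                                 EXTENT_DIRTY, &cached, GFP_NOFS);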
1263 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1266 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, in set_extent_dirty()
1270 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1273 return set_extent_bit(tree, start, end, bits, NULL, in set_extent_bits()
1277 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1280 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); in clear_extent_bits()
1283 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1286 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1291 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_defrag() argument
1294 return set_extent_bit(tree, start, end, in set_extent_defrag()
1299 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1302 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1307 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1310 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, in set_extent_new()
1314 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1317 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, in set_extent_uptodate()
1321 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_uptodate() argument
1324 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
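The wrappers above reduce the common bit operations to one call each. A minimal usage sketch against the signatures shown, assuming the truncated trailing parameters are the usual (unsigned bits, gfp_t mask); GFP_NOFS as elsewhere in this file:

    /* Mark a 4 KiB range dirty in the io_tree, clear it again later. */
    set_extent_bits(tree, 0, 4095, EXTENT_DIRTY, GFP_NOFS);
    /* ... */
    clear_extent_bits(tree, 0, 4095, EXTENT_DIRTY, GFP_NOFS);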
1332 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1339 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1344 start = failed_start; in lock_extent_bits()
1347 WARN_ON(start > end); in lock_extent_bits()
1352 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
1354 return lock_extent_bits(tree, start, end, 0, NULL); in lock_extent()
1357 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1362 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1365 if (failed_start > start) in try_lock_extent()
1366 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1373 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1376 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1380 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) in unlock_extent() argument
1382 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
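lock_extent()/unlock_extent() build a byte-range lock out of the same bit machinery: EXTENT_LOCKED is set with failed_start reporting, and lock_extent_bits() (line 1344 above) retries from failed_start after waiting. Canonical usage per the signatures above:

    lock_extent(tree, start, end);
    /* the range is exclusively held; do the I/O or tree update */
    unlock_extent(tree, start, end);

    /* Non-blocking variant: returns 1 on success, 0 if any byte of the
     * range was already locked (the partial lock is rolled back, see
     * lines 1365-1366 above). */
    if (try_lock_extent(tree, start, end)) {
        /* ... */
        unlock_extent(tree, start, end);
    }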
1386 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
1388 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_range_clear_dirty_for_io()
1402 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
1404 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_range_redirty_for_io()
1422 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1424 unsigned long index = start >> PAGE_CACHE_SHIFT; in set_range_writeback()
1444 u64 start, unsigned bits) in find_first_extent_bit_state() argument
1453 node = tree_search(tree, start); in find_first_extent_bit_state()
1459 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1477 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1488 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1505 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1509 *start_ret = state->start; in find_first_extent_bit()
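find_first_extent_bit() fills *start_ret/*end_ret with the first range at or after 'start' carrying any of 'bits', so callers iterate with a simple resume loop. A sketch, assuming the usual return convention (0 = found, nonzero = nothing left):

    u64 found_start, found_end, cur = 0;

    while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
                                  EXTENT_DIRTY, NULL)) {
        /* process [found_start, found_end] */
        cur = found_end + 1;
    }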
1525 u64 *start, u64 *end, u64 max_bytes, in find_delalloc_range() argument
1530 u64 cur_start = *start; in find_delalloc_range()
1549 if (found && (state->start != cur_start || in find_delalloc_range()
1559 *start = state->start; in find_delalloc_range()
1567 total_bytes += state->end - state->start + 1; in find_delalloc_range()
1580 u64 start, u64 end) in __unlock_for_delalloc() argument
1584 unsigned long index = start >> PAGE_CACHE_SHIFT; in __unlock_for_delalloc()
1677 struct page *locked_page, u64 *start, in find_lock_delalloc_range() argument
1689 delalloc_start = *start; in find_lock_delalloc_range()
1693 if (!found || delalloc_end <= *start) { in find_lock_delalloc_range()
1694 *start = delalloc_start; in find_lock_delalloc_range()
1705 if (delalloc_start < *start) in find_lock_delalloc_range()
1706 delalloc_start = *start; in find_lock_delalloc_range()
1749 *start = delalloc_start; in find_lock_delalloc_range()
1755 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, in extent_clear_unlock_delalloc() argument
1763 unsigned long index = start >> PAGE_CACHE_SHIFT; in extent_clear_unlock_delalloc()
1768 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1813 u64 *start, u64 search_end, u64 max_bytes, in count_range_bits() argument
1818 u64 cur_start = *start; in count_range_bits()
1841 if (state->start > search_end) in count_range_bits()
1843 if (contig && found && state->start > last + 1) in count_range_bits()
1847 max(cur_start, state->start); in count_range_bits()
1851 *start = max(cur_start, state->start); in count_range_bits()
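count_range_bits() totals the bytes carrying 'bits' between *start and search_end (optionally only a contiguous run, capped at max_bytes) and rewinds *start to the first matching byte, per lines 1847-1852 above. A hedged sketch of the assumed signature:

    u64 pos = 0;
    u64 dirty = count_range_bits(tree, &pos, (u64)-1, (u64)-1,
                                 EXTENT_DIRTY, 0 /* !contig */);
    /* 'dirty' = bytes with EXTENT_DIRTY set; 'pos' = first such byte */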
1871 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) in set_state_private() argument
1882 node = tree_search(tree, start); in set_state_private()
1888 if (state->start != start) { in set_state_private()
1898 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) in get_state_private() argument
1909 node = tree_search(tree, start); in get_state_private()
1915 if (state->start != start) { in get_state_private()
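set_state_private()/get_state_private() attach one u64 to the extent_state whose start matches exactly (both fail otherwise, per lines 1888 and 1915); the I/O-failure code further down uses this to stash a record pointer. Sketch of the round-trip, with the pointer casts as in btrfs_get_io_failure_record():

    /* store: the record pointer rides in the state's private u64 */
    set_extent_bits(failure_tree, start, end,
                    EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
    set_state_private(failure_tree, start, (u64)(unsigned long)failrec);

    /* load: fails (-ENOENT) unless a state begins exactly at 'start' */
    u64 private;
    if (!get_state_private(failure_tree, start, &private))
        failrec = (struct io_failure_record *)(unsigned long)private;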
1931 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
1939 if (cached && extent_state_in_tree(cached) && cached->start <= start && in test_range_bit()
1940 cached->end > start) in test_range_bit()
1943 node = tree_search(tree, start); in test_range_bit()
1944 while (node && start <= end) { in test_range_bit()
1947 if (filled && state->start > start) { in test_range_bit()
1952 if (state->start > end) in test_range_bit()
1967 start = state->end + 1; in test_range_bit()
1968 if (start > end) in test_range_bit()
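test_range_bit()'s 'filled' flag selects between every-byte and any-byte semantics: with filled = 1 the walk above fails on the first gap or miss (line 1947), with filled = 0 it succeeds on the first hit. Usage as in check_page_uptodate() just below:

    /* every byte of the page must carry EXTENT_UPTODATE */
    if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
        SetPageUptodate(page);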
1987 u64 start = page_offset(page); in check_page_uptodate() local
1988 u64 end = start + PAGE_CACHE_SIZE - 1; in check_page_uptodate()
1989 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
1999 set_state_private(failure_tree, rec->start, 0); in free_io_failure()
2000 ret = clear_extent_bits(failure_tree, rec->start, in free_io_failure()
2001 rec->start + rec->len - 1, in free_io_failure()
2006 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, in free_io_failure()
2007 rec->start + rec->len - 1, in free_io_failure()
2026 int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, in repair_io_failure() argument
2078 btrfs_ino(inode), start, in repair_io_failure()
2087 u64 start = eb->start; in repair_eb_io_failure() local
2088 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); in repair_eb_io_failure()
2097 ret = repair_io_failure(root->fs_info->btree_inode, start, in repair_eb_io_failure()
2098 PAGE_CACHE_SIZE, start, p, in repair_eb_io_failure()
2099 start - page_offset(p), mirror_num); in repair_eb_io_failure()
2102 start += PAGE_CACHE_SIZE; in repair_eb_io_failure()
2112 int clean_io_failure(struct inode *inode, u64 start, struct page *page, in clean_io_failure() argument
2129 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, in clean_io_failure()
2140 failrec->start); in clean_io_failure()
2148 failrec->start, in clean_io_failure()
2152 if (state && state->start <= failrec->start && in clean_io_failure()
2153 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2157 repair_io_failure(inode, start, failrec->len, in clean_io_failure()
2175 void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) in btrfs_free_io_failure_record() argument
2185 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); in btrfs_free_io_failure_record()
2187 if (state->start > end) in btrfs_free_io_failure_record()
2203 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, in btrfs_get_io_failure_record() argument
2215 ret = get_state_private(failure_tree, start, &private); in btrfs_get_io_failure_record()
2221 failrec->start = start; in btrfs_get_io_failure_record()
2222 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2228 em = lookup_extent_mapping(em_tree, start, failrec->len); in btrfs_get_io_failure_record()
2235 if (em->start > start || em->start + em->len <= start) { in btrfs_get_io_failure_record()
2245 logical = start - em->start; in btrfs_get_io_failure_record()
2255 logical, start, failrec->len); in btrfs_get_io_failure_record()
2261 ret = set_extent_bits(failure_tree, start, end, in btrfs_get_io_failure_record()
2264 ret = set_state_private(failure_tree, start, in btrfs_get_io_failure_record()
2268 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in btrfs_get_io_failure_record()
2277 failrec->logical, failrec->start, failrec->len, in btrfs_get_io_failure_record()
2398 struct page *page, u64 start, u64 end, in bio_readpage_error() argument
2410 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); in bio_readpage_error()
2427 start - page_offset(page), in bio_readpage_error()
2451 int end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2460 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2487 u64 start; in end_bio_extent_writepage() local
2511 start = page_offset(page); in end_bio_extent_writepage()
2512 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2514 if (end_extent_writepage(page, err, start, end)) in end_bio_extent_writepage()
2524 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, in endio_readpage_release_extent() argument
2528 u64 end = start + len - 1; in endio_readpage_release_extent()
2531 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2532 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2553 u64 start; in end_bio_extent_readpage() local
2591 start = page_offset(page); in end_bio_extent_readpage()
2592 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2599 page, start, end, in end_bio_extent_readpage()
2604 clean_io_failure(inode, start, page, 0); in end_bio_extent_readpage()
2626 ret = bio_readpage_error(bio, offset, page, start, end, in end_bio_extent_readpage()
2663 endio_readpage_release_extent(tree, start, in end_bio_extent_readpage()
2664 end - start + 1, 0); in end_bio_extent_readpage()
2666 extent_start = start; in end_bio_extent_readpage()
2667 extent_len = end + 1 - start; in end_bio_extent_readpage()
2668 } else if (extent_start + extent_len == start) { in end_bio_extent_readpage()
2669 extent_len += end + 1 - start; in end_bio_extent_readpage()
2673 extent_start = start; in end_bio_extent_readpage()
2674 extent_len = end + 1 - start; in end_bio_extent_readpage()
2756 u64 start; in submit_one_bio() local
2758 start = page_offset(page) + bvec->bv_offset; in submit_one_bio()
2766 mirror_num, bio_flags, start); in submit_one_bio()
2875 u64 start, u64 len, get_extent_t *get_extent, in __get_extent_map() argument
2882 if (extent_map_in_tree(em) && start >= em->start && in __get_extent_map()
2883 start < extent_map_end(em)) { in __get_extent_map()
2892 em = get_extent(inode, page, pg_offset, start, len, 0); in __get_extent_map()
2915 u64 start = page_offset(page); in __do_readpage() local
2916 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __do_readpage()
2918 u64 cur = start; in __do_readpage()
2941 unlock_extent(tree, start, end); in __do_readpage()
2987 extent_offset = cur - em->start; in __do_readpage()
3127 u64 start, u64 end, in __do_contiguous_readpages() argument
3140 lock_extent(tree, start, end); in __do_contiguous_readpages()
3141 ordered = btrfs_lookup_ordered_range(inode, start, in __do_contiguous_readpages()
3142 end - start + 1); in __do_contiguous_readpages()
3145 unlock_extent(tree, start, end); in __do_contiguous_readpages()
3165 u64 start = 0; in __extent_readpages() local
3174 start = page_start; in __extent_readpages()
3175 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3181 index - first_index, start, in __extent_readpages()
3185 start = page_start; in __extent_readpages()
3186 end = start + PAGE_CACHE_SIZE - 1; in __extent_readpages()
3193 index - first_index, start, in __extent_readpages()
3207 u64 start = page_offset(page); in __extent_read_full_page() local
3208 u64 end = start + PAGE_CACHE_SIZE - 1; in __extent_read_full_page()
3212 lock_extent(tree, start, end); in __extent_read_full_page()
3213 ordered = btrfs_lookup_ordered_extent(inode, start); in __extent_read_full_page()
3216 unlock_extent(tree, start, end); in __extent_read_full_page()
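__extent_read_full_page() (lines 3212-3216 above) shows the standard btrfs dance for taking a range lock when ordered (in-flight write) extents may overlap: lock, look up an ordered extent, and if one exists back off, wait for it, and retry. A sketch reconstructed from the calls above plus the usual ordered-extent helpers (assumed available in this tree):

    while (1) {
        lock_extent(tree, start, end);
        ordered = btrfs_lookup_ordered_extent(inode, start);
        if (!ordered)
            break;
        unlock_extent(tree, start, end);
        btrfs_start_ordered_extent(inode, ordered, 1); /* 1 = wait */
        btrfs_put_ordered_extent(ordered);
    }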
3372 u64 start = page_offset(page); in __extent_writepage_io() local
3373 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __extent_writepage_io()
3375 u64 cur = start; in __extent_writepage_io()
3390 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage_io()
3413 if (i_size <= start) { in __extent_writepage_io()
3415 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage_io()
3438 extent_offset = cur - em->start; in __extent_writepage_io()
3530 u64 start = page_offset(page); in __extent_writepage() local
3531 u64 page_end = start + PAGE_CACHE_SIZE - 1; in __extent_writepage()
3573 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written); in __extent_writepage()
3592 end_extent_writepage(page, ret, start, page_end); in __extent_writepage()
3662 num_pages = num_extent_pages(eb->start, eb->len); in lock_extent_buffer_for_io()
3783 u64 offset = eb->start; in write_one_eb()
3790 num_pages = num_extent_pages(eb->start, eb->len); in write_one_eb()
4130 u64 start, u64 end, get_extent_t *get_extent, in extent_write_locked_range() argument
4136 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> in extent_write_locked_range()
4150 .range_start = start, in extent_write_locked_range()
4154 while (start <= end) { in extent_write_locked_range()
4155 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); in extent_write_locked_range()
4160 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
4161 start + PAGE_CACHE_SIZE - 1, in extent_write_locked_range()
4166 start += PAGE_CACHE_SIZE; in extent_write_locked_range()
4249 u64 start = page_offset(page); in extent_invalidatepage() local
4250 u64 end = start + PAGE_CACHE_SIZE - 1; in extent_invalidatepage()
4253 start += ALIGN(offset, blocksize); in extent_invalidatepage()
4254 if (start > end) in extent_invalidatepage()
4257 lock_extent_bits(tree, start, end, 0, &cached_state); in extent_invalidatepage()
4259 clear_extent_bit(tree, start, end, in extent_invalidatepage()
4275 u64 start = page_offset(page); in try_release_extent_state() local
4276 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_state()
4279 if (test_range_bit(tree, start, end, in try_release_extent_state()
4289 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
4314 u64 start = page_offset(page); in try_release_extent_mapping() local
4315 u64 end = start + PAGE_CACHE_SIZE - 1; in try_release_extent_mapping()
4320 while (start <= end) { in try_release_extent_mapping()
4321 len = end - start + 1; in try_release_extent_mapping()
4323 em = lookup_extent_mapping(map, start, len); in try_release_extent_mapping()
4329 em->start != start) { in try_release_extent_mapping()
4334 if (!test_range_bit(tree, em->start, in try_release_extent_mapping()
4342 start = extent_map_end(em); in try_release_extent_mapping()
4393 __u64 start, __u64 len, get_extent_t *get_extent) in extent_fiemap() argument
4396 u64 off = start; in extent_fiemap()
4397 u64 max = start + len; in extent_fiemap()
4422 start = round_down(start, BTRFS_I(inode)->root->sectorsize); in extent_fiemap()
4423 len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start; in extent_fiemap()
4467 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0, in extent_fiemap()
4470 em = get_extent_skip_holes(inode, start, last_for_get_extent, in extent_fiemap()
4483 if (em->start >= max || extent_map_end(em) < off) in extent_fiemap()
4492 em_start = max(em->start, off); in extent_fiemap()
4501 offset_in_extent = em_start - em->start; in extent_fiemap()
4525 (em->start - em->orig_start); in extent_fiemap()
4579 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1, in extent_fiemap()
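extent_fiemap() widens the request to sector boundaries before locking (the round_down/round_up lines above): start is rounded down, and len is recomputed from the rounded-up end (max = start + len) so the locked range still covers the caller's range. Worked example with sectorsize 4096:

    /* request: start = 1000, len = 100  =>  max = 1100 */
    start = round_down(1000, 4096);       /* = 0    */
    len   = round_up(1100, 4096) - start; /* = 4096 */
    /* lock [start, start + len - 1] = [0, 4095], a superset of the
     * caller's [1000, 1099] */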
4608 index = num_extent_pages(eb->start, eb->len); in btrfs_release_extent_buffer_page()
4659 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, in __alloc_extent_buffer() argument
4667 eb->start = start; in __alloc_extent_buffer()
4703 unsigned long num_pages = num_extent_pages(src->start, src->len); in btrfs_clone_extent_buffer()
4705 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); in btrfs_clone_extent_buffer()
4729 u64 start) in alloc_dummy_extent_buffer() argument
4747 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_dummy_extent_buffer()
4808 num_pages = num_extent_pages(eb->start, eb->len); in mark_extent_buffer_accessed()
4818 u64 start) in find_extent_buffer() argument
4824 start >> PAGE_CACHE_SHIFT); in find_extent_buffer()
4856 u64 start) in alloc_test_extent_buffer() argument
4861 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4864 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4874 start >> PAGE_CACHE_SHIFT, eb); in alloc_test_extent_buffer()
4878 exists = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4902 u64 start) in alloc_extent_buffer() argument
4905 unsigned long num_pages = num_extent_pages(start, len); in alloc_extent_buffer()
4907 unsigned long index = start >> PAGE_CACHE_SHIFT; in alloc_extent_buffer()
4915 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
4919 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
4976 start >> PAGE_CACHE_SHIFT, eb); in alloc_extent_buffer()
4980 exists = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
5039 eb->start >> PAGE_CACHE_SHIFT); in release_extent_buffer()
5115 num_pages = num_extent_pages(eb->start, eb->len); in clear_extent_buffer_dirty()
5149 num_pages = num_extent_pages(eb->start, eb->len); in set_extent_buffer_dirty()
5165 num_pages = num_extent_pages(eb->start, eb->len); in clear_extent_buffer_uptodate()
5181 num_pages = num_extent_pages(eb->start, eb->len); in set_extent_buffer_uptodate()
5195 struct extent_buffer *eb, u64 start, int wait, in read_extent_buffer_pages() argument
5213 if (start) { in read_extent_buffer_pages()
5214 WARN_ON(start < eb->start); in read_extent_buffer_pages()
5215 start_i = (start >> PAGE_CACHE_SHIFT) - in read_extent_buffer_pages()
5216 (eb->start >> PAGE_CACHE_SHIFT); in read_extent_buffer_pages()
5221 num_pages = num_extent_pages(eb->start, eb->len); in read_extent_buffer_pages()
5291 unsigned long start, in read_extent_buffer() argument
5299 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in read_extent_buffer()
5300 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in read_extent_buffer()
5302 WARN_ON(start > eb->len); in read_extent_buffer()
5303 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer()
5305 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in read_extent_buffer()
5322 unsigned long start, in read_extent_buffer_to_user() argument
5330 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in read_extent_buffer_to_user()
5331 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in read_extent_buffer_to_user()
5334 WARN_ON(start > eb->len); in read_extent_buffer_to_user()
5335 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user()
5337 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in read_extent_buffer_to_user()
5358 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, in map_private_extent_buffer() argument
5363 size_t offset = start & (PAGE_CACHE_SIZE - 1); in map_private_extent_buffer()
5366 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in map_private_extent_buffer()
5367 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in map_private_extent_buffer()
5368 unsigned long end_i = (start_offset + start + min_len - 1) >> in map_private_extent_buffer()
5382 if (start + min_len > eb->len) { in map_private_extent_buffer()
5385 eb->start, eb->len, start, min_len); in map_private_extent_buffer()
5397 unsigned long start, in memcmp_extent_buffer() argument
5405 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in memcmp_extent_buffer()
5406 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in memcmp_extent_buffer()
5409 WARN_ON(start > eb->len); in memcmp_extent_buffer()
5410 WARN_ON(start + len > eb->start + eb->len); in memcmp_extent_buffer()
5412 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in memcmp_extent_buffer()
5433 unsigned long start, unsigned long len) in write_extent_buffer() argument
5440 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in write_extent_buffer()
5441 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in write_extent_buffer()
5443 WARN_ON(start > eb->len); in write_extent_buffer()
5444 WARN_ON(start + len > eb->start + eb->len); in write_extent_buffer()
5446 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in write_extent_buffer()
5464 unsigned long start, unsigned long len) in memset_extent_buffer() argument
5470 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); in memset_extent_buffer()
5471 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; in memset_extent_buffer()
5473 WARN_ON(start > eb->len); in memset_extent_buffer()
5474 WARN_ON(start + len > eb->start + eb->len); in memset_extent_buffer()
5476 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); in memset_extent_buffer()
5501 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in copy_extent_buffer()
5559 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in memcpy_extent_buffer()
5605 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); in memmove_extent_buffer()
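Every accessor from read_extent_buffer() down repeats the same address math: an extent buffer may begin at a non-page-aligned byte, so start_offset folds eb->start's in-page offset into the page index and per-page offset for a logical offset 'start' into the buffer. The computation isolated (PAGE_CACHE_SIZE/SHIFT as in this pre-4.6 tree; eb->pages assumed per this era's struct extent_buffer):

    /* Map logical offset 'start' within eb to (page index, in-page offset). */
    size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
    unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
    size_t offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
    struct page *p = eb->pages[i];
    /* copy min(len, PAGE_CACHE_SIZE - offset) bytes at
     * page_address(p) + offset, then advance: i++, offset = 0 */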