Searched refs:eb (Results 1 - 81 of 81) sorted by relevance

/linux-4.4.14/fs/btrfs/
locking.c
27 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
34 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) btrfs_set_lock_blocking_rw() argument
42 if (eb->lock_nested && current->pid == eb->lock_owner) btrfs_set_lock_blocking_rw()
45 if (atomic_read(&eb->blocking_writers) == 0) { btrfs_set_lock_blocking_rw()
46 WARN_ON(atomic_read(&eb->spinning_writers) != 1); btrfs_set_lock_blocking_rw()
47 atomic_dec(&eb->spinning_writers); btrfs_set_lock_blocking_rw()
48 btrfs_assert_tree_locked(eb); btrfs_set_lock_blocking_rw()
49 atomic_inc(&eb->blocking_writers); btrfs_set_lock_blocking_rw()
50 write_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
53 btrfs_assert_tree_read_locked(eb); btrfs_set_lock_blocking_rw()
54 atomic_inc(&eb->blocking_readers); btrfs_set_lock_blocking_rw()
55 WARN_ON(atomic_read(&eb->spinning_readers) == 0); btrfs_set_lock_blocking_rw()
56 atomic_dec(&eb->spinning_readers); btrfs_set_lock_blocking_rw()
57 read_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
66 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) btrfs_clear_lock_blocking_rw() argument
74 if (eb->lock_nested && current->pid == eb->lock_owner) btrfs_clear_lock_blocking_rw()
78 BUG_ON(atomic_read(&eb->blocking_writers) != 1); btrfs_clear_lock_blocking_rw()
79 write_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
80 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_clear_lock_blocking_rw()
81 atomic_inc(&eb->spinning_writers); btrfs_clear_lock_blocking_rw()
85 if (atomic_dec_and_test(&eb->blocking_writers) && btrfs_clear_lock_blocking_rw()
86 waitqueue_active(&eb->write_lock_wq)) btrfs_clear_lock_blocking_rw()
87 wake_up(&eb->write_lock_wq); btrfs_clear_lock_blocking_rw()
89 BUG_ON(atomic_read(&eb->blocking_readers) == 0); btrfs_clear_lock_blocking_rw()
90 read_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
91 atomic_inc(&eb->spinning_readers); btrfs_clear_lock_blocking_rw()
95 if (atomic_dec_and_test(&eb->blocking_readers) && btrfs_clear_lock_blocking_rw()
96 waitqueue_active(&eb->read_lock_wq)) btrfs_clear_lock_blocking_rw()
97 wake_up(&eb->read_lock_wq); btrfs_clear_lock_blocking_rw()
106 void btrfs_tree_read_lock(struct extent_buffer *eb) btrfs_tree_read_lock() argument
109 BUG_ON(!atomic_read(&eb->blocking_writers) && btrfs_tree_read_lock()
110 current->pid == eb->lock_owner); btrfs_tree_read_lock()
112 read_lock(&eb->lock); btrfs_tree_read_lock()
113 if (atomic_read(&eb->blocking_writers) && btrfs_tree_read_lock()
114 current->pid == eb->lock_owner) { btrfs_tree_read_lock()
121 BUG_ON(eb->lock_nested); btrfs_tree_read_lock()
122 eb->lock_nested = 1; btrfs_tree_read_lock()
123 read_unlock(&eb->lock); btrfs_tree_read_lock()
126 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_read_lock()
127 read_unlock(&eb->lock); btrfs_tree_read_lock()
128 wait_event(eb->write_lock_wq, btrfs_tree_read_lock()
129 atomic_read(&eb->blocking_writers) == 0); btrfs_tree_read_lock()
132 atomic_inc(&eb->read_locks); btrfs_tree_read_lock()
133 atomic_inc(&eb->spinning_readers); btrfs_tree_read_lock()
141 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) btrfs_tree_read_lock_atomic() argument
143 if (atomic_read(&eb->blocking_writers)) btrfs_tree_read_lock_atomic()
146 read_lock(&eb->lock); btrfs_tree_read_lock_atomic()
147 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_read_lock_atomic()
148 read_unlock(&eb->lock); btrfs_tree_read_lock_atomic()
151 atomic_inc(&eb->read_locks); btrfs_tree_read_lock_atomic()
152 atomic_inc(&eb->spinning_readers); btrfs_tree_read_lock_atomic()
160 int btrfs_try_tree_read_lock(struct extent_buffer *eb) btrfs_try_tree_read_lock() argument
162 if (atomic_read(&eb->blocking_writers)) btrfs_try_tree_read_lock()
165 if (!read_trylock(&eb->lock)) btrfs_try_tree_read_lock()
168 if (atomic_read(&eb->blocking_writers)) { btrfs_try_tree_read_lock()
169 read_unlock(&eb->lock); btrfs_try_tree_read_lock()
172 atomic_inc(&eb->read_locks); btrfs_try_tree_read_lock()
173 atomic_inc(&eb->spinning_readers); btrfs_try_tree_read_lock()
181 int btrfs_try_tree_write_lock(struct extent_buffer *eb) btrfs_try_tree_write_lock() argument
183 if (atomic_read(&eb->blocking_writers) || btrfs_try_tree_write_lock()
184 atomic_read(&eb->blocking_readers)) btrfs_try_tree_write_lock()
187 write_lock(&eb->lock); btrfs_try_tree_write_lock()
188 if (atomic_read(&eb->blocking_writers) || btrfs_try_tree_write_lock()
189 atomic_read(&eb->blocking_readers)) { btrfs_try_tree_write_lock()
190 write_unlock(&eb->lock); btrfs_try_tree_write_lock()
193 atomic_inc(&eb->write_locks); btrfs_try_tree_write_lock()
194 atomic_inc(&eb->spinning_writers); btrfs_try_tree_write_lock()
195 eb->lock_owner = current->pid; btrfs_try_tree_write_lock()
202 void btrfs_tree_read_unlock(struct extent_buffer *eb) btrfs_tree_read_unlock() argument
210 if (eb->lock_nested && current->pid == eb->lock_owner) { btrfs_tree_read_unlock()
211 eb->lock_nested = 0; btrfs_tree_read_unlock()
214 btrfs_assert_tree_read_locked(eb); btrfs_tree_read_unlock()
215 WARN_ON(atomic_read(&eb->spinning_readers) == 0); btrfs_tree_read_unlock()
216 atomic_dec(&eb->spinning_readers); btrfs_tree_read_unlock()
217 atomic_dec(&eb->read_locks); btrfs_tree_read_unlock()
218 read_unlock(&eb->lock); btrfs_tree_read_unlock()
224 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) btrfs_tree_read_unlock_blocking() argument
232 if (eb->lock_nested && current->pid == eb->lock_owner) { btrfs_tree_read_unlock_blocking()
233 eb->lock_nested = 0; btrfs_tree_read_unlock_blocking()
236 btrfs_assert_tree_read_locked(eb); btrfs_tree_read_unlock_blocking()
237 WARN_ON(atomic_read(&eb->blocking_readers) == 0); btrfs_tree_read_unlock_blocking()
241 if (atomic_dec_and_test(&eb->blocking_readers) && btrfs_tree_read_unlock_blocking()
242 waitqueue_active(&eb->read_lock_wq)) btrfs_tree_read_unlock_blocking()
243 wake_up(&eb->read_lock_wq); btrfs_tree_read_unlock_blocking()
244 atomic_dec(&eb->read_locks); btrfs_tree_read_unlock_blocking()
251 void btrfs_tree_lock(struct extent_buffer *eb) btrfs_tree_lock() argument
253 WARN_ON(eb->lock_owner == current->pid); btrfs_tree_lock()
255 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); btrfs_tree_lock()
256 wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0); btrfs_tree_lock()
257 write_lock(&eb->lock); btrfs_tree_lock()
258 if (atomic_read(&eb->blocking_readers)) { btrfs_tree_lock()
259 write_unlock(&eb->lock); btrfs_tree_lock()
260 wait_event(eb->read_lock_wq, btrfs_tree_lock()
261 atomic_read(&eb->blocking_readers) == 0); btrfs_tree_lock()
264 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_lock()
265 write_unlock(&eb->lock); btrfs_tree_lock()
266 wait_event(eb->write_lock_wq, btrfs_tree_lock()
267 atomic_read(&eb->blocking_writers) == 0); btrfs_tree_lock()
270 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_tree_lock()
271 atomic_inc(&eb->spinning_writers); btrfs_tree_lock()
272 atomic_inc(&eb->write_locks); btrfs_tree_lock()
273 eb->lock_owner = current->pid; btrfs_tree_lock()
279 void btrfs_tree_unlock(struct extent_buffer *eb) btrfs_tree_unlock() argument
281 int blockers = atomic_read(&eb->blocking_writers); btrfs_tree_unlock()
285 btrfs_assert_tree_locked(eb); btrfs_tree_unlock()
286 eb->lock_owner = 0; btrfs_tree_unlock()
287 atomic_dec(&eb->write_locks); btrfs_tree_unlock()
290 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_tree_unlock()
291 atomic_dec(&eb->blocking_writers); btrfs_tree_unlock()
296 if (waitqueue_active(&eb->write_lock_wq)) btrfs_tree_unlock()
297 wake_up(&eb->write_lock_wq); btrfs_tree_unlock()
299 WARN_ON(atomic_read(&eb->spinning_writers) != 1); btrfs_tree_unlock()
300 atomic_dec(&eb->spinning_writers); btrfs_tree_unlock()
301 write_unlock(&eb->lock); btrfs_tree_unlock()
305 void btrfs_assert_tree_locked(struct extent_buffer *eb) btrfs_assert_tree_locked() argument
307 BUG_ON(!atomic_read(&eb->write_locks)); btrfs_assert_tree_locked()
310 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) btrfs_assert_tree_read_locked() argument
312 BUG_ON(!atomic_read(&eb->read_locks)); btrfs_assert_tree_read_locked()
locking.h
27 void btrfs_tree_lock(struct extent_buffer *eb);
28 void btrfs_tree_unlock(struct extent_buffer *eb);
30 void btrfs_tree_read_lock(struct extent_buffer *eb);
31 void btrfs_tree_read_unlock(struct extent_buffer *eb);
32 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
33 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
34 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
35 void btrfs_assert_tree_locked(struct extent_buffer *eb);
36 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
37 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
38 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
41 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) btrfs_tree_unlock_rw() argument
44 btrfs_tree_unlock(eb); btrfs_tree_unlock_rw()
46 btrfs_tree_read_unlock_blocking(eb); btrfs_tree_unlock_rw()
48 btrfs_tree_read_unlock(eb); btrfs_tree_unlock_rw()
53 static inline void btrfs_set_lock_blocking(struct extent_buffer *eb) btrfs_set_lock_blocking() argument
55 btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK); btrfs_set_lock_blocking()
58 static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb) btrfs_clear_lock_blocking() argument
60 btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING); btrfs_clear_lock_blocking()
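
The locking.c and locking.h hits above form the btrfs tree-lock API for an extent_buffer. The sketch below shows how those helpers are commonly paired (take the spinning write lock, switch to blocking mode around work that may sleep, then unlock; read side with the try variant as a fast path). It is illustrative only, assuming eb is a referenced extent_buffer the caller already looked up, and is not a verbatim call sequence from the kernel.

#include "locking.h"	/* btrfs_tree_lock() and friends, declared above */

static void demo_eb_locking(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* take the spinning write lock */
	btrfs_set_lock_blocking(eb);	/* switch to blocking mode before sleeping */
	/* ... work that may schedule while the buffer stays write-locked ... */
	btrfs_clear_lock_blocking(eb);	/* back to the spinning write lock */
	btrfs_tree_unlock(eb);		/* release; wakes any blocked waiters */

	if (!btrfs_try_tree_read_lock(eb))	/* fast path: no blocking writers */
		btrfs_tree_read_lock(eb);	/* otherwise wait for writers */
	/* ... read-only access to the buffer ... */
	btrfs_tree_read_unlock(eb);
}
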
print-tree.c
23 static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) print_chunk() argument
25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk); print_chunk()
29 btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk), print_chunk()
30 btrfs_chunk_type(eb, chunk), num_stripes); print_chunk()
33 btrfs_stripe_devid_nr(eb, chunk, i), print_chunk()
34 btrfs_stripe_offset_nr(eb, chunk, i)); print_chunk()
37 static void print_dev_item(struct extent_buffer *eb, print_dev_item() argument
42 btrfs_device_id(eb, dev_item), print_dev_item()
43 btrfs_device_total_bytes(eb, dev_item), print_dev_item()
44 btrfs_device_bytes_used(eb, dev_item)); print_dev_item()
46 static void print_extent_data_ref(struct extent_buffer *eb, print_extent_data_ref() argument
51 btrfs_extent_data_ref_root(eb, ref), print_extent_data_ref()
52 btrfs_extent_data_ref_objectid(eb, ref), print_extent_data_ref()
53 btrfs_extent_data_ref_offset(eb, ref), print_extent_data_ref()
54 btrfs_extent_data_ref_count(eb, ref)); print_extent_data_ref()
57 static void print_extent_item(struct extent_buffer *eb, int slot, int type) print_extent_item() argument
66 u32 item_size = btrfs_item_size_nr(eb, slot); print_extent_item()
74 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0); print_extent_item()
76 btrfs_extent_refs_v0(eb, ei0)); print_extent_item()
83 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item); print_extent_item()
84 flags = btrfs_extent_flags(eb, ei); print_extent_item()
87 btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei), print_extent_item()
94 btrfs_tree_block_key(eb, info, &key); print_extent_item()
99 btrfs_tree_block_level(eb, info)); print_extent_item()
109 type = btrfs_extent_inline_ref_type(eb, iref); print_extent_item()
110 offset = btrfs_extent_inline_ref_offset(eb, iref); print_extent_item()
122 print_extent_data_ref(eb, dref); print_extent_item()
128 offset, btrfs_shared_data_ref_count(eb, sref)); print_extent_item()
139 static void print_extent_ref_v0(struct extent_buffer *eb, int slot) print_extent_ref_v0() argument
143 ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0); print_extent_ref_v0()
146 btrfs_ref_root_v0(eb, ref0), print_extent_ref_v0()
147 btrfs_ref_generation_v0(eb, ref0), print_extent_ref_v0()
148 btrfs_ref_objectid_v0(eb, ref0), print_extent_ref_v0()
149 (unsigned long)btrfs_ref_count_v0(eb, ref0)); print_extent_ref_v0()
struct-funcs.c
53 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
68 token->eb == eb && \
75 err = map_private_extent_buffer(eb, offset, size, \
80 read_extent_buffer(eb, &leres, offset, size); \
88 token->eb = eb; \
92 void btrfs_set_token_##bits(struct extent_buffer *eb, \
106 token->eb == eb && \
113 err = map_private_extent_buffer(eb, offset, size, \
119 write_extent_buffer(eb, &val2, offset, size); \
127 token->eb = eb; \
136 void btrfs_node_key(struct extent_buffer *eb, btrfs_node_key() argument
140 read_eb_member(eb, (struct btrfs_key_ptr *)ptr, btrfs_node_key()
uuid-tree.c
39 struct extent_buffer *eb; btrfs_uuid_tree_lookup() local
65 eb = path->nodes[0]; btrfs_uuid_tree_lookup()
67 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_lookup()
68 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_lookup()
79 read_extent_buffer(eb, &data, offset, sizeof(data)); btrfs_uuid_tree_lookup()
100 struct extent_buffer *eb; btrfs_uuid_tree_add() local
126 eb = path->nodes[0]; btrfs_uuid_tree_add()
128 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_add()
135 eb = path->nodes[0]; btrfs_uuid_tree_add()
137 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_add()
138 offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le); btrfs_uuid_tree_add()
149 write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le)); btrfs_uuid_tree_add()
150 btrfs_mark_buffer_dirty(eb); btrfs_uuid_tree_add()
164 struct extent_buffer *eb; btrfs_uuid_tree_rem() local
196 eb = path->nodes[0]; btrfs_uuid_tree_rem()
198 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_rem()
199 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_rem()
209 read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid)); btrfs_uuid_tree_rem()
221 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_rem()
229 move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot)); btrfs_uuid_tree_rem()
230 memmove_extent_buffer(eb, move_dst, move_src, move_len); btrfs_uuid_tree_rem()
extent_io.c
63 struct extent_buffer *eb; btrfs_leak_debug_check() local
76 eb = list_entry(buffers.next, struct extent_buffer, leak_list); btrfs_leak_debug_check()
79 eb->start, eb->len, atomic_read(&eb->refs)); btrfs_leak_debug_check()
80 list_del(&eb->leak_list); btrfs_leak_debug_check()
81 kmem_cache_free(extent_buffer_cache, eb); btrfs_leak_debug_check()
2152 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, repair_eb_io_failure() argument
2155 u64 start = eb->start; repair_eb_io_failure()
2156 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); repair_eb_io_failure()
2163 struct page *p = eb->pages[i]; repair_eb_io_failure()
2919 static void attach_extent_buffer_page(struct extent_buffer *eb, attach_extent_buffer_page() argument
2925 set_page_private(page, (unsigned long)eb); attach_extent_buffer_page()
2927 WARN_ON(page->private != (unsigned long)eb); attach_extent_buffer_page()
3672 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) wait_on_extent_buffer_writeback() argument
3674 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, wait_on_extent_buffer_writeback()
3679 lock_extent_buffer_for_io(struct extent_buffer *eb, lock_extent_buffer_for_io() argument
3687 if (!btrfs_try_tree_write_lock(eb)) { lock_extent_buffer_for_io()
3690 btrfs_tree_lock(eb); lock_extent_buffer_for_io()
3693 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { lock_extent_buffer_for_io()
3694 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3702 wait_on_extent_buffer_writeback(eb); lock_extent_buffer_for_io()
3703 btrfs_tree_lock(eb); lock_extent_buffer_for_io()
3704 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) lock_extent_buffer_for_io()
3706 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3711 * We need to do this to prevent races in people who check if the eb is lock_extent_buffer_for_io()
3715 spin_lock(&eb->refs_lock); lock_extent_buffer_for_io()
3716 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { lock_extent_buffer_for_io()
3717 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); lock_extent_buffer_for_io()
3718 spin_unlock(&eb->refs_lock); lock_extent_buffer_for_io()
3719 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); lock_extent_buffer_for_io()
3721 -eb->len, lock_extent_buffer_for_io()
3725 spin_unlock(&eb->refs_lock); lock_extent_buffer_for_io()
3728 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3733 num_pages = num_extent_pages(eb->start, eb->len); lock_extent_buffer_for_io()
3735 struct page *p = eb->pages[i]; lock_extent_buffer_for_io()
3749 static void end_extent_buffer_writeback(struct extent_buffer *eb) end_extent_buffer_writeback() argument
3751 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); end_extent_buffer_writeback()
3753 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); end_extent_buffer_writeback()
3758 struct extent_buffer *eb = (struct extent_buffer *)page->private; set_btree_ioerr() local
3759 struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode); set_btree_ioerr()
3762 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) set_btree_ioerr()
3789 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is set_btree_ioerr()
3790 * not done and would not be reliable - the eb might have been released set_btree_ioerr()
3803 switch (eb->log_index) { set_btree_ioerr()
3821 struct extent_buffer *eb; end_bio_extent_buffer_writepage() local
3827 eb = (struct extent_buffer *)page->private; bio_for_each_segment_all()
3828 BUG_ON(!eb); bio_for_each_segment_all()
3829 done = atomic_dec_and_test(&eb->io_pages); bio_for_each_segment_all()
3832 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { bio_for_each_segment_all()
3842 end_extent_buffer_writeback(eb); bio_for_each_segment_all()
3848 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, write_one_eb() argument
3855 u64 offset = eb->start; write_one_eb()
3861 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); write_one_eb()
3862 num_pages = num_extent_pages(eb->start, eb->len); write_one_eb()
3863 atomic_set(&eb->io_pages, num_pages); write_one_eb()
3864 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) write_one_eb()
3868 struct page *p = eb->pages[i]; write_one_eb()
3880 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) write_one_eb()
3881 end_extent_buffer_writeback(eb); write_one_eb()
3892 struct page *p = eb->pages[i]; write_one_eb()
3906 struct extent_buffer *eb, *prev_eb = NULL; btree_write_cache_pages() local
3963 eb = (struct extent_buffer *)page->private; btree_write_cache_pages()
3970 if (WARN_ON(!eb)) { btree_write_cache_pages()
3975 if (eb == prev_eb) { btree_write_cache_pages()
3980 ret = atomic_inc_not_zero(&eb->refs); btree_write_cache_pages()
3985 prev_eb = eb; btree_write_cache_pages()
3986 ret = lock_extent_buffer_for_io(eb, fs_info, &epd); btree_write_cache_pages()
3988 free_extent_buffer(eb); btree_write_cache_pages()
3992 ret = write_one_eb(eb, fs_info, wbc, &epd); btree_write_cache_pages()
3995 free_extent_buffer(eb); btree_write_cache_pages()
3998 free_extent_buffer(eb); btree_write_cache_pages()
4658 static void __free_extent_buffer(struct extent_buffer *eb) __free_extent_buffer() argument
4660 btrfs_leak_debug_del(&eb->leak_list); __free_extent_buffer()
4661 kmem_cache_free(extent_buffer_cache, eb); __free_extent_buffer()
4664 int extent_buffer_under_io(struct extent_buffer *eb) extent_buffer_under_io() argument
4666 return (atomic_read(&eb->io_pages) || extent_buffer_under_io()
4667 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || extent_buffer_under_io()
4668 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); extent_buffer_under_io()
4674 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) btrfs_release_extent_buffer_page() argument
4678 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); btrfs_release_extent_buffer_page()
4680 BUG_ON(extent_buffer_under_io(eb)); btrfs_release_extent_buffer_page()
4682 index = num_extent_pages(eb->start, eb->len); btrfs_release_extent_buffer_page()
4688 page = eb->pages[index]; btrfs_release_extent_buffer_page()
4695 * removed the eb from the radix tree, so we could race btrfs_release_extent_buffer_page()
4696 * and have this page now attached to the new eb. So btrfs_release_extent_buffer_page()
4698 * this eb. btrfs_release_extent_buffer_page()
4701 page->private == (unsigned long)eb) { btrfs_release_extent_buffer_page()
4702 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); btrfs_release_extent_buffer_page()
4707 * to a new eb. btrfs_release_extent_buffer_page()
4726 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) btrfs_release_extent_buffer() argument
4728 btrfs_release_extent_buffer_page(eb); btrfs_release_extent_buffer()
4729 __free_extent_buffer(eb); btrfs_release_extent_buffer()
4736 struct extent_buffer *eb = NULL; __alloc_extent_buffer() local
4738 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); __alloc_extent_buffer()
4739 eb->start = start; __alloc_extent_buffer()
4740 eb->len = len; __alloc_extent_buffer()
4741 eb->fs_info = fs_info; __alloc_extent_buffer()
4742 eb->bflags = 0; __alloc_extent_buffer()
4743 rwlock_init(&eb->lock); __alloc_extent_buffer()
4744 atomic_set(&eb->write_locks, 0); __alloc_extent_buffer()
4745 atomic_set(&eb->read_locks, 0); __alloc_extent_buffer()
4746 atomic_set(&eb->blocking_readers, 0); __alloc_extent_buffer()
4747 atomic_set(&eb->blocking_writers, 0); __alloc_extent_buffer()
4748 atomic_set(&eb->spinning_readers, 0); __alloc_extent_buffer()
4749 atomic_set(&eb->spinning_writers, 0); __alloc_extent_buffer()
4750 eb->lock_nested = 0; __alloc_extent_buffer()
4751 init_waitqueue_head(&eb->write_lock_wq); __alloc_extent_buffer()
4752 init_waitqueue_head(&eb->read_lock_wq); __alloc_extent_buffer()
4754 btrfs_leak_debug_add(&eb->leak_list, &buffers); __alloc_extent_buffer()
4756 spin_lock_init(&eb->refs_lock); __alloc_extent_buffer()
4757 atomic_set(&eb->refs, 1); __alloc_extent_buffer()
4758 atomic_set(&eb->io_pages, 0); __alloc_extent_buffer()
4767 return eb; __alloc_extent_buffer()
4803 struct extent_buffer *eb; alloc_dummy_extent_buffer() local
4819 eb = __alloc_extent_buffer(fs_info, start, len); alloc_dummy_extent_buffer()
4820 if (!eb) alloc_dummy_extent_buffer()
4824 eb->pages[i] = alloc_page(GFP_NOFS); alloc_dummy_extent_buffer()
4825 if (!eb->pages[i]) alloc_dummy_extent_buffer()
4828 set_extent_buffer_uptodate(eb); alloc_dummy_extent_buffer()
4829 btrfs_set_header_nritems(eb, 0); alloc_dummy_extent_buffer()
4830 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); alloc_dummy_extent_buffer()
4832 return eb; alloc_dummy_extent_buffer()
4835 __free_page(eb->pages[i - 1]); alloc_dummy_extent_buffer()
4836 __free_extent_buffer(eb); alloc_dummy_extent_buffer()
4840 static void check_buffer_tree_ref(struct extent_buffer *eb) check_buffer_tree_ref() argument
4851 * eb bumped. check_buffer_tree_ref()
4854 * ref on the eb because free_extent_buffer might check_buffer_tree_ref()
4863 refs = atomic_read(&eb->refs); check_buffer_tree_ref()
4864 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) check_buffer_tree_ref()
4867 spin_lock(&eb->refs_lock); check_buffer_tree_ref()
4868 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) check_buffer_tree_ref()
4869 atomic_inc(&eb->refs); check_buffer_tree_ref()
4870 spin_unlock(&eb->refs_lock); check_buffer_tree_ref()
4873 static void mark_extent_buffer_accessed(struct extent_buffer *eb, mark_extent_buffer_accessed() argument
4878 check_buffer_tree_ref(eb); mark_extent_buffer_accessed()
4880 num_pages = num_extent_pages(eb->start, eb->len); mark_extent_buffer_accessed()
4882 struct page *p = eb->pages[i]; mark_extent_buffer_accessed()
4892 struct extent_buffer *eb; find_extent_buffer() local
4895 eb = radix_tree_lookup(&fs_info->buffer_radix, find_extent_buffer()
4897 if (eb && atomic_inc_not_zero(&eb->refs)) { find_extent_buffer()
4900 * Lock our eb's refs_lock to avoid races with find_extent_buffer()
4901 * free_extent_buffer. When we get our eb it might be flagged find_extent_buffer()
4904 * eb->refs == 2, that the buffer isn't under IO (dirty and find_extent_buffer()
4908 * So here we could race and increment the eb's reference count, find_extent_buffer()
4914 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { find_extent_buffer()
4915 spin_lock(&eb->refs_lock); find_extent_buffer()
4916 spin_unlock(&eb->refs_lock); find_extent_buffer()
4918 mark_extent_buffer_accessed(eb, NULL); find_extent_buffer()
4919 return eb; find_extent_buffer()
4930 struct extent_buffer *eb, *exists = NULL; alloc_test_extent_buffer() local
4933 eb = find_extent_buffer(fs_info, start); alloc_test_extent_buffer()
4934 if (eb) alloc_test_extent_buffer()
4935 return eb; alloc_test_extent_buffer()
4936 eb = alloc_dummy_extent_buffer(fs_info, start); alloc_test_extent_buffer()
4937 if (!eb) alloc_test_extent_buffer()
4939 eb->fs_info = fs_info; alloc_test_extent_buffer()
4946 start >> PAGE_CACHE_SHIFT, eb); alloc_test_extent_buffer()
4956 check_buffer_tree_ref(eb); alloc_test_extent_buffer()
4957 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); alloc_test_extent_buffer()
4965 atomic_inc(&eb->refs); alloc_test_extent_buffer()
4966 return eb; alloc_test_extent_buffer()
4968 btrfs_release_extent_buffer(eb); alloc_test_extent_buffer()
4980 struct extent_buffer *eb; alloc_extent_buffer() local
4987 eb = find_extent_buffer(fs_info, start); alloc_extent_buffer()
4988 if (eb) alloc_extent_buffer()
4989 return eb; alloc_extent_buffer()
4991 eb = __alloc_extent_buffer(fs_info, start, len); alloc_extent_buffer()
4992 if (!eb) alloc_extent_buffer()
5003 * We could have already allocated an eb for this page alloc_extent_buffer()
5005 * the existing eb, and if we can we know it's good and alloc_extent_buffer()
5027 attach_extent_buffer_page(eb, p); alloc_extent_buffer()
5030 eb->pages[i] = p; alloc_extent_buffer()
5040 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); alloc_extent_buffer()
5048 start >> PAGE_CACHE_SHIFT, eb); alloc_extent_buffer()
5059 check_buffer_tree_ref(eb); alloc_extent_buffer()
5060 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); alloc_extent_buffer()
5071 SetPageChecked(eb->pages[0]); alloc_extent_buffer()
5073 p = eb->pages[i]; alloc_extent_buffer()
5077 unlock_page(eb->pages[0]); alloc_extent_buffer()
5078 return eb; alloc_extent_buffer()
5081 WARN_ON(!atomic_dec_and_test(&eb->refs)); alloc_extent_buffer()
5083 if (eb->pages[i]) alloc_extent_buffer()
5084 unlock_page(eb->pages[i]); alloc_extent_buffer()
5087 btrfs_release_extent_buffer(eb); alloc_extent_buffer()
5093 struct extent_buffer *eb = btrfs_release_extent_buffer_rcu() local
5096 __free_extent_buffer(eb); btrfs_release_extent_buffer_rcu()
5099 /* Expects to have eb->eb_lock already held */ release_extent_buffer()
5100 static int release_extent_buffer(struct extent_buffer *eb) release_extent_buffer() argument
5102 WARN_ON(atomic_read(&eb->refs) == 0); release_extent_buffer()
5103 if (atomic_dec_and_test(&eb->refs)) { release_extent_buffer()
5104 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { release_extent_buffer()
5105 struct btrfs_fs_info *fs_info = eb->fs_info; release_extent_buffer()
5107 spin_unlock(&eb->refs_lock); release_extent_buffer()
5111 eb->start >> PAGE_CACHE_SHIFT); release_extent_buffer()
5114 spin_unlock(&eb->refs_lock); release_extent_buffer()
5118 btrfs_release_extent_buffer_page(eb); release_extent_buffer()
5120 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { release_extent_buffer()
5121 __free_extent_buffer(eb); release_extent_buffer()
5125 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); release_extent_buffer()
5128 spin_unlock(&eb->refs_lock); release_extent_buffer()
5133 void free_extent_buffer(struct extent_buffer *eb) free_extent_buffer() argument
5137 if (!eb) free_extent_buffer()
5141 refs = atomic_read(&eb->refs); free_extent_buffer()
5144 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); free_extent_buffer()
5149 spin_lock(&eb->refs_lock); free_extent_buffer()
5150 if (atomic_read(&eb->refs) == 2 && free_extent_buffer()
5151 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) free_extent_buffer()
5152 atomic_dec(&eb->refs); free_extent_buffer()
5154 if (atomic_read(&eb->refs) == 2 && free_extent_buffer()
5155 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && free_extent_buffer()
5156 !extent_buffer_under_io(eb) && free_extent_buffer()
5157 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) free_extent_buffer()
5158 atomic_dec(&eb->refs); free_extent_buffer()
5164 release_extent_buffer(eb); free_extent_buffer()
5167 void free_extent_buffer_stale(struct extent_buffer *eb) free_extent_buffer_stale() argument
5169 if (!eb) free_extent_buffer_stale()
5172 spin_lock(&eb->refs_lock); free_extent_buffer_stale()
5173 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); free_extent_buffer_stale()
5175 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && free_extent_buffer_stale()
5176 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) free_extent_buffer_stale()
5177 atomic_dec(&eb->refs); free_extent_buffer_stale()
5178 release_extent_buffer(eb); free_extent_buffer_stale()
5181 void clear_extent_buffer_dirty(struct extent_buffer *eb) clear_extent_buffer_dirty() argument
5187 num_pages = num_extent_pages(eb->start, eb->len); clear_extent_buffer_dirty()
5190 page = eb->pages[i]; clear_extent_buffer_dirty()
5208 WARN_ON(atomic_read(&eb->refs) == 0); clear_extent_buffer_dirty()
5211 int set_extent_buffer_dirty(struct extent_buffer *eb) set_extent_buffer_dirty() argument
5217 check_buffer_tree_ref(eb); set_extent_buffer_dirty()
5219 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); set_extent_buffer_dirty()
5221 num_pages = num_extent_pages(eb->start, eb->len); set_extent_buffer_dirty()
5222 WARN_ON(atomic_read(&eb->refs) == 0); set_extent_buffer_dirty()
5223 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); set_extent_buffer_dirty()
5226 set_page_dirty(eb->pages[i]); set_extent_buffer_dirty()
5230 int clear_extent_buffer_uptodate(struct extent_buffer *eb) clear_extent_buffer_uptodate() argument
5236 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); clear_extent_buffer_uptodate()
5237 num_pages = num_extent_pages(eb->start, eb->len); clear_extent_buffer_uptodate()
5239 page = eb->pages[i]; clear_extent_buffer_uptodate()
5246 int set_extent_buffer_uptodate(struct extent_buffer *eb) set_extent_buffer_uptodate() argument
5252 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); set_extent_buffer_uptodate()
5253 num_pages = num_extent_pages(eb->start, eb->len); set_extent_buffer_uptodate()
5255 page = eb->pages[i]; set_extent_buffer_uptodate()
5261 int extent_buffer_uptodate(struct extent_buffer *eb) extent_buffer_uptodate() argument
5263 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); extent_buffer_uptodate()
5267 struct extent_buffer *eb, u64 start, int wait, read_extent_buffer_pages()
5282 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) read_extent_buffer_pages()
5286 WARN_ON(start < eb->start); read_extent_buffer_pages()
5288 (eb->start >> PAGE_CACHE_SHIFT); read_extent_buffer_pages()
5293 num_pages = num_extent_pages(eb->start, eb->len); read_extent_buffer_pages()
5295 page = eb->pages[i]; read_extent_buffer_pages()
5310 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); read_extent_buffer_pages()
5314 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); read_extent_buffer_pages()
5315 eb->read_mirror = 0; read_extent_buffer_pages()
5316 atomic_set(&eb->io_pages, num_reads); read_extent_buffer_pages()
5318 page = eb->pages[i]; read_extent_buffer_pages()
5343 page = eb->pages[i]; read_extent_buffer_pages()
5354 page = eb->pages[i]; read_extent_buffer_pages()
5362 void read_extent_buffer(struct extent_buffer *eb, void *dstv, read_extent_buffer() argument
5371 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); read_extent_buffer()
5374 WARN_ON(start > eb->len); read_extent_buffer()
5375 WARN_ON(start + len > eb->start + eb->len); read_extent_buffer()
5380 page = eb->pages[i]; read_extent_buffer()
5393 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, read_extent_buffer_to_user() argument
5402 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); read_extent_buffer_to_user()
5406 WARN_ON(start > eb->len); read_extent_buffer_to_user()
5407 WARN_ON(start + len > eb->start + eb->len); read_extent_buffer_to_user()
5412 page = eb->pages[i]; read_extent_buffer_to_user()
5430 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, map_private_extent_buffer() argument
5438 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); map_private_extent_buffer()
5454 if (start + min_len > eb->len) { map_private_extent_buffer()
5455 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, " map_private_extent_buffer()
5457 eb->start, eb->len, start, min_len); map_private_extent_buffer()
5461 p = eb->pages[i]; map_private_extent_buffer()
5468 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, memcmp_extent_buffer() argument
5477 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); memcmp_extent_buffer()
5481 WARN_ON(start > eb->len); memcmp_extent_buffer()
5482 WARN_ON(start + len > eb->start + eb->len); memcmp_extent_buffer()
5487 page = eb->pages[i]; memcmp_extent_buffer()
5504 void write_extent_buffer(struct extent_buffer *eb, const void *srcv, write_extent_buffer() argument
5512 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); write_extent_buffer()
5515 WARN_ON(start > eb->len); write_extent_buffer()
5516 WARN_ON(start + len > eb->start + eb->len); write_extent_buffer()
5521 page = eb->pages[i]; write_extent_buffer()
5535 void memset_extent_buffer(struct extent_buffer *eb, char c, memset_extent_buffer() argument
5542 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); memset_extent_buffer()
5545 WARN_ON(start > eb->len); memset_extent_buffer()
5546 WARN_ON(start + len > eb->start + eb->len); memset_extent_buffer()
5551 page = eb->pages[i]; memset_extent_buffer()
5720 struct extent_buffer *eb; try_release_extent_buffer() local
5723 * We need to make sure noboody is attaching this page to an eb right try_release_extent_buffer()
5732 eb = (struct extent_buffer *)page->private; try_release_extent_buffer()
5733 BUG_ON(!eb); try_release_extent_buffer()
5737 * the eb doesn't disappear out from under us while we're looking at try_release_extent_buffer()
5740 spin_lock(&eb->refs_lock); try_release_extent_buffer()
5741 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { try_release_extent_buffer()
5742 spin_unlock(&eb->refs_lock); try_release_extent_buffer()
5749 * If tree ref isn't set then we know the ref on this eb is a real ref, try_release_extent_buffer()
5752 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { try_release_extent_buffer()
5753 spin_unlock(&eb->refs_lock); try_release_extent_buffer()
5757 return release_extent_buffer(eb); try_release_extent_buffer()
5266 read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, get_extent_t *get_extent, int mirror_num) read_extent_buffer_pages() argument
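
A note on the extent_io.c group just listed: lookup and allocation hand back an extent_buffer that already holds a reference (find_extent_buffer() bumps eb->refs with atomic_inc_not_zero()), and callers pair that with free_extent_buffer(). A minimal sketch of that contract, assuming fs_info and start come from the caller; illustrative only, not verbatim kernel code:

static void demo_eb_lookup(struct btrfs_fs_info *fs_info, u64 start)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer(fs_info, start);	/* NULL if not cached */
	if (!eb)
		return;

	/* ... lock the buffer (see the locking.h sketch above) and use it ... */

	free_extent_buffer(eb);		/* drop the reference taken by the lookup */
}
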
backref.c
37 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, check_extent_in_eb() argument
45 if (!btrfs_file_extent_compression(eb, fi) && check_extent_in_eb()
46 !btrfs_file_extent_encryption(eb, fi) && check_extent_in_eb()
47 !btrfs_file_extent_other_encoding(eb, fi)) { check_extent_in_eb()
51 data_offset = btrfs_file_extent_offset(eb, fi); check_extent_in_eb()
52 data_len = btrfs_file_extent_num_bytes(eb, fi); check_extent_in_eb()
82 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte, find_extent_in_eb() argument
99 nritems = btrfs_header_nritems(eb); find_extent_in_eb()
101 btrfs_item_key_to_cpu(eb, &key, slot); find_extent_in_eb()
104 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); find_extent_in_eb()
105 extent_type = btrfs_file_extent_type(eb, fi); find_extent_in_eb()
109 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); find_extent_in_eb()
113 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie); find_extent_in_eb()
254 struct extent_buffer *eb; add_all_parents() local
264 eb = path->nodes[level]; add_all_parents()
265 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); add_all_parents()
284 eb = path->nodes[0]; add_all_parents()
287 btrfs_item_key_to_cpu(eb, &key, slot); add_all_parents()
293 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); add_all_parents()
294 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); add_all_parents()
301 ret = check_extent_in_eb(&key, eb, fi, add_all_parents()
309 ret = ulist_add_merge_ptr(parents, eb->start, add_all_parents()
346 struct extent_buffer *eb; __resolve_indirect_ref() local
402 eb = path->nodes[level]; __resolve_indirect_ref()
403 while (!eb) { __resolve_indirect_ref()
409 eb = path->nodes[level]; __resolve_indirect_ref()
524 struct extent_buffer *eb; __add_missing_keys() local
535 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte, list_for_each()
537 if (IS_ERR(eb)) { list_for_each()
538 return PTR_ERR(eb); list_for_each()
539 } else if (!extent_buffer_uptodate(eb)) { list_for_each()
540 free_extent_buffer(eb); list_for_each()
543 btrfs_tree_read_lock(eb); list_for_each()
544 if (btrfs_header_level(eb) == 0) list_for_each()
545 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
547 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
548 btrfs_tree_read_unlock(eb); list_for_each()
549 free_extent_buffer(eb); list_for_each()
1078 struct extent_buffer *eb; local
1080 eb = read_tree_block(fs_info->extent_root,
1082 if (IS_ERR(eb)) {
1083 ret = PTR_ERR(eb);
1085 } else if (!extent_buffer_uptodate(eb)) {
1086 free_extent_buffer(eb);
1090 btrfs_tree_read_lock(eb);
1091 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1092 ret = find_extent_in_eb(eb, bytenr,
1094 btrfs_tree_read_unlock_blocking(eb);
1095 free_extent_buffer(eb);
1405 struct extent_buffer *eb = eb_in; btrfs_ref_to_path() local
1417 read_extent_buffer(eb, dest + bytes_left, btrfs_ref_to_path()
1419 if (eb != eb_in) { btrfs_ref_to_path()
1421 btrfs_tree_read_unlock_blocking(eb); btrfs_ref_to_path()
1422 free_extent_buffer(eb); btrfs_ref_to_path()
1438 eb = path->nodes[0]; btrfs_ref_to_path()
1439 /* make sure we can use eb after releasing the path */ btrfs_ref_to_path()
1440 if (eb != eb_in) { btrfs_ref_to_path()
1442 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_ref_to_path()
1447 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); btrfs_ref_to_path()
1449 name_len = btrfs_inode_ref_name_len(eb, iref); btrfs_ref_to_path()
1480 struct extent_buffer *eb; extent_from_logical() local
1513 eb = path->nodes[0]; extent_from_logical()
1514 item_size = btrfs_item_size_nr(eb, path->slots[0]); extent_from_logical()
1517 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); extent_from_logical()
1518 flags = btrfs_extent_flags(eb, ei); extent_from_logical()
1547 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb, __get_extent_inline_ref() argument
1559 flags = btrfs_extent_flags(eb, ei); __get_extent_inline_ref()
1581 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref); __get_extent_inline_ref()
1598 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, tree_backref_for_extent() argument
1610 ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size, tree_backref_for_extent()
1624 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); tree_backref_for_extent()
1630 *out_level = btrfs_tree_block_level(eb, info); tree_backref_for_extent()
1760 struct extent_buffer *eb, void *ctx);
1773 struct extent_buffer *eb; iterate_inode_refs() local
1793 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_refs()
1794 if (!eb) { iterate_inode_refs()
1798 extent_buffer_get(eb); iterate_inode_refs()
1799 btrfs_tree_read_lock(eb); iterate_inode_refs()
1800 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); iterate_inode_refs()
1804 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); iterate_inode_refs()
1806 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) { iterate_inode_refs()
1807 name_len = btrfs_inode_ref_name_len(eb, iref); iterate_inode_refs()
1813 (unsigned long)(iref + 1), eb, ctx); iterate_inode_refs()
1819 btrfs_tree_read_unlock_blocking(eb); iterate_inode_refs()
1820 free_extent_buffer(eb); iterate_inode_refs()
1837 struct extent_buffer *eb; iterate_inode_extrefs() local
1855 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_extrefs()
1856 if (!eb) { iterate_inode_extrefs()
1860 extent_buffer_get(eb); iterate_inode_extrefs()
1862 btrfs_tree_read_lock(eb); iterate_inode_extrefs()
1863 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); iterate_inode_extrefs()
1866 item_size = btrfs_item_size_nr(eb, slot); iterate_inode_extrefs()
1867 ptr = btrfs_item_ptr_offset(eb, slot); iterate_inode_extrefs()
1874 parent = btrfs_inode_extref_parent(eb, extref); iterate_inode_extrefs()
1875 name_len = btrfs_inode_extref_name_len(eb, extref); iterate_inode_extrefs()
1877 (unsigned long)&extref->name, eb, ctx); iterate_inode_extrefs()
1881 cur_offset += btrfs_inode_extref_name_len(eb, extref); iterate_inode_extrefs()
1884 btrfs_tree_read_unlock_blocking(eb); iterate_inode_extrefs()
1885 free_extent_buffer(eb); iterate_inode_extrefs()
1920 struct extent_buffer *eb, void *ctx) inode_to_path()
1934 name_off, eb, inum, fspath_min, bytes_left); inode_to_path()
1919 inode_to_path(u64 inum, u32 name_len, unsigned long name_off, struct extent_buffer *eb, void *ctx) inode_to_path() argument
extent_io.h
145 /* >= 0 if eb belongs to a log tree, -1 otherwise */
290 void free_extent_buffer(struct extent_buffer *eb);
291 void free_extent_buffer_stale(struct extent_buffer *eb);
296 struct extent_buffer *eb, u64 start, int wait,
298 void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
306 static inline void extent_buffer_get(struct extent_buffer *eb) extent_buffer_get() argument
308 atomic_inc(&eb->refs); extent_buffer_get()
311 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
314 void read_extent_buffer(struct extent_buffer *eb, void *dst,
317 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
320 void write_extent_buffer(struct extent_buffer *eb, const void *src,
329 void memset_extent_buffer(struct extent_buffer *eb, char c,
331 void clear_extent_buffer_dirty(struct extent_buffer *eb);
332 int set_extent_buffer_dirty(struct extent_buffer *eb);
333 int set_extent_buffer_uptodate(struct extent_buffer *eb);
334 int clear_extent_buffer_uptodate(struct extent_buffer *eb);
335 int extent_buffer_uptodate(struct extent_buffer *eb);
336 int extent_buffer_under_io(struct extent_buffer *eb);
337 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
361 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
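
The extent_io.h hits above declare the byte-level accessors: data moves in and out of an extent_buffer by (buffer, offset, length), as the uuid-tree.c hits earlier in this listing also show, and a write is followed by btrfs_mark_buffer_dirty(). A short sketch, assuming the byte offset was computed by the caller (for example via btrfs_item_ptr_offset()); illustrative only:

static void demo_eb_copy(struct extent_buffer *eb, unsigned long offset)
{
	__le64 value;

	read_extent_buffer(eb, &value, offset, sizeof(value));		/* eb -> stack */
	value = cpu_to_le64(le64_to_cpu(value) + 1);
	write_extent_buffer(eb, &value, offset, sizeof(value));	/* stack -> eb */
	btrfs_mark_buffer_dirty(eb);	/* as uuid-tree.c does after writing */
}
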
dev-replace.c
60 struct extent_buffer *eb; btrfs_init_dev_replace() local
101 eb = path->nodes[0]; btrfs_init_dev_replace()
102 item_size = btrfs_item_size_nr(eb, slot); btrfs_init_dev_replace()
103 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item); btrfs_init_dev_replace()
111 src_devid = btrfs_dev_replace_src_devid(eb, ptr); btrfs_init_dev_replace()
113 btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr); btrfs_init_dev_replace()
114 dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr); btrfs_init_dev_replace()
115 dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr); btrfs_init_dev_replace()
117 btrfs_dev_replace_time_stopped(eb, ptr); btrfs_init_dev_replace()
119 btrfs_dev_replace_num_write_errors(eb, ptr)); btrfs_init_dev_replace()
121 btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr)); btrfs_init_dev_replace()
122 dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr); btrfs_init_dev_replace()
125 dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr); btrfs_init_dev_replace()
201 struct extent_buffer *eb; btrfs_run_dev_replace() local
263 eb = path->nodes[0]; btrfs_run_dev_replace()
264 ptr = btrfs_item_ptr(eb, path->slots[0], btrfs_run_dev_replace()
269 btrfs_set_dev_replace_src_devid(eb, ptr, btrfs_run_dev_replace()
272 btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1); btrfs_run_dev_replace()
273 btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr, btrfs_run_dev_replace()
275 btrfs_set_dev_replace_replace_state(eb, ptr, btrfs_run_dev_replace()
277 btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started); btrfs_run_dev_replace()
278 btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped); btrfs_run_dev_replace()
279 btrfs_set_dev_replace_num_write_errors(eb, ptr, btrfs_run_dev_replace()
281 btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr, btrfs_run_dev_replace()
285 btrfs_set_dev_replace_cursor_left(eb, ptr, btrfs_run_dev_replace()
287 btrfs_set_dev_replace_cursor_right(eb, ptr, btrfs_run_dev_replace()
292 btrfs_mark_buffer_dirty(eb); btrfs_run_dev_replace()
relocation.c
62 struct extent_buffer *eb; member in struct:backref_node
394 btrfs_tree_unlock(node->eb); unlock_node_buffer()
401 if (node->eb) { drop_node_buffer()
403 free_extent_buffer(node->eb); drop_node_buffer()
404 node->eb = NULL; drop_node_buffer()
686 struct extent_buffer *eb; local
767 eb = path1->nodes[0];
770 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
778 eb = path1->nodes[0];
781 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
789 ret = find_inline_backref(eb, path1->slots[0],
800 key.type = btrfs_extent_inline_ref_type(eb, iref);
801 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
820 ref0 = btrfs_item_ptr(eb, path1->slots[0],
823 root = find_tree_root(rc, eb, ref0);
830 if (is_cowonly_root(btrfs_ref_root_v0(eb,
923 eb = path2->nodes[level];
924 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
946 eb = path2->nodes[level];
947 rb_node = tree_search(&cache->rb_root, eb->start);
955 upper->bytenr = eb->start;
956 upper->owner = btrfs_header_owner(eb);
966 if (btrfs_block_can_be_shared(root, eb))
992 upper->owner = btrfs_header_owner(eb);
1370 struct extent_buffer *eb; create_reloc_root() local
1385 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, create_reloc_root()
1400 ret = btrfs_copy_root(trans, root, root->node, &eb, create_reloc_root()
1406 btrfs_set_root_bytenr(root_item, eb->start); create_reloc_root()
1407 btrfs_set_root_level(root_item, btrfs_header_level(eb)); create_reloc_root()
1423 btrfs_tree_unlock(eb); create_reloc_root()
1424 free_extent_buffer(eb); create_reloc_root()
1741 int memcmp_node_keys(struct extent_buffer *eb, int slot, memcmp_node_keys() argument
1746 btrfs_node_key(eb, &key1, slot); memcmp_node_keys()
1766 struct extent_buffer *eb; replace_path() local
1788 eb = btrfs_lock_root_node(dest); replace_path()
1789 btrfs_set_lock_blocking(eb); replace_path()
1790 level = btrfs_header_level(eb); replace_path()
1793 btrfs_tree_unlock(eb); replace_path()
1794 free_extent_buffer(eb); replace_path()
1799 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); replace_path()
1802 btrfs_set_lock_blocking(eb); replace_path()
1810 parent = eb; replace_path()
1827 eb = path->nodes[level]; replace_path()
1828 new_bytenr = btrfs_node_blockptr(eb, replace_path()
1830 new_ptr_gen = btrfs_node_ptr_generation(eb, replace_path()
1849 eb = read_tree_block(dest, old_bytenr, old_ptr_gen); replace_path()
1850 if (IS_ERR(eb)) { replace_path()
1851 ret = PTR_ERR(eb); replace_path()
1852 } else if (!extent_buffer_uptodate(eb)) { replace_path()
1854 free_extent_buffer(eb); replace_path()
1857 btrfs_tree_lock(eb); replace_path()
1859 ret = btrfs_cow_block(trans, dest, eb, parent, replace_path()
1860 slot, &eb); replace_path()
1863 btrfs_set_lock_blocking(eb); replace_path()
1868 parent = eb; replace_path()
1937 struct extent_buffer *eb; walk_up_reloc_tree() local
1950 eb = path->nodes[i]; walk_up_reloc_tree()
1951 nritems = btrfs_header_nritems(eb); walk_up_reloc_tree()
1954 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= walk_up_reloc_tree()
1974 struct extent_buffer *eb = NULL; walk_down_reloc_tree() local
1984 eb = path->nodes[i]; walk_down_reloc_tree()
1985 nritems = btrfs_header_nritems(eb); walk_down_reloc_tree()
1987 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); walk_down_reloc_tree()
2003 bytenr = btrfs_node_blockptr(eb, path->slots[i]); walk_down_reloc_tree()
2004 eb = read_tree_block(root, bytenr, ptr_gen); walk_down_reloc_tree()
2005 if (IS_ERR(eb)) { walk_down_reloc_tree()
2006 return PTR_ERR(eb); walk_down_reloc_tree()
2007 } else if (!extent_buffer_uptodate(eb)) { walk_down_reloc_tree()
2008 free_extent_buffer(eb); walk_down_reloc_tree()
2011 BUG_ON(btrfs_header_level(eb) != i - 1); walk_down_reloc_tree()
2012 path->nodes[i - 1] = eb; walk_down_reloc_tree()
2646 struct extent_buffer *eb; do_relocation() local
2654 BUG_ON(lowest && node->eb); do_relocation()
2665 if (upper->eb && !upper->locked) { do_relocation()
2667 ret = btrfs_bin_search(upper->eb, key, do_relocation()
2670 bytenr = btrfs_node_blockptr(upper->eb, slot); do_relocation()
2671 if (node->eb->start == bytenr) do_relocation()
2677 if (!upper->eb) { do_relocation()
2685 if (!upper->eb) { do_relocation()
2686 upper->eb = path->nodes[upper->level]; do_relocation()
2689 BUG_ON(upper->eb != path->nodes[upper->level]); do_relocation()
2698 ret = btrfs_bin_search(upper->eb, key, upper->level, do_relocation()
2703 bytenr = btrfs_node_blockptr(upper->eb, slot); do_relocation()
2707 if (node->eb->start == bytenr) do_relocation()
2712 generation = btrfs_node_ptr_generation(upper->eb, slot); do_relocation()
2713 eb = read_tree_block(root, bytenr, generation); do_relocation()
2714 if (IS_ERR(eb)) { do_relocation()
2715 err = PTR_ERR(eb); do_relocation()
2717 } else if (!extent_buffer_uptodate(eb)) { do_relocation()
2718 free_extent_buffer(eb); do_relocation()
2722 btrfs_tree_lock(eb); do_relocation()
2723 btrfs_set_lock_blocking(eb); do_relocation()
2725 if (!node->eb) { do_relocation()
2726 ret = btrfs_cow_block(trans, root, eb, upper->eb, do_relocation()
2727 slot, &eb); do_relocation()
2728 btrfs_tree_unlock(eb); do_relocation()
2729 free_extent_buffer(eb); do_relocation()
2734 BUG_ON(node->eb != eb); do_relocation()
2736 btrfs_set_node_blockptr(upper->eb, slot, do_relocation()
2737 node->eb->start); do_relocation()
2738 btrfs_set_node_ptr_generation(upper->eb, slot, do_relocation()
2740 btrfs_mark_buffer_dirty(upper->eb); do_relocation()
2743 node->eb->start, blocksize, do_relocation()
2744 upper->eb->start, do_relocation()
2745 btrfs_header_owner(upper->eb), do_relocation()
2749 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); do_relocation()
2779 btrfs_node_key_to_cpu(node->eb, &key, 0); link_to_upper()
2875 struct extent_buffer *eb; get_tree_block_key() local
2878 eb = read_tree_block(rc->extent_root, block->bytenr, get_tree_block_key()
2880 if (IS_ERR(eb)) { get_tree_block_key()
2881 return PTR_ERR(eb); get_tree_block_key()
2882 } else if (!extent_buffer_uptodate(eb)) { get_tree_block_key()
2883 free_extent_buffer(eb); get_tree_block_key()
2886 WARN_ON(btrfs_header_level(eb) != block->level); get_tree_block_key()
2888 btrfs_item_key_to_cpu(eb, &block->key, 0); get_tree_block_key()
2890 btrfs_node_key_to_cpu(eb, &block->key, 0); get_tree_block_key()
2891 free_extent_buffer(eb); get_tree_block_key()
3286 struct extent_buffer *eb; add_tree_block() local
3295 eb = path->nodes[0]; add_tree_block()
3296 item_size = btrfs_item_size_nr(eb, path->slots[0]); add_tree_block()
3300 ei = btrfs_item_ptr(eb, path->slots[0], add_tree_block()
3304 level = btrfs_tree_block_level(eb, bi); add_tree_block()
3308 generation = btrfs_extent_generation(eb, ei); add_tree_block()
3417 struct extent_buffer *eb) block_use_full_backref()
3422 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || block_use_full_backref()
3423 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) block_use_full_backref()
3427 eb->start, btrfs_header_level(eb), 1, block_use_full_backref()
3662 struct extent_buffer *eb; local
3671 eb = path->nodes[0];
3672 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3673 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3683 key.type = btrfs_extent_inline_ref_type(eb, iref);
3685 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3691 eb, dref, blocks);
3705 eb = path->nodes[0];
3706 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3714 eb = path->nodes[0];
3717 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3731 dref = btrfs_item_ptr(eb, path->slots[0],
3734 eb, dref, blocks);
4570 node->eb = cow; btrfs_reloc_cow_block()
3416 block_use_full_backref(struct reloc_control *rc, struct extent_buffer *eb) block_use_full_backref() argument
ctree.c
43 struct extent_buffer *eb);
150 struct extent_buffer *eb; btrfs_root_node() local
154 eb = rcu_dereference(root->node); btrfs_root_node()
162 if (atomic_inc_not_zero(&eb->refs)) { btrfs_root_node()
169 return eb; btrfs_root_node()
178 struct extent_buffer *eb; btrfs_lock_root_node() local
181 eb = btrfs_root_node(root); btrfs_lock_root_node()
182 btrfs_tree_lock(eb); btrfs_lock_root_node()
183 if (eb == root->node) btrfs_lock_root_node()
185 btrfs_tree_unlock(eb); btrfs_lock_root_node()
186 free_extent_buffer(eb); btrfs_lock_root_node()
188 return eb; btrfs_lock_root_node()
197 struct extent_buffer *eb; btrfs_read_lock_root_node() local
200 eb = btrfs_root_node(root); btrfs_read_lock_root_node()
201 btrfs_tree_read_lock(eb); btrfs_read_lock_root_node()
202 if (eb == root->node) btrfs_read_lock_root_node()
204 btrfs_tree_read_unlock(eb); btrfs_read_lock_root_node()
205 free_extent_buffer(eb); btrfs_read_lock_root_node()
207 return eb; btrfs_read_lock_root_node()
487 struct extent_buffer *eb) { tree_mod_dont_log()
491 if (eb && btrfs_header_level(eb) == 0) tree_mod_dont_log()
505 struct extent_buffer *eb) tree_mod_need_log()
510 if (eb && btrfs_header_level(eb) == 0) tree_mod_need_log()
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot, alloc_tree_mod_elem() argument
526 tm->index = eb->start >> PAGE_CACHE_SHIFT; alloc_tree_mod_elem()
528 btrfs_node_key(eb, &tm->key, slot); alloc_tree_mod_elem()
529 tm->blockptr = btrfs_node_blockptr(eb, slot); alloc_tree_mod_elem()
533 tm->generation = btrfs_node_ptr_generation(eb, slot); alloc_tree_mod_elem()
541 struct extent_buffer *eb, int slot, tree_mod_log_insert_key()
547 if (!tree_mod_need_log(fs_info, eb)) tree_mod_log_insert_key()
550 tm = alloc_tree_mod_elem(eb, slot, op, flags); tree_mod_log_insert_key()
554 if (tree_mod_dont_log(fs_info, eb)) { tree_mod_log_insert_key()
569 struct extent_buffer *eb, int dst_slot, int src_slot, tree_mod_log_insert_move()
578 if (!tree_mod_need_log(fs_info, eb)) tree_mod_log_insert_move()
591 tm->index = eb->start >> PAGE_CACHE_SHIFT; tree_mod_log_insert_move()
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, tree_mod_log_insert_move()
606 if (tree_mod_dont_log(fs_info, eb)) tree_mod_log_insert_move()
883 struct extent_buffer *eb, int slot, int atomic) tree_mod_log_set_node_key()
887 ret = tree_mod_log_insert_key(fs_info, eb, slot, tree_mod_log_set_node_key()
894 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_log_free_eb() argument
901 if (btrfs_header_level(eb) == 0) tree_mod_log_free_eb()
907 nritems = btrfs_header_nritems(eb); tree_mod_log_free_eb()
913 tm_list[i] = alloc_tree_mod_elem(eb, i, tree_mod_log_free_eb()
921 if (tree_mod_dont_log(fs_info, eb)) tree_mod_log_free_eb()
1271 * tm is a pointer to the first operation to rewind within eb. then, all
1276 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, __tree_mod_log_rewind() argument
1286 n = btrfs_header_nritems(eb); __tree_mod_log_rewind()
1300 btrfs_set_node_key(eb, &tm->key, tm->slot); __tree_mod_log_rewind()
1301 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); __tree_mod_log_rewind()
1302 btrfs_set_node_ptr_generation(eb, tm->slot, __tree_mod_log_rewind()
1308 btrfs_set_node_key(eb, &tm->key, tm->slot); __tree_mod_log_rewind()
1309 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); __tree_mod_log_rewind()
1310 btrfs_set_node_ptr_generation(eb, tm->slot, __tree_mod_log_rewind()
1320 memmove_extent_buffer(eb, o_dst, o_src, __tree_mod_log_rewind()
1343 btrfs_set_header_nritems(eb, n); __tree_mod_log_rewind()
1347 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
1355 struct extent_buffer *eb, u64 time_seq) tree_mod_log_rewind()
1361 return eb; tree_mod_log_rewind()
1363 if (btrfs_header_level(eb) == 0) tree_mod_log_rewind()
1364 return eb; tree_mod_log_rewind()
1366 tm = tree_mod_log_search(fs_info, eb->start, time_seq); tree_mod_log_rewind()
1368 return eb; tree_mod_log_rewind()
1371 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); tree_mod_log_rewind()
1375 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); tree_mod_log_rewind()
1377 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1378 free_extent_buffer(eb); tree_mod_log_rewind()
1381 btrfs_set_header_bytenr(eb_rewin, eb->start); tree_mod_log_rewind()
1383 btrfs_header_backref_rev(eb)); tree_mod_log_rewind()
1384 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); tree_mod_log_rewind()
1385 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); tree_mod_log_rewind()
1387 eb_rewin = btrfs_clone_extent_buffer(eb); tree_mod_log_rewind()
1389 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1390 free_extent_buffer(eb); tree_mod_log_rewind()
1396 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1397 free_extent_buffer(eb); tree_mod_log_rewind()
1419 struct extent_buffer *eb = NULL; get_old_root() local
1450 eb = btrfs_clone_extent_buffer(old); get_old_root()
1456 eb = alloc_dummy_extent_buffer(root->fs_info, logical); get_old_root()
1459 eb = btrfs_clone_extent_buffer(eb_root); get_old_root()
1464 if (!eb) get_old_root()
1466 extent_buffer_get(eb); get_old_root()
1467 btrfs_tree_read_lock(eb); get_old_root()
1469 btrfs_set_header_bytenr(eb, eb->start); get_old_root()
1470 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); get_old_root()
1471 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); get_old_root()
1472 btrfs_set_header_level(eb, old_root->level); get_old_root()
1473 btrfs_set_header_generation(eb, old_generation); get_old_root()
1476 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); get_old_root()
1478 WARN_ON(btrfs_header_level(eb) != 0); get_old_root()
1479 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); get_old_root()
1481 return eb; get_old_root()
1753 static noinline int generic_bin_search(struct extent_buffer *eb, generic_bin_search() argument
1778 err = map_private_extent_buffer(eb, offset, generic_bin_search()
1786 read_extent_buffer(eb, &unaligned, generic_bin_search()
1814 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, bin_search() argument
1818 return generic_bin_search(eb, bin_search()
1821 key, btrfs_header_nritems(eb), bin_search()
1824 return generic_bin_search(eb, bin_search()
1827 key, btrfs_header_nritems(eb), bin_search()
1831 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, btrfs_bin_search() argument
1834 return bin_search(eb, key, level, slot); btrfs_bin_search()
1861 struct extent_buffer *eb; read_node_slot() local
1870 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot), read_node_slot()
1872 if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) { read_node_slot()
1873 if (!IS_ERR(eb)) read_node_slot()
1874 free_extent_buffer(eb); read_node_slot()
1875 eb = NULL; read_node_slot()
1878 return eb; read_node_slot()
2252 struct extent_buffer *eb; reada_for_search() local
2267 eb = btrfs_find_tree_block(root->fs_info, search); reada_for_search()
2268 if (eb) { reada_for_search()
2269 free_extent_buffer(eb); reada_for_search()
2312 struct extent_buffer *eb; reada_for_balance() local
2327 eb = btrfs_find_tree_block(root->fs_info, block1); reada_for_balance()
2333 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) reada_for_balance()
2335 free_extent_buffer(eb); reada_for_balance()
2340 eb = btrfs_find_tree_block(root->fs_info, block2); reada_for_balance()
2341 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) reada_for_balance()
2343 free_extent_buffer(eb); reada_for_balance()
2628 struct extent_buffer *eb; btrfs_find_item() local
2641 eb = path->nodes[0]; btrfs_find_item()
2642 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { btrfs_find_item()
2646 eb = path->nodes[0]; btrfs_find_item()
2649 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); btrfs_find_item()
2989 * Since we can unwind ebs, we want to do a real search every btrfs_search_old_slot()
3165 struct extent_buffer *eb; btrfs_set_item_key_safe() local
3168 eb = path->nodes[0]; btrfs_set_item_key_safe()
3171 btrfs_item_key(eb, &disk_key, slot - 1); btrfs_set_item_key_safe()
3174 if (slot < btrfs_header_nritems(eb) - 1) { btrfs_set_item_key_safe()
3175 btrfs_item_key(eb, &disk_key, slot + 1); btrfs_set_item_key_safe()
3180 btrfs_set_item_key(eb, &disk_key, slot); btrfs_set_item_key_safe()
3181 btrfs_mark_buffer_dirty(eb); btrfs_set_item_key_safe()
486 tree_mod_dont_log(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_dont_log() argument
504 tree_mod_need_log(const struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_need_log() argument
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int slot, enum mod_log_op op, gfp_t flags) tree_mod_log_insert_key() argument
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items, gfp_t flags) tree_mod_log_insert_move() argument
882 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int slot, int atomic) tree_mod_log_set_node_key() argument
1354 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct extent_buffer *eb, u64 time_seq) tree_mod_log_rewind() argument
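
A minimal sketch of the child-read pattern behind the read_node_slot() hits above: resolve the block pointer and generation stored in the parent's slot, read the block, and drop the buffer if it fails its checks. Illustrative only, assuming the btrfs ctree.h/disk-io.h declarations are in scope; read_child_eb() is a made-up name, not a function from the tree.

/* Follow one child pointer of an interior node; NULL on failure. */
static struct extent_buffer *read_child_eb(struct btrfs_root *root,
                                           struct extent_buffer *parent,
                                           int slot)
{
        u64 bytenr = btrfs_node_blockptr(parent, slot);
        u64 gen = btrfs_node_ptr_generation(parent, slot);
        struct extent_buffer *eb;

        eb = read_tree_block(root, bytenr, gen);
        if (IS_ERR(eb))
                return NULL;                    /* I/O setup failed */
        if (!extent_buffer_uptodate(eb)) {
                free_extent_buffer(eb);         /* drop our reference */
                return NULL;                    /* csum/transid check failed */
        }
        return eb;      /* caller must free_extent_buffer() when done */
}
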
H A Dctree.h2285 struct extent_buffer *eb; member in struct:btrfs_map_token
2303 #define read_eb_member(eb, ptr, type, member, result) ( \
2304 read_extent_buffer(eb, (char *)(result), \
2309 #define write_eb_member(eb, ptr, type, member, result) ( \
2310 write_extent_buffer(eb, (char *)(result), \
2316 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
2319 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
2322 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
2325 return btrfs_get_token_##bits(eb, ptr, off, NULL); \
2327 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
2330 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
2339 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
2342 return btrfs_get_##bits(eb, s, offsetof(type, member)); \
2344 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
2348 btrfs_set_##bits(eb, s, offsetof(type, member), val); \
2350 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
2354 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
2356 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
2361 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
2365 static inline u##bits btrfs_##name(struct extent_buffer *eb) \
2367 type *p = page_address(eb->pages[0]); \
2371 static inline void btrfs_set_##name(struct extent_buffer *eb, \
2374 type *p = page_address(eb->pages[0]); \
2482 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, btrfs_stripe_offset_nr() argument
2485 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); btrfs_stripe_offset_nr()
2488 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, btrfs_stripe_devid_nr() argument
2491 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); btrfs_stripe_devid_nr()
2580 static inline void btrfs_tree_block_key(struct extent_buffer *eb, btrfs_tree_block_key() argument
2584 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); btrfs_tree_block_key()
2587 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, btrfs_set_tree_block_key() argument
2591 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); btrfs_set_tree_block_key()
2640 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) btrfs_node_blockptr() argument
2645 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); btrfs_node_blockptr()
2648 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, btrfs_set_node_blockptr() argument
2654 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); btrfs_set_node_blockptr()
2657 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) btrfs_node_ptr_generation() argument
2662 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); btrfs_node_ptr_generation()
2665 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, btrfs_set_node_ptr_generation() argument
2671 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); btrfs_set_node_ptr_generation()
2680 void btrfs_node_key(struct extent_buffer *eb,
2683 static inline void btrfs_set_node_key(struct extent_buffer *eb, btrfs_set_node_key() argument
2688 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, btrfs_set_node_key()
2709 static inline u32 btrfs_item_end(struct extent_buffer *eb, btrfs_item_end() argument
2712 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); btrfs_item_end()
2715 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) btrfs_item_end_nr() argument
2717 return btrfs_item_end(eb, btrfs_item_nr(nr)); btrfs_item_end_nr()
2720 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) btrfs_item_offset_nr() argument
2722 return btrfs_item_offset(eb, btrfs_item_nr(nr)); btrfs_item_offset_nr()
2725 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) btrfs_item_size_nr() argument
2727 return btrfs_item_size(eb, btrfs_item_nr(nr)); btrfs_item_size_nr()
2730 static inline void btrfs_item_key(struct extent_buffer *eb, btrfs_item_key() argument
2734 read_eb_member(eb, item, struct btrfs_item, key, disk_key); btrfs_item_key()
2737 static inline void btrfs_set_item_key(struct extent_buffer *eb, btrfs_set_item_key() argument
2741 write_eb_member(eb, item, struct btrfs_item, key, disk_key); btrfs_set_item_key()
2766 static inline void btrfs_dir_item_key(struct extent_buffer *eb, btrfs_dir_item_key() argument
2770 read_eb_member(eb, item, struct btrfs_dir_item, location, key); btrfs_dir_item_key()
2773 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, btrfs_set_dir_item_key() argument
2777 write_eb_member(eb, item, struct btrfs_dir_item, location, key); btrfs_set_dir_item_key()
2787 static inline void btrfs_free_space_key(struct extent_buffer *eb, btrfs_free_space_key() argument
2791 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); btrfs_free_space_key()
2794 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, btrfs_set_free_space_key() argument
2798 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); btrfs_set_free_space_key()
2823 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, btrfs_node_key_to_cpu() argument
2827 btrfs_node_key(eb, &disk_key, nr); btrfs_node_key_to_cpu()
2831 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, btrfs_item_key_to_cpu() argument
2835 btrfs_item_key(eb, &disk_key, nr); btrfs_item_key_to_cpu()
2839 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, btrfs_dir_item_key_to_cpu() argument
2844 btrfs_dir_item_key(eb, item, &disk_key); btrfs_dir_item_key_to_cpu()
2874 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) btrfs_header_flag() argument
2876 return (btrfs_header_flags(eb) & flag) == flag; btrfs_header_flag()
2879 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) btrfs_set_header_flag() argument
2881 u64 flags = btrfs_header_flags(eb); btrfs_set_header_flag()
2882 btrfs_set_header_flags(eb, flags | flag); btrfs_set_header_flag()
2886 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) btrfs_clear_header_flag() argument
2888 u64 flags = btrfs_header_flags(eb); btrfs_clear_header_flag()
2889 btrfs_set_header_flags(eb, flags & ~flag); btrfs_clear_header_flag()
2893 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) btrfs_header_backref_rev() argument
2895 u64 flags = btrfs_header_flags(eb); btrfs_header_backref_rev()
2899 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, btrfs_set_header_backref_rev() argument
2902 u64 flags = btrfs_header_flags(eb); btrfs_set_header_backref_rev()
2905 btrfs_set_header_flags(eb, flags); btrfs_set_header_backref_rev()
2913 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) btrfs_header_chunk_tree_uuid() argument
2918 static inline int btrfs_is_leaf(struct extent_buffer *eb) btrfs_is_leaf() argument
2920 return btrfs_header_level(eb) == 0; btrfs_is_leaf()
3014 static inline void btrfs_balance_data(struct extent_buffer *eb, btrfs_balance_data() argument
3018 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); btrfs_balance_data()
3021 static inline void btrfs_set_balance_data(struct extent_buffer *eb, btrfs_set_balance_data() argument
3025 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); btrfs_set_balance_data()
3028 static inline void btrfs_balance_meta(struct extent_buffer *eb, btrfs_balance_meta() argument
3032 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); btrfs_balance_meta()
3035 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, btrfs_set_balance_meta() argument
3039 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); btrfs_set_balance_meta()
3042 static inline void btrfs_balance_sys(struct extent_buffer *eb, btrfs_balance_sys() argument
3046 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); btrfs_balance_sys()
3049 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, btrfs_set_balance_sys() argument
3053 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); btrfs_set_balance_sys()
3206 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, btrfs_file_extent_inline_item_len() argument
3209 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; btrfs_file_extent_inline_item_len()
3215 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, btrfs_file_extent_inline_len() argument
3226 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && btrfs_file_extent_inline_len()
3227 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && btrfs_file_extent_inline_len()
3228 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { btrfs_file_extent_inline_len()
3229 return btrfs_file_extent_inline_item_len(eb, btrfs_file_extent_inline_len()
3234 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); btrfs_file_extent_inline_len()
3239 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, btrfs_dev_stats_value() argument
3245 read_extent_buffer(eb, &val, btrfs_dev_stats_value()
3252 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, btrfs_set_dev_stats_value() argument
3256 write_extent_buffer(eb, &val, btrfs_set_dev_stats_value()
3412 struct extent_buffer *eb);
3577 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
4365 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
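
The ctree.h hits above are the generated accessors (read_eb_member/write_eb_member and the btrfs_get/set_##bits families) that eb consumers go through. A minimal sketch of how they compose when walking a leaf; illustrative only, assuming ctree.h, with pr_info and the btrfs_key fields taken from the usual kernel headers. dump_leaf_keys() is not a kernel function.

/* Print every item key and size in a leaf using the ctree.h accessors. */
static void dump_leaf_keys(struct extent_buffer *eb)
{
        struct btrfs_key key;
        u32 nritems = btrfs_header_nritems(eb);
        u32 i;

        if (!btrfs_is_leaf(eb))
                return;         /* interior nodes hold key_ptrs, not items */

        for (i = 0; i < nritems; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);     /* disk key -> CPU-endian key */
                pr_info("slot %u: key (%llu %u %llu) size %u\n",
                        i, key.objectid, key.type, key.offset,
                        btrfs_item_size_nr(eb, i));
        }
}
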
H A Ddisk-io.c135 * eb, the lockdep key is determined by the btrfs_root it belongs to and
136 * the level the eb occupies in the tree.
195 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, btrfs_set_buffer_lockdep_class() argument
207 lockdep_set_class_and_name(&eb->lock, btrfs_set_buffer_lockdep_class()
346 struct extent_buffer *eb, u64 parent_transid, verify_parent_transid()
353 if (!parent_transid || btrfs_header_generation(eb) == parent_transid) verify_parent_transid()
360 btrfs_tree_read_lock(eb); verify_parent_transid()
361 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); verify_parent_transid()
364 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, verify_parent_transid()
366 if (extent_buffer_uptodate(eb) && verify_parent_transid()
367 btrfs_header_generation(eb) == parent_transid) { verify_parent_transid()
371 btrfs_err_rl(eb->fs_info, verify_parent_transid()
373 eb->start, verify_parent_transid()
374 parent_transid, btrfs_header_generation(eb)); verify_parent_transid()
381 * if we find an eb that is under IO (dirty/writeback) because we could verify_parent_transid()
385 if (!extent_buffer_under_io(eb)) verify_parent_transid()
386 clear_extent_buffer_uptodate(eb); verify_parent_transid()
388 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, verify_parent_transid()
391 btrfs_tree_read_unlock_blocking(eb); verify_parent_transid()
438 struct extent_buffer *eb, btree_read_extent_buffer_pages()
448 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); btree_read_extent_buffer_pages()
451 ret = read_extent_buffer_pages(io_tree, eb, start, btree_read_extent_buffer_pages()
455 if (!verify_parent_transid(io_tree, eb, btree_read_extent_buffer_pages()
467 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) btree_read_extent_buffer_pages()
471 eb->start, eb->len); btree_read_extent_buffer_pages()
477 failed_mirror = eb->read_mirror; btree_read_extent_buffer_pages()
489 repair_eb_io_failure(root, eb, failed_mirror); btree_read_extent_buffer_pages()
503 struct extent_buffer *eb; csum_dirty_buffer() local
505 eb = (struct extent_buffer *)page->private; csum_dirty_buffer()
506 if (page != eb->pages[0]) csum_dirty_buffer()
508 found_start = btrfs_header_bytenr(eb); csum_dirty_buffer()
511 csum_tree_block(fs_info, eb, 0); csum_dirty_buffer()
516 struct extent_buffer *eb) check_tree_block_fsid()
522 read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); check_tree_block_fsid()
533 #define CORRUPT(reason, eb, root, slot) \
536 btrfs_header_bytenr(eb), root->objectid, slot)
605 struct extent_buffer *eb; btree_readpage_end_io_hook() local
613 eb = (struct extent_buffer *)page->private; btree_readpage_end_io_hook()
618 extent_buffer_get(eb); btree_readpage_end_io_hook()
620 reads_done = atomic_dec_and_test(&eb->io_pages); btree_readpage_end_io_hook()
624 eb->read_mirror = mirror; btree_readpage_end_io_hook()
625 if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) { btree_readpage_end_io_hook()
630 found_start = btrfs_header_bytenr(eb); btree_readpage_end_io_hook()
631 if (found_start != eb->start) { btree_readpage_end_io_hook()
632 btrfs_err_rl(eb->fs_info, "bad tree block start %llu %llu", btree_readpage_end_io_hook()
633 found_start, eb->start); btree_readpage_end_io_hook()
637 if (check_tree_block_fsid(root->fs_info, eb)) { btree_readpage_end_io_hook()
638 btrfs_err_rl(eb->fs_info, "bad fsid on block %llu", btree_readpage_end_io_hook()
639 eb->start); btree_readpage_end_io_hook()
643 found_level = btrfs_header_level(eb); btree_readpage_end_io_hook()
646 (int)btrfs_header_level(eb)); btree_readpage_end_io_hook()
651 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), btree_readpage_end_io_hook()
652 eb, found_level); btree_readpage_end_io_hook() local
654 ret = csum_tree_block(root->fs_info, eb, 1); btree_readpage_end_io_hook()
665 if (found_level == 0 && check_leaf(root, eb)) { btree_readpage_end_io_hook()
666 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); btree_readpage_end_io_hook()
671 set_extent_buffer_uptodate(eb); btree_readpage_end_io_hook()
674 test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readpage_end_io_hook()
675 btree_readahead_hook(root, eb, eb->start, ret); btree_readpage_end_io_hook()
683 atomic_inc(&eb->io_pages); btree_readpage_end_io_hook()
684 clear_extent_buffer_uptodate(eb); btree_readpage_end_io_hook()
686 free_extent_buffer(eb); btree_readpage_end_io_hook()
693 struct extent_buffer *eb; btree_io_failed_hook() local
696 eb = (struct extent_buffer *)page->private; btree_io_failed_hook()
697 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); btree_io_failed_hook()
698 eb->read_mirror = failed_mirror; btree_io_failed_hook()
699 atomic_dec(&eb->io_pages); btree_io_failed_hook()
700 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_io_failed_hook()
701 btree_readahead_hook(root, eb, eb->start, -EIO); btree_io_failed_hook()
1057 struct extent_buffer *eb; btree_set_page_dirty() local
1060 eb = (struct extent_buffer *)page->private; btree_set_page_dirty()
1061 BUG_ON(!eb); btree_set_page_dirty()
1062 BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); btree_set_page_dirty()
1063 BUG_ON(!atomic_read(&eb->refs)); btree_set_page_dirty()
1064 btrfs_assert_tree_locked(eb); btree_set_page_dirty()
1094 int mirror_num, struct extent_buffer **eb) reada_tree_block_flagged()
1118 *eb = buf; reada_tree_block_flagged()
4302 struct extent_buffer *eb; btrfs_destroy_marked_extents() local
4314 eb = btrfs_find_tree_block(root->fs_info, start); btrfs_destroy_marked_extents()
4316 if (!eb) btrfs_destroy_marked_extents()
4318 wait_on_extent_buffer_writeback(eb); btrfs_destroy_marked_extents()
4321 &eb->bflags)) btrfs_destroy_marked_extents()
4322 clear_extent_buffer_dirty(eb); btrfs_destroy_marked_extents()
4323 free_extent_buffer_stale(eb); btrfs_destroy_marked_extents()
345 verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid, int atomic) verify_parent_transid() argument
437 btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) btree_read_extent_buffer_pages() argument
515 check_tree_block_fsid(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) check_tree_block_fsid() argument
1093 reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb) reada_tree_block_flagged() argument
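
The disk-io.c hits show the read-lock-then-check discipline around an eb's generation. A minimal sketch of that pattern, reduced from the verify_parent_transid() lines above; illustrative only, assuming the ctree.h/locking.h declarations, and eb_generation_matches() is a made-up name.

/* Return 0 if the buffer is uptodate and carries the expected generation. */
static int eb_generation_matches(struct extent_buffer *eb, u64 parent_transid)
{
        int mismatch;

        if (!parent_transid)
                return 0;               /* caller did not ask for a transid check */

        btrfs_tree_read_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);        /* may block below */
        mismatch = !(extent_buffer_uptodate(eb) &&
                     btrfs_header_generation(eb) == parent_transid);
        btrfs_tree_read_unlock_blocking(eb);

        return mismatch;
}
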
H A Dtree-log.c282 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
290 struct extent_buffer *eb, process_one_buffer()
300 ret = btrfs_read_buffer(eb, gen); process_one_buffer()
307 eb->start, eb->len); process_one_buffer()
309 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { process_one_buffer()
310 if (wc->pin && btrfs_header_level(eb) == 0) process_one_buffer()
311 ret = btrfs_exclude_logged_extents(log, eb); process_one_buffer()
313 btrfs_write_tree_block(eb); process_one_buffer()
315 btrfs_wait_tree_block_writeback(eb); process_one_buffer()
321 * Item overwrite used by replay and tree logging. eb, slot and key all refer
337 struct extent_buffer *eb, int slot, overwrite_item()
352 item_size = btrfs_item_size_nr(eb, slot); overwrite_item()
353 src_ptr = btrfs_item_ptr_offset(eb, slot); overwrite_item()
381 read_extent_buffer(eb, src_copy, src_ptr, item_size); overwrite_item()
413 item = btrfs_item_ptr(eb, slot, overwrite_item()
415 btrfs_set_inode_nbytes(eb, item, nbytes); overwrite_item()
422 mode = btrfs_inode_mode(eb, item); overwrite_item()
424 btrfs_set_inode_size(eb, item, 0); overwrite_item()
434 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); overwrite_item()
435 btrfs_set_inode_nbytes(eb, item, 0); overwrite_item()
442 mode = btrfs_inode_mode(eb, item); overwrite_item()
444 btrfs_set_inode_size(eb, item, 0); overwrite_item()
486 if (btrfs_inode_generation(eb, src_item) == 0) { overwrite_item()
488 const u64 ino_size = btrfs_inode_size(eb, src_item); overwrite_item()
497 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && overwrite_item()
510 S_ISDIR(btrfs_inode_mode(eb, src_item)) && overwrite_item()
518 copy_extent_buffer(path->nodes[0], eb, dst_ptr, overwrite_item()
565 /* replays a single extent in 'eb' at 'slot' with 'key' into the
580 struct extent_buffer *eb, int slot, replay_one_extent()
592 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); replay_one_extent()
593 found_type = btrfs_file_extent_type(eb, item); replay_one_extent()
597 nbytes = btrfs_file_extent_num_bytes(eb, item); replay_one_extent()
604 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) replay_one_extent()
607 size = btrfs_file_extent_inline_len(eb, slot, item); replay_one_extent()
608 nbytes = btrfs_file_extent_ram_bytes(eb, item); replay_one_extent()
641 read_extent_buffer(eb, &cmp1, (unsigned long)item, replay_one_extent()
674 copy_extent_buffer(path->nodes[0], eb, dest_offset, replay_one_extent()
677 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); replay_one_extent()
678 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); replay_one_extent()
680 offset = key->offset - btrfs_file_extent_offset(eb, item); replay_one_extent()
712 if (btrfs_file_extent_compression(eb, item)) { replay_one_extent()
717 btrfs_file_extent_offset(eb, item); replay_one_extent()
719 btrfs_file_extent_num_bytes(eb, item); replay_one_extent()
800 ret = overwrite_item(trans, root, path, eb, slot, key); replay_one_extent()
972 struct extent_buffer *eb, __add_inode_ref()
1148 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, extref_get_fields() argument
1156 *namelen = btrfs_inode_extref_name_len(eb, extref); extref_get_fields()
1161 read_extent_buffer(eb, *name, (unsigned long)&extref->name, extref_get_fields()
1164 *index = btrfs_inode_extref_index(eb, extref); extref_get_fields()
1166 *parent_objectid = btrfs_inode_extref_parent(eb, extref); extref_get_fields()
1171 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, ref_get_fields() argument
1178 *namelen = btrfs_inode_ref_name_len(eb, ref); ref_get_fields()
1183 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); ref_get_fields()
1185 *index = btrfs_inode_ref_index(eb, ref); ref_get_fields()
1192 * eb, slot and key refer to the buffer and key found in the log tree.
1200 struct extent_buffer *eb, int slot, add_inode_ref()
1217 ref_ptr = btrfs_item_ptr_offset(eb, slot); add_inode_ref()
1218 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); add_inode_ref()
1226 parent_objectid = btrfs_inode_extref_parent(eb, r); add_inode_ref()
1253 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, add_inode_ref()
1266 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, add_inode_ref()
1285 dir, inode, eb, add_inode_ref()
1316 ret = overwrite_item(trans, root, path, eb, slot, key); add_inode_ref()
1669 struct extent_buffer *eb, replay_one_name()
1689 name_len = btrfs_dir_name_len(eb, di); replay_one_name()
1696 log_type = btrfs_dir_type(eb, di); replay_one_name()
1697 read_extent_buffer(eb, name, (unsigned long)(di + 1), replay_one_name()
1700 btrfs_dir_item_key_to_cpu(eb, di, &log_key); replay_one_name()
1794 struct extent_buffer *eb, int slot, replay_one_dir_item()
1798 u32 item_size = btrfs_item_size_nr(eb, slot); replay_one_dir_item()
1805 ptr = btrfs_item_ptr_offset(eb, slot); replay_one_dir_item()
1809 if (verify_dir_item(root, eb, di)) replay_one_dir_item()
1811 name_len = btrfs_dir_name_len(eb, di); replay_one_dir_item()
1812 ret = replay_one_name(trans, root, path, eb, di, key); replay_one_dir_item()
1845 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { replay_one_dir_item()
1856 btrfs_dir_item_key_to_cpu(eb, di, &di_key); replay_one_dir_item()
1965 struct extent_buffer *eb; check_item_in_log() local
1978 eb = path->nodes[0]; check_item_in_log()
1980 item_size = btrfs_item_size_nr(eb, slot); check_item_in_log()
1981 ptr = btrfs_item_ptr_offset(eb, slot); check_item_in_log()
1985 if (verify_dir_item(root, eb, di)) { check_item_in_log()
1990 name_len = btrfs_dir_name_len(eb, di); check_item_in_log()
1996 read_extent_buffer(eb, name, (unsigned long)(di + 1), check_item_in_log()
2011 btrfs_dir_item_key_to_cpu(eb, di, &location); check_item_in_log()
2279 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, replay_one_buffer() argument
2290 ret = btrfs_read_buffer(eb, gen); replay_one_buffer()
2294 level = btrfs_header_level(eb); replay_one_buffer()
2303 nritems = btrfs_header_nritems(eb); replay_one_buffer()
2305 btrfs_item_key_to_cpu(eb, &key, i); replay_one_buffer()
2313 inode_item = btrfs_item_ptr(eb, i, replay_one_buffer()
2319 mode = btrfs_inode_mode(eb, inode_item); replay_one_buffer()
2327 eb, i, &key); replay_one_buffer()
2351 eb, i, &key); replay_one_buffer()
2362 eb, i, &key); replay_one_buffer()
2368 eb, i, &key); replay_one_buffer()
2374 eb, i, &key); replay_one_buffer()
2379 eb, i, &key); replay_one_buffer()
4451 static int btrfs_check_ref_name_override(struct extent_buffer *eb, btrfs_check_ref_name_override() argument
4460 u32 item_size = btrfs_item_size_nr(eb, slot); btrfs_check_ref_name_override()
4462 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); btrfs_check_ref_name_override()
4482 this_name_len = btrfs_inode_ref_name_len(eb, iref); btrfs_check_ref_name_override()
4490 parent = btrfs_inode_extref_parent(eb, extref); btrfs_check_ref_name_override()
4491 this_name_len = btrfs_inode_extref_name_len(eb, extref); btrfs_check_ref_name_override()
4508 read_extent_buffer(eb, name, name_ptr, this_name_len); btrfs_check_ref_name_override()
289 process_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen) process_one_buffer() argument
334 overwrite_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) overwrite_item() argument
577 replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_extent() argument
967 __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_root *log_root, struct inode *dir, struct inode *inode, struct extent_buffer *eb, u64 inode_objectid, u64 parent_objectid, u64 ref_index, char *name, int namelen, int *search_done) __add_inode_ref() argument
1196 add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) add_inode_ref() argument
1666 replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, struct btrfs_dir_item *di, struct btrfs_key *key) replay_one_name() argument
1791 replay_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_dir_item() argument
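
replay_one_buffer() above is the canonical shape of a log-replay walk: read the buffer at the expected generation, then dispatch on each item key. A reduced, illustrative sketch of that shape; count_logged_dir_inodes() is not a kernel function, and BTRFS_INODE_ITEM_KEY is assumed from ctree.h.

/* Count directory inode items in a replayed log leaf. */
static int count_logged_dir_inodes(struct extent_buffer *eb, u64 gen)
{
        struct btrfs_inode_item *inode_item;
        struct btrfs_key key;
        int i, nritems, dirs = 0;
        int ret;

        ret = btrfs_read_buffer(eb, gen);       /* verifies csum + generation */
        if (ret)
                return ret;
        if (btrfs_header_level(eb) != 0)
                return 0;                       /* only leaves carry items */

        nritems = btrfs_header_nritems(eb);
        for (i = 0; i < nritems; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);
                if (key.type != BTRFS_INODE_ITEM_KEY)
                        continue;
                inode_item = btrfs_item_ptr(eb, i, struct btrfs_inode_item);
                if (S_ISDIR(btrfs_inode_mode(eb, inode_item)))
                        dirs++;
        }
        return dirs;
}
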
H A Dreada.c107 /* in case of err, eb might be NULL */ __readahead_hook()
108 static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, __readahead_hook() argument
122 if (eb) __readahead_hook()
123 level = btrfs_header_level(eb); __readahead_hook()
146 nritems = level ? btrfs_header_nritems(eb) : 0; __readahead_hook()
147 generation = btrfs_header_generation(eb); __readahead_hook()
171 btrfs_node_key_to_cpu(eb, &key, i); __readahead_hook()
173 btrfs_node_key_to_cpu(eb, &next_key, i + 1); __readahead_hook()
176 bytenr = btrfs_node_blockptr(eb, i); __readahead_hook()
177 n_gen = btrfs_node_ptr_generation(eb, i); __readahead_hook()
233 * start is passed separately in case eb is NULL, which may be the case with
236 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, btree_readahead_hook() argument
241 ret = __readahead_hook(root, eb, start, err); btree_readahead_hook()
661 struct extent_buffer *eb = NULL; reada_start_machine_dev() local
726 mirror_num, &eb); reada_start_machine_dev()
729 else if (eb) reada_start_machine_dev()
730 __readahead_hook(fs_info->extent_root, eb, eb->start, ret); reada_start_machine_dev()
732 if (eb) reada_start_machine_dev()
733 free_extent_buffer(eb); reada_start_machine_dev()
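
__readahead_hook() above walks the key_ptr array of an interior node. A minimal sketch of that loop with the readahead bookkeeping stripped out; illustrative only, the callback and the name for_each_child_ptr() are stand-ins.

/* Visit every (bytenr, generation, key) child pointer of an interior node. */
static void for_each_child_ptr(struct extent_buffer *eb,
                               void (*cb)(u64 bytenr, u64 gen,
                                          struct btrfs_key *key))
{
        struct btrfs_key key;
        int i, nritems;

        if (btrfs_header_level(eb) == 0)
                return;                 /* leaves have no block pointers */

        nritems = btrfs_header_nritems(eb);
        for (i = 0; i < nritems; i++) {
                btrfs_node_key_to_cpu(eb, &key, i);
                cb(btrfs_node_blockptr(eb, i),
                   btrfs_node_ptr_generation(eb, i), &key);
        }
}
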
H A Ddisk-io.h51 int mirror_num, struct extent_buffer **eb);
153 struct extent_buffer *eb, int level);
158 struct extent_buffer *eb, int level) btrfs_set_buffer_lockdep_class()
157 btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) btrfs_set_buffer_lockdep_class() argument
H A Dsend.c452 struct extent_buffer *eb, fs_path_add_from_extent_buffer()
462 read_extent_buffer(eb, prepared, off, len); fs_path_add_from_extent_buffer()
585 struct extent_buffer *eb, tlv_put_btrfs_timespec()
589 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); tlv_put_btrfs_timespec()
631 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
633 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
855 struct extent_buffer *eb = path->nodes[0]; iterate_inode_ref() local
886 ptr = (unsigned long)btrfs_item_ptr(eb, slot, iterate_inode_ref()
889 total = btrfs_item_size(eb, item); iterate_inode_ref()
892 ptr = btrfs_item_ptr_offset(eb, slot); iterate_inode_ref()
893 total = btrfs_item_size_nr(eb, slot); iterate_inode_ref()
902 name_len = btrfs_inode_ref_name_len(eb, iref); iterate_inode_ref()
904 index = btrfs_inode_ref_index(eb, iref); iterate_inode_ref()
908 name_len = btrfs_inode_extref_name_len(eb, extref); iterate_inode_ref()
910 index = btrfs_inode_extref_index(eb, extref); iterate_inode_ref()
911 dir = btrfs_inode_extref_parent(eb, extref); iterate_inode_ref()
916 name_off, eb, dir, iterate_inode_ref()
930 eb, dir, iterate_inode_ref()
940 ret = fs_path_add_from_extent_buffer(p, eb, name_off, iterate_inode_ref()
976 struct extent_buffer *eb; iterate_dir_item() local
1004 eb = path->nodes[0]; iterate_dir_item()
1007 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); iterate_dir_item()
1010 total = btrfs_item_size(eb, item); iterate_dir_item()
1014 name_len = btrfs_dir_name_len(eb, di); iterate_dir_item()
1015 data_len = btrfs_dir_data_len(eb, di); iterate_dir_item()
1016 type = btrfs_dir_type(eb, di); iterate_dir_item()
1017 btrfs_dir_item_key_to_cpu(eb, di, &di_key); iterate_dir_item()
1060 read_extent_buffer(eb, buf, (unsigned long)(di + 1), iterate_dir_item()
1291 struct extent_buffer *eb = path->nodes[0]; find_extent_clone() local
1324 fi = btrfs_item_ptr(eb, path->slots[0], find_extent_clone()
1326 extent_type = btrfs_file_extent_type(eb, fi); find_extent_clone()
1331 compressed = btrfs_file_extent_compression(eb, fi); find_extent_clone()
1333 num_bytes = btrfs_file_extent_num_bytes(eb, fi); find_extent_clone()
1334 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); find_extent_clone()
1339 logical = disk_byte + btrfs_file_extent_offset(eb, fi); find_extent_clone()
1382 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi); find_extent_clone()
2484 struct extent_buffer *eb; send_utimes() local
2507 eb = path->nodes[0]; send_utimes()
2509 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); send_utimes()
2519 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); send_utimes()
2520 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); send_utimes()
2521 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); send_utimes()
2629 struct extent_buffer *eb; did_create_dir() local
2647 eb = path->nodes[0]; did_create_dir()
2649 if (slot >= btrfs_header_nritems(eb)) { did_create_dir()
2660 btrfs_item_key_to_cpu(eb, &found_key, slot); did_create_dir()
2667 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); did_create_dir()
2668 btrfs_dir_item_key_to_cpu(eb, di, &di_key); did_create_dir()
4072 struct extent_buffer *eb; process_all_refs() local
4102 eb = path->nodes[0]; process_all_refs()
4104 if (slot >= btrfs_header_nritems(eb)) { process_all_refs()
4113 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_refs()
4393 struct extent_buffer *eb; process_all_new_xattrs() local
4410 eb = path->nodes[0]; process_all_new_xattrs()
4412 if (slot >= btrfs_header_nritems(eb)) { process_all_new_xattrs()
4423 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_new_xattrs()
4922 struct extent_buffer *eb; is_extent_unchanged() local
4942 eb = left_path->nodes[0]; is_extent_unchanged()
4944 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); is_extent_unchanged()
4945 left_type = btrfs_file_extent_type(eb, ei); is_extent_unchanged()
4951 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); is_extent_unchanged()
4952 left_len = btrfs_file_extent_num_bytes(eb, ei); is_extent_unchanged()
4953 left_offset = btrfs_file_extent_offset(eb, ei); is_extent_unchanged()
4954 left_gen = btrfs_file_extent_generation(eb, ei); is_extent_unchanged()
4991 eb = path->nodes[0]; is_extent_unchanged()
4993 btrfs_item_key_to_cpu(eb, &found_key, slot); is_extent_unchanged()
5006 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); is_extent_unchanged()
5007 right_type = btrfs_file_extent_type(eb, ei); is_extent_unchanged()
5013 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); is_extent_unchanged()
5014 right_len = btrfs_file_extent_num_bytes(eb, ei); is_extent_unchanged()
5015 right_offset = btrfs_file_extent_offset(eb, ei); is_extent_unchanged()
5016 right_gen = btrfs_file_extent_generation(eb, ei); is_extent_unchanged()
5054 eb = path->nodes[0]; is_extent_unchanged()
5056 btrfs_item_key_to_cpu(eb, &found_key, slot); is_extent_unchanged()
5247 struct extent_buffer *eb; process_all_extents() local
5263 eb = path->nodes[0]; process_all_extents()
5266 if (slot >= btrfs_header_nritems(eb)) { process_all_extents()
5277 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_extents()
5788 struct extent_buffer *eb; full_send_tree() local
5806 eb = path->nodes[0]; full_send_tree()
5808 btrfs_item_key_to_cpu(eb, &found_key, slot); full_send_tree()
451 fs_path_add_from_extent_buffer(struct fs_path *p, struct extent_buffer *eb, unsigned long off, int len) fs_path_add_from_extent_buffer() argument
584 tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, struct extent_buffer *eb, struct btrfs_timespec *ts) tlv_put_btrfs_timespec() argument
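
The send.c hits repeatedly pull variable-length names out of an eb: take the item pointer, read the length accessor, then read_extent_buffer() the bytes that follow the fixed-size header. A minimal sketch for a directory item; illustrative only, copy_dir_item_name() is a made-up name and ctree.h is assumed.

/* Copy a dir item's name into buf; returns the number of bytes copied. */
static u32 copy_dir_item_name(struct extent_buffer *eb, int slot,
                              char *buf, u32 buf_len)
{
        struct btrfs_dir_item *di;
        u32 name_len;

        di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
        name_len = btrfs_dir_name_len(eb, di);
        if (name_len > buf_len)
                name_len = buf_len;     /* truncate rather than overrun */

        /* the name bytes follow the fixed-size btrfs_dir_item header */
        read_extent_buffer(eb, buf, (unsigned long)(di + 1), name_len);
        return name_len;
}
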
H A Dbackref.h39 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
H A Droot-tree.c33 static void btrfs_read_root_item(struct extent_buffer *eb, int slot, btrfs_read_root_item() argument
40 len = btrfs_item_size_nr(eb, slot); btrfs_read_root_item()
41 read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot), btrfs_read_root_item()
48 btrfs_warn(eb->fs_info, btrfs_read_root_item()
H A Dextent-tree.c6091 struct extent_buffer *eb) btrfs_exclude_logged_extents()
6101 for (i = 0; i < btrfs_header_nritems(eb); i++) { btrfs_exclude_logged_extents()
6102 btrfs_item_key_to_cpu(eb, &key, i); btrfs_exclude_logged_extents()
6105 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); btrfs_exclude_logged_extents()
6106 found_type = btrfs_file_extent_type(eb, item); btrfs_exclude_logged_extents()
6109 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) btrfs_exclude_logged_extents()
6111 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); btrfs_exclude_logged_extents()
6112 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); btrfs_exclude_logged_extents()
8044 struct extent_buffer *eb; reada_walk_down() local
8058 eb = path->nodes[wc->level]; reada_walk_down()
8059 nritems = btrfs_header_nritems(eb); reada_walk_down()
8067 bytenr = btrfs_node_blockptr(eb, slot); reada_walk_down()
8068 generation = btrfs_node_ptr_generation(eb, slot); reada_walk_down()
8096 btrfs_node_key_to_cpu(eb, &key, slot); reada_walk_down()
8143 struct extent_buffer *eb) account_leaf_items()
8145 int nr = btrfs_header_nritems(eb); account_leaf_items()
8156 btrfs_item_key_to_cpu(eb, &key, i); account_leaf_items()
8161 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); account_leaf_items()
8163 extent_type = btrfs_file_extent_type(eb, fi); account_leaf_items()
8168 bytenr = btrfs_file_extent_disk_bytenr(eb, fi); account_leaf_items()
8172 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); account_leaf_items()
8200 struct extent_buffer *eb; adjust_slots_upwards() local
8206 eb = path->nodes[level]; adjust_slots_upwards()
8207 nr = btrfs_header_nritems(eb); adjust_slots_upwards()
8217 btrfs_tree_unlock_rw(eb, path->locks[level]); adjust_slots_upwards()
8220 free_extent_buffer(eb); adjust_slots_upwards()
8236 eb = path->nodes[root_level]; adjust_slots_upwards()
8237 if (path->slots[root_level] >= btrfs_header_nritems(eb)) adjust_slots_upwards()
8254 struct extent_buffer *eb = root_eb; account_shared_subtree() local
8301 eb = path->nodes[level + 1]; account_shared_subtree()
8303 child_bytenr = btrfs_node_blockptr(eb, parent_slot); account_shared_subtree()
8304 child_gen = btrfs_node_ptr_generation(eb, parent_slot); account_shared_subtree()
8306 eb = read_tree_block(root, child_bytenr, child_gen); account_shared_subtree()
8307 if (IS_ERR(eb)) { account_shared_subtree()
8308 ret = PTR_ERR(eb); account_shared_subtree()
8310 } else if (!extent_buffer_uptodate(eb)) { account_shared_subtree()
8311 free_extent_buffer(eb); account_shared_subtree()
8316 path->nodes[level] = eb; account_shared_subtree()
8319 btrfs_tree_read_lock(eb); account_shared_subtree()
8320 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); account_shared_subtree()
8367 struct extent_buffer *eb = path->nodes[level]; walk_down_proc() local
8372 btrfs_header_owner(eb) != root->root_key.objectid) walk_down_proc()
8384 eb->start, level, 1, walk_down_proc()
8398 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8407 ret = btrfs_inc_ref(trans, root, eb, 1); walk_down_proc()
8409 ret = btrfs_dec_ref(trans, root, eb, 0); walk_down_proc()
8411 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, walk_down_proc()
8412 eb->len, flag, walk_down_proc()
8413 btrfs_header_level(eb), 0); walk_down_proc()
8423 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8608 struct extent_buffer *eb = path->nodes[level]; walk_up_proc() local
8631 btrfs_tree_lock(eb); walk_up_proc()
8632 btrfs_set_lock_blocking(eb); walk_up_proc()
8636 eb->start, level, 1, walk_up_proc()
8640 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8646 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8659 ret = btrfs_dec_ref(trans, root, eb, 1); walk_up_proc()
8661 ret = btrfs_dec_ref(trans, root, eb, 0); walk_up_proc()
8663 ret = account_leaf_items(trans, root, eb); walk_up_proc()
8674 btrfs_header_generation(eb) == trans->transid) { walk_up_proc()
8675 btrfs_tree_lock(eb); walk_up_proc()
8676 btrfs_set_lock_blocking(eb); walk_up_proc()
8679 clean_tree_block(trans, root->fs_info, eb); walk_up_proc()
8682 if (eb == root->node) { walk_up_proc()
8684 parent = eb->start; walk_up_proc()
8687 btrfs_header_owner(eb)); walk_up_proc()
8696 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); walk_up_proc()
6090 btrfs_exclude_logged_extents(struct btrfs_root *log, struct extent_buffer *eb) btrfs_exclude_logged_extents() argument
8141 account_leaf_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb) account_leaf_items() argument
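
account_leaf_items() above shows the usual filter-and-accumulate pass over a leaf's file extent items. A reduced, illustrative sketch that only sums on-disk bytes; leaf_disk_bytes() is not a kernel function, and BTRFS_EXTENT_DATA_KEY / BTRFS_FILE_EXTENT_INLINE are assumed from ctree.h.

/* Sum the on-disk bytes referenced by a leaf's non-inline file extents. */
static u64 leaf_disk_bytes(struct extent_buffer *eb)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        int i, nr = btrfs_header_nritems(eb);
        u64 total = 0;

        for (i = 0; i < nr; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
                if (btrfs_file_extent_type(eb, fi) == BTRFS_FILE_EXTENT_INLINE)
                        continue;       /* inline data lives in the leaf itself */
                if (btrfs_file_extent_disk_bytenr(eb, fi) == 0)
                        continue;       /* hole, nothing allocated */
                total += btrfs_file_extent_disk_num_bytes(eb, fi);
        }
        return total;
}
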
H A Dtransaction.c1111 struct extent_buffer *eb; commit_cowonly_roots() local
1114 eb = btrfs_lock_root_node(fs_info->tree_root); commit_cowonly_roots()
1115 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, commit_cowonly_roots()
1116 0, &eb); commit_cowonly_roots()
1117 btrfs_tree_unlock(eb); commit_cowonly_roots()
1118 free_extent_buffer(eb); commit_cowonly_roots()
H A Dvolumes.c3992 struct extent_buffer *eb; btrfs_uuid_scan_kthread() local
4026 eb = path->nodes[0]; btrfs_uuid_scan_kthread()
4028 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_scan_kthread()
4032 read_extent_buffer(eb, &root_item, btrfs_uuid_scan_kthread()
4033 btrfs_item_ptr_offset(eb, slot), btrfs_uuid_scan_kthread()
6645 struct extent_buffer *eb; btrfs_init_dev_stats() local
6674 eb = path->nodes[0]; btrfs_init_dev_stats()
6675 btrfs_item_key_to_cpu(eb, &found_key, slot); btrfs_init_dev_stats()
6676 item_size = btrfs_item_size_nr(eb, slot); btrfs_init_dev_stats()
6678 ptr = btrfs_item_ptr(eb, slot, btrfs_init_dev_stats()
6684 btrfs_dev_stats_value(eb, ptr, i)); btrfs_init_dev_stats()
6706 struct extent_buffer *eb; update_dev_stat_item() local
6751 eb = path->nodes[0]; update_dev_stat_item()
6752 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); update_dev_stat_item()
6754 btrfs_set_dev_stats_value(eb, ptr, i, update_dev_stat_item()
6756 btrfs_mark_buffer_dirty(eb); update_dev_stat_item()
H A Dscrub.c524 struct extent_buffer *eb; scrub_print_warning_inode() local
555 eb = swarn->path->nodes[0]; scrub_print_warning_inode()
556 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], scrub_print_warning_inode()
558 isize = btrfs_inode_size(eb, inode_item); scrub_print_warning_inode()
559 nlink = btrfs_inode_nlink(eb, inode_item); scrub_print_warning_inode()
606 struct extent_buffer *eb; scrub_print_warning() local
638 eb = path->nodes[0]; scrub_print_warning()
639 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); scrub_print_warning()
640 item_size = btrfs_item_size_nr(eb, path->slots[0]); scrub_print_warning()
644 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, scrub_print_warning()
/linux-4.4.14/drivers/mtd/
H A Dmtdswap.c199 static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eb_offset() argument
201 return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; mtdswap_eb_offset()
204 static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eb_detach() argument
209 if (eb->root) { mtdswap_eb_detach()
210 tp = container_of(eb->root, struct mtdswap_tree, root); mtdswap_eb_detach()
214 rb_erase(&eb->rb, eb->root); mtdswap_eb_detach()
218 static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) __mtdswap_rb_add() argument
227 if (eb->erase_count > cur->erase_count) __mtdswap_rb_add()
233 rb_link_node(&eb->rb, parent, p); __mtdswap_rb_add()
234 rb_insert_color(&eb->rb, root); __mtdswap_rb_add()
237 static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) mtdswap_rb_add() argument
241 if (eb->root == &d->trees[idx].root) mtdswap_rb_add()
244 mtdswap_eb_detach(d, eb); mtdswap_rb_add()
246 __mtdswap_rb_add(root, eb); mtdswap_rb_add()
247 eb->root = root; mtdswap_rb_add()
266 static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_handle_badblock() argument
272 eb->flags |= EBLOCK_BAD; mtdswap_handle_badblock()
273 mtdswap_eb_detach(d, eb); mtdswap_handle_badblock()
274 eb->root = NULL; mtdswap_handle_badblock()
280 offset = mtdswap_eb_offset(d, eb); mtdswap_handle_badblock()
294 static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_handle_write_error() argument
296 unsigned int marked = eb->flags & EBLOCK_FAILED; mtdswap_handle_write_error()
299 eb->flags |= EBLOCK_FAILED; mtdswap_handle_write_error()
300 if (curr_write == eb) { mtdswap_handle_write_error()
304 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_handle_write_error()
309 return mtdswap_handle_badblock(d, eb); mtdswap_handle_write_error()
336 static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_read_markers() argument
343 offset = mtdswap_eb_offset(d, eb); mtdswap_read_markers()
365 eb->erase_count = le32_to_cpu(data->count); mtdswap_read_markers()
375 eb->flags |= EBLOCK_NOMAGIC; mtdswap_read_markers()
382 static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, mtdswap_write_marker() argument
397 n.count = cpu_to_le32(eb->erase_count); mtdswap_write_marker()
399 offset = mtdswap_eb_offset(d, eb); mtdswap_write_marker()
403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; mtdswap_write_marker()
412 mtdswap_handle_write_error(d, eb); mtdswap_write_marker()
435 struct swap_eb *eb; mtdswap_check_counts() local
440 eb = d->eb_data + i; mtdswap_check_counts()
442 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) mtdswap_check_counts()
445 __mtdswap_rb_add(&hist_root, eb); mtdswap_check_counts()
458 eb = d->eb_data + i; mtdswap_check_counts()
460 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR)) mtdswap_check_counts()
461 eb->erase_count = median; mtdswap_check_counts()
463 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) mtdswap_check_counts()
466 rb_erase(&eb->rb, &hist_root); mtdswap_check_counts()
474 struct swap_eb *eb; mtdswap_scan_eblks() local
477 eb = d->eb_data + i; mtdswap_scan_eblks()
479 status = mtdswap_read_markers(d, eb); mtdswap_scan_eblks()
481 eb->flags |= EBLOCK_READERR; mtdswap_scan_eblks()
483 eb->flags |= EBLOCK_BAD; mtdswap_scan_eblks()
499 eb->flags |= (idx << EBLOCK_IDX_SHIFT); mtdswap_scan_eblks()
505 eb = d->eb_data + i; mtdswap_scan_eblks()
507 if (eb->flags & EBLOCK_BAD) mtdswap_scan_eblks()
510 idx = eb->flags >> EBLOCK_IDX_SHIFT; mtdswap_scan_eblks()
511 mtdswap_rb_add(d, eb, idx); mtdswap_scan_eblks()
519 static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_store_eb() argument
521 unsigned int weight = eb->active_count; mtdswap_store_eb()
524 if (eb == d->curr_write) mtdswap_store_eb()
527 if (eb->flags & EBLOCK_BITFLIP) mtdswap_store_eb()
528 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); mtdswap_store_eb()
529 else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) mtdswap_store_eb()
530 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_store_eb()
532 mtdswap_rb_add(d, eb, MTDSWAP_USED); mtdswap_store_eb()
534 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); mtdswap_store_eb()
536 mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); mtdswap_store_eb()
538 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); mtdswap_store_eb()
548 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_erase_block() argument
556 eb->erase_count++; mtdswap_erase_block()
557 if (eb->erase_count > d->max_erase_count) mtdswap_erase_block()
558 d->max_erase_count = eb->erase_count; mtdswap_erase_block()
566 erase.addr = mtdswap_eb_offset(d, eb); mtdswap_erase_block()
583 mtdswap_handle_badblock(d, eb); mtdswap_erase_block()
604 mtdswap_handle_badblock(d, eb); mtdswap_erase_block()
617 struct swap_eb *eb; mtdswap_map_free_block() local
625 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); mtdswap_map_free_block()
626 rb_erase(&eb->rb, clean_root); mtdswap_map_free_block()
627 eb->root = NULL; mtdswap_map_free_block()
630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); mtdswap_map_free_block()
637 d->curr_write = eb; mtdswap_map_free_block()
667 struct swap_eb *eb; mtdswap_write_block() local
679 eb = d->eb_data + (*bp / d->pages_per_eblk); mtdswap_write_block()
683 eb->active_count--; mtdswap_write_block()
695 eb->active_count--; mtdswap_write_block()
697 mtdswap_handle_write_error(d, eb); mtdswap_write_block()
718 eb->active_count--; mtdswap_write_block()
728 struct swap_eb *eb, *oldeb; mtdswap_move_block() local
768 eb = d->eb_data + *newblock / d->pages_per_eblk; mtdswap_move_block()
771 eb = d->eb_data + oldblock / d->pages_per_eblk; mtdswap_move_block()
772 eb->active_count--; mtdswap_move_block()
782 static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_gc_eblock() argument
788 eblk_base = (eb - d->eb_data) * d->pages_per_eblk; mtdswap_gc_eblock()
898 struct swap_eb *eb = NULL; mtdswap_pick_gc_eblk() local
910 eb = rb_entry(rb_first(rp), struct swap_eb, rb); mtdswap_pick_gc_eblk()
912 rb_erase(&eb->rb, rp); mtdswap_pick_gc_eblk()
913 eb->root = NULL; mtdswap_pick_gc_eblk()
915 return eb; mtdswap_pick_gc_eblk()
924 struct swap_eb *eb) mtdswap_eblk_passes()
940 base = mtdswap_eb_offset(d, eb); mtdswap_eblk_passes()
974 ret = mtdswap_erase_block(d, eb); mtdswap_eblk_passes()
979 eb->flags &= ~EBLOCK_READERR; mtdswap_eblk_passes()
983 mtdswap_handle_badblock(d, eb); mtdswap_eblk_passes()
989 struct swap_eb *eb; mtdswap_gc() local
995 eb = mtdswap_pick_gc_eblk(d, background); mtdswap_gc()
996 if (!eb) mtdswap_gc()
999 ret = mtdswap_gc_eblock(d, eb); mtdswap_gc()
1003 if (eb->flags & EBLOCK_FAILED) { mtdswap_gc()
1004 mtdswap_handle_badblock(d, eb); mtdswap_gc()
1008 eb->flags &= ~EBLOCK_BITFLIP; mtdswap_gc()
1009 ret = mtdswap_erase_block(d, eb); mtdswap_gc()
1010 if ((eb->flags & EBLOCK_READERR) && mtdswap_gc()
1011 (ret || !mtdswap_eblk_passes(d, eb))) mtdswap_gc()
1015 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN); mtdswap_gc()
1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); mtdswap_gc()
1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); mtdswap_gc()
1074 struct swap_eb *eb; mtdswap_writesect() local
1092 eb = d->eb_data + (mapped / d->pages_per_eblk); mtdswap_writesect()
1093 eb->active_count--; mtdswap_writesect()
1094 mtdswap_store_eb(d, eb); mtdswap_writesect()
1105 eb = d->eb_data + (newblock / d->pages_per_eblk); mtdswap_writesect()
1134 struct swap_eb *eb; mtdswap_readsect() local
1156 eb = d->eb_data + (realblock / d->pages_per_eblk); mtdswap_readsect()
1167 eb->flags |= EBLOCK_BITFLIP; mtdswap_readsect()
1168 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); mtdswap_readsect()
1174 eb->flags |= EBLOCK_READERR; mtdswap_readsect()
1175 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_readsect()
1196 struct swap_eb *eb; mtdswap_discard() local
1204 eb = d->eb_data + (mapped / d->pages_per_eblk); mtdswap_discard()
1205 eb->active_count--; mtdswap_discard()
1206 mtdswap_store_eb(d, eb); mtdswap_discard()
923 mtdswap_eblk_passes(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eblk_passes() argument
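
mtdswap keeps its eraseblocks in red-black trees ordered by erase count, so the least-worn block is the leftmost node (see the rb_first() picks in mtdswap_map_free_block() and mtdswap_pick_gc_eblk() above). A sketch of roughly what the erase-count-ordered insert in __mtdswap_rb_add() does; illustrative only, relying just on the fields visible in the hits (eb->rb, eb->erase_count) and the standard kernel rbtree API.

/* Insert a swap_eb into a tree ordered by erase count. */
static void wear_sorted_insert(struct rb_root *root, struct swap_eb *eb)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct swap_eb *cur;

        while (*p) {
                parent = *p;
                cur = rb_entry(parent, struct swap_eb, rb);
                if (eb->erase_count > cur->erase_count)
                        p = &(*p)->rb_right;    /* more worn blocks to the right */
                else
                        p = &(*p)->rb_left;
        }
        rb_link_node(&eb->rb, parent, p);
        rb_insert_color(&eb->rb, root);         /* rebalance the red-black tree */
}
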
/linux-4.4.14/fs/btrfs/tests/
H A Dextent-buffer-tests.c29 struct extent_buffer *eb; test_btrfs_split_item() local
56 path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); test_btrfs_split_item()
57 if (!eb) { test_btrfs_split_item()
71 write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
91 btrfs_item_key_to_cpu(eb, &key, 0); test_btrfs_split_item()
100 if (btrfs_item_size(eb, item) != strlen(split1)) { test_btrfs_split_item()
106 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
116 btrfs_item_key_to_cpu(eb, &key, 1); test_btrfs_split_item()
125 if (btrfs_item_size(eb, item) != strlen(split2)) { test_btrfs_split_item()
131 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), test_btrfs_split_item()
148 btrfs_item_key_to_cpu(eb, &key, 0); test_btrfs_split_item()
157 if (btrfs_item_size(eb, item) != strlen(split3)) { test_btrfs_split_item()
163 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
172 btrfs_item_key_to_cpu(eb, &key, 1); test_btrfs_split_item()
181 if (btrfs_item_size(eb, item) != strlen(split4)) { test_btrfs_split_item()
187 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), test_btrfs_split_item()
196 btrfs_item_key_to_cpu(eb, &key, 2); test_btrfs_split_item()
205 if (btrfs_item_size(eb, item) != strlen(split2)) { test_btrfs_split_item()
211 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2), test_btrfs_split_item()
H A Dbtrfs-tests.c136 struct extent_buffer *eb; btrfs_free_dummy_fs_info() local
138 eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); btrfs_free_dummy_fs_info()
139 if (!eb) btrfs_free_dummy_fs_info()
142 if (radix_tree_exception(eb)) { btrfs_free_dummy_fs_info()
143 if (radix_tree_deref_retry(eb)) btrfs_free_dummy_fs_info()
148 free_extent_buffer_stale(eb); btrfs_free_dummy_fs_info()
/linux-4.4.14/drivers/mtd/tests/
H A Dmtd_test.h17 unsigned int eb, int ebcnt);
19 unsigned int eb, int ebcnt);
H A Dstresstest.c57 unsigned int eb; rand_eb() local
60 eb = prandom_u32(); rand_eb()
62 eb %= (ebcnt - 1); rand_eb()
63 if (bbt[eb]) rand_eb()
65 return eb; rand_eb()
88 int eb = rand_eb(); do_read() local
93 if (bbt[eb + 1]) { do_read()
99 addr = (loff_t)eb * mtd->erasesize + offs; do_read()
105 int eb = rand_eb(), offs, err, len; do_write() local
108 offs = offsets[eb]; do_write()
110 err = mtdtest_erase_eraseblock(mtd, eb); do_write()
113 offs = offsets[eb] = 0; do_write()
118 if (bbt[eb + 1]) do_write()
121 err = mtdtest_erase_eraseblock(mtd, eb + 1); do_write()
124 offsets[eb + 1] = 0; do_write()
127 addr = (loff_t)eb * mtd->erasesize + offs; do_write()
133 offsets[eb++] = mtd->erasesize; do_write()
136 offsets[eb] = offs; do_write()
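
rand_eb() above is a simple rejection loop: draw a random block index and retry while the bad-block table marks it bad. A minimal sketch, illustrative only; pick_good_eb() is a made-up name, and the "- 1" presumably leaves room for the eb + 1 accesses do_write() makes.

/* Pick a random eraseblock index that is not marked bad in bbt[]. */
static unsigned int pick_good_eb(const unsigned char *bbt, unsigned int ebcnt)
{
        unsigned int eb;

        do {
                eb = prandom_u32() % (ebcnt - 1);       /* keep eb + 1 in range */
        } while (bbt[eb]);                              /* skip bad blocks */

        return eb;
}
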
H A Dmtd_test.c46 unsigned int eb, int ebcnt) mtdtest_scan_for_bad_eraseblocks()
55 bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0; mtdtest_scan_for_bad_eraseblocks()
66 unsigned int eb, int ebcnt) mtdtest_erase_good_eraseblocks()
74 err = mtdtest_erase_eraseblock(mtd, eb + i); mtdtest_erase_good_eraseblocks()
45 mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, unsigned int eb, int ebcnt) mtdtest_scan_for_bad_eraseblocks() argument
65 mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, unsigned int eb, int ebcnt) mtdtest_erase_good_eraseblocks() argument
H A Dtorturetest.c40 static int eb = 8; variable
41 module_param(eb, int, S_IRUGO);
42 MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
199 ebcnt, eb, eb + ebcnt - 1, dev); tort_init()
259 err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt); tort_init()
268 err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); tort_init()
274 for (i = eb; i < eb + ebcnt; i++) { tort_init()
275 if (bad_ebs[i - eb]) tort_init()
291 for (i = eb; i < eb + ebcnt; i++) { tort_init()
292 if (bad_ebs[i - eb]) tort_init()
294 if ((eb + erase_cycles) & 1) tort_init()
309 for (i = eb; i < eb + ebcnt; i++) { tort_init()
310 if (bad_ebs[i - eb]) tort_init()
312 if ((eb + erase_cycles) & 1) tort_init()
320 ((eb + erase_cycles) & 1) ? tort_init()
/linux-4.4.14/drivers/gpu/drm/i915/
H A Di915_gem_execbuffer.c56 struct eb_vmas *eb = NULL; eb_create() local
62 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); eb_create()
65 if (eb == NULL) { eb_create()
71 eb = kzalloc(count*sizeof(struct hlist_head) + eb_create()
74 if (eb == NULL) eb_create()
75 return eb; eb_create()
77 eb->and = count - 1; eb_create()
79 eb->and = -args->buffer_count; eb_create()
81 INIT_LIST_HEAD(&eb->vmas); eb_create()
82 return eb; eb_create()
86 eb_reset(struct eb_vmas *eb) eb_reset() argument
88 if (eb->and >= 0) eb_reset()
89 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); eb_reset()
93 eb_lookup_vmas(struct eb_vmas *eb, eb_lookup_vmas() argument
154 list_add_tail(&vma->exec_list, &eb->vmas); eb_lookup_vmas()
158 if (eb->and < 0) { eb_lookup_vmas()
159 eb->lut[i] = vma; eb_lookup_vmas()
164 &eb->buckets[handle & eb->and]); eb_lookup_vmas()
188 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) eb_get_vma() argument
190 if (eb->and < 0) { eb_get_vma()
191 if (handle >= -eb->and) eb_get_vma()
193 return eb->lut[handle]; eb_get_vma()
198 head = &eb->buckets[handle & eb->and]; hlist_for_each()
230 static void eb_destroy(struct eb_vmas *eb) eb_destroy() argument
232 while (!list_empty(&eb->vmas)) { eb_destroy()
235 vma = list_first_entry(&eb->vmas, eb_destroy()
242 kfree(eb); eb_destroy()
381 struct eb_vmas *eb, i915_gem_execbuffer_relocate_entry()
392 target_vma = eb_get_vma(eb, reloc->target_handle); i915_gem_execbuffer_relocate_entry()
487 struct eb_vmas *eb) i915_gem_execbuffer_relocate_vma()
511 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); i915_gem_execbuffer_relocate_vma()
533 struct eb_vmas *eb, i915_gem_execbuffer_relocate_vma_slow()
540 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); i915_gem_execbuffer_relocate_vma_slow()
549 i915_gem_execbuffer_relocate(struct eb_vmas *eb) i915_gem_execbuffer_relocate() argument
562 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate()
563 ret = i915_gem_execbuffer_relocate_vma(vma, eb); i915_gem_execbuffer_relocate()
793 struct eb_vmas *eb, i915_gem_execbuffer_relocate_slow()
805 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; i915_gem_execbuffer_relocate_slow()
808 while (!list_empty(&eb->vmas)) { i915_gem_execbuffer_relocate_slow()
809 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); i915_gem_execbuffer_relocate_slow()
875 eb_reset(eb); i915_gem_execbuffer_relocate_slow()
876 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_execbuffer_relocate_slow()
881 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); i915_gem_execbuffer_relocate_slow()
885 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate_slow()
887 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, i915_gem_execbuffer_relocate_slow()
1129 struct eb_vmas *eb, i915_gem_execbuffer_parse()
1165 list_add_tail(&vma->exec_list, &eb->vmas); i915_gem_execbuffer_parse()
1307 eb_get_batch(struct eb_vmas *eb) eb_get_batch() argument
1309 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); eb_get_batch()
1332 struct eb_vmas *eb; i915_gem_do_execbuffer() local
1449 eb = eb_create(args); i915_gem_do_execbuffer()
1450 if (eb == NULL) { i915_gem_do_execbuffer()
1458 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_do_execbuffer()
1463 batch_obj = eb_get_batch(eb); i915_gem_do_execbuffer()
1467 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); i915_gem_do_execbuffer()
1473 ret = i915_gem_execbuffer_relocate(eb); i915_gem_do_execbuffer()
1477 eb, exec, ctx); i915_gem_do_execbuffer()
1497 eb, i915_gem_do_execbuffer()
1574 ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); i915_gem_do_execbuffer()
1589 eb_destroy(eb); i915_gem_do_execbuffer()
380 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *reloc) i915_gem_execbuffer_relocate_entry() argument
486 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct eb_vmas *eb) i915_gem_execbuffer_relocate_vma() argument
532 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *relocs) i915_gem_execbuffer_relocate_vma_slow() argument
789 i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, struct intel_engine_cs *ring, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, struct intel_context *ctx) i915_gem_execbuffer_relocate_slow() argument
1127 i915_gem_execbuffer_parse(struct intel_engine_cs *ring, struct drm_i915_gem_exec_object2 *shadow_exec_entry, struct eb_vmas *eb, struct drm_i915_gem_object *batch_obj, u32 batch_start_offset, u32 batch_len, bool is_master) i915_gem_execbuffer_parse() argument
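
The eb_create()/eb_get_vma() excerpts above size the handle hash table to a power of two and select a bucket with "handle & eb->and", where eb->and is count - 1 (a negative eb->and switches to a flat lookup table instead). The sketch below shows only that masking idea with hypothetical names; it is not the i915 structure.

/* Sketch of the power-of-two bucket mask used by eb_get_vma():
 * with a power-of-two bucket count, (handle & (count - 1)) is a
 * cheap bucket index. */
#include <stdio.h>

#define BUCKET_COUNT 8u                 /* must be a power of two */

static unsigned int bucket_for(unsigned int handle)
{
        return handle & (BUCKET_COUNT - 1);     /* same role as eb->and */
}

int main(void)
{
        unsigned int handles[] = { 1, 8, 9, 1023 };
        unsigned int i;

        for (i = 0; i < sizeof(handles) / sizeof(handles[0]); i++)
                printf("handle %u -> bucket %u\n",
                       handles[i], bucket_for(handles[i]));
        return 0;
}
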
H A Di915_drv.h3262 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, intel_display_crc_init()
3266 struct drm_i915_error_state_buf *eb) i915_error_state_buf_release()
3268 kfree(eb->buf); i915_error_state_buf_release()
3265 i915_error_state_buf_release( struct drm_i915_error_state_buf *eb) i915_error_state_buf_release() argument
/linux-4.4.14/lib/
H A Dtest-string_helpers.c155 .in = "\eb \\C\007\"\x90\r]",
157 .out = "\eb \\C\007\"\x90\\r]",
160 .out = "\\eb \\\\C\\a\"\x90\r]",
163 .out = "\\eb \\\\C\\a\"\x90\\r]",
178 .out = "\eb \\C\007\"\x90\r]",
181 .out = "\eb \\C\007\"\x90\\r]",
184 .out = "\\eb \\C\\a\"\x90\r]",
187 .out = "\\eb \\C\\a\"\x90\\r]",
196 .out = "\\eb \\C\\a\"\\220\\r]",
230 .in = "\eb \\C\007\"\x90\r]",
/linux-4.4.14/arch/sh/include/asm/
H A Dbugs.h71 /* 'eb' means 'Endian Big' */ check_bugs()
/linux-4.4.14/arch/arm/mach-realview/
H A Drealview-dt.c17 "arm,realview-eb",
H A Dplatsmp.c20 #include <mach/board-eb.h>
H A Drealview_eb.c46 #include <mach/board-eb.h>
/linux-4.4.14/drivers/input/mouse/
H A Dsynaptics.h114 #define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115 #define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116 #define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
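
Each SYN_CAP_EXT_BUTTON_STICK_* macro above tests one bit of the extended-button capability byte and normalises it to 0 or 1 with the double negation. A standalone sketch, with a made-up capability byte:

/* Sketch of the single-bit capability tests above. */
#include <stdio.h>

#define STICK_L(eb) (!!((eb) & 0x01))
#define STICK_M(eb) (!!((eb) & 0x02))
#define STICK_R(eb) (!!((eb) & 0x04))

int main(void)
{
        unsigned char eb = 0x05;        /* hypothetical byte: L and R set */

        printf("L=%d M=%d R=%d\n", STICK_L(eb), STICK_M(eb), STICK_R(eb));
        return 0;
}
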
/linux-4.4.14/fs/ocfs2/
H A Dextent_map.c294 struct ocfs2_extent_block *eb; ocfs2_last_eb_is_empty() local
303 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_last_eb_is_empty()
304 el = &eb->h_list; ocfs2_last_eb_is_empty()
365 struct ocfs2_extent_block *eb, *next_eb; ocfs2_figure_hole_clusters() local
370 eb = (struct ocfs2_extent_block *)eb_bh->b_data; ocfs2_figure_hole_clusters()
376 if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) ocfs2_figure_hole_clusters()
380 le64_to_cpu(eb->h_next_leaf_blk), ocfs2_figure_hole_clusters()
418 struct ocfs2_extent_block *uninitialized_var(eb); ocfs2_get_clusters_nocache()
439 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_get_clusters_nocache()
440 el = &eb->h_list; ocfs2_get_clusters_nocache()
508 else if (eb->h_blkno == di->i_last_eb_blk) ocfs2_get_clusters_nocache()
510 else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { ocfs2_get_clusters_nocache()
551 struct ocfs2_extent_block *eb; ocfs2_xattr_get_clusters() local
563 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_xattr_get_clusters()
564 el = &eb->h_list; ocfs2_xattr_get_clusters()
H A Dalloc.c570 struct ocfs2_extent_block *eb);
673 struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data; ocfs2_path_insert_eb() local
684 path->p_node[index].el = &eb->h_list; ocfs2_path_insert_eb()
887 struct ocfs2_extent_block *eb = ocfs2_validate_extent_block() local
899 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); ocfs2_validate_extent_block()
910 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { ocfs2_validate_extent_block()
914 eb->h_signature); ocfs2_validate_extent_block()
918 if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) { ocfs2_validate_extent_block()
922 (unsigned long long)le64_to_cpu(eb->h_blkno)); ocfs2_validate_extent_block()
926 if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) { ocfs2_validate_extent_block()
930 le32_to_cpu(eb->h_fs_generation)); ocfs2_validate_extent_block()
962 struct ocfs2_extent_block *eb; ocfs2_num_free_extents() local
976 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_num_free_extents()
977 el = &eb->h_list; ocfs2_num_free_extents()
1007 struct ocfs2_extent_block *eb; ocfs2_create_new_meta_bhs() local
1041 eb = (struct ocfs2_extent_block *) bhs[i]->b_data; ocfs2_create_new_meta_bhs()
1043 strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); ocfs2_create_new_meta_bhs()
1044 eb->h_blkno = cpu_to_le64(first_blkno); ocfs2_create_new_meta_bhs()
1045 eb->h_fs_generation = cpu_to_le32(osb->fs_generation); ocfs2_create_new_meta_bhs()
1046 eb->h_suballoc_slot = ocfs2_create_new_meta_bhs()
1048 eb->h_suballoc_loc = cpu_to_le64(suballoc_loc); ocfs2_create_new_meta_bhs()
1049 eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); ocfs2_create_new_meta_bhs()
1050 eb->h_list.l_count = ocfs2_create_new_meta_bhs()
1165 struct ocfs2_extent_block *eb; ocfs2_add_branch() local
1173 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_add_branch()
1174 el = &eb->h_list; ocfs2_add_branch()
1183 eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data; ocfs2_add_branch()
1184 new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list); ocfs2_add_branch()
1207 /* allocate the number of new eb blocks we need */ ocfs2_add_branch()
1233 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_add_branch()
1235 BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); ocfs2_add_branch()
1236 eb_el = &eb->h_list; ocfs2_add_branch()
1245 eb->h_next_leaf_blk = 0; ocfs2_add_branch()
1261 new_last_eb_blk = le64_to_cpu(eb->h_blkno); ocfs2_add_branch()
1264 next_blkno = le64_to_cpu(eb->h_blkno); ocfs2_add_branch()
1306 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; ocfs2_add_branch()
1307 eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk); ocfs2_add_branch()
1346 struct ocfs2_extent_block *eb; ocfs2_shift_tree_depth() local
1357 eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; ocfs2_shift_tree_depth()
1359 BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); ocfs2_shift_tree_depth()
1361 eb_el = &eb->h_list; ocfs2_shift_tree_depth()
1391 root_el->l_recs[0].e_blkno = eb->h_blkno; ocfs2_shift_tree_depth()
1400 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_shift_tree_depth()
1435 struct ocfs2_extent_block *eb; ocfs2_find_branch_target() local
1471 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_find_branch_target()
1472 el = &eb->h_list; ocfs2_find_branch_target()
1778 struct ocfs2_extent_block *eb; __ocfs2_find_path() local
1826 eb = (struct ocfs2_extent_block *) bh->b_data; __ocfs2_find_path()
1827 el = &eb->h_list; __ocfs2_find_path()
1890 struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data; find_leaf_ins() local
1891 struct ocfs2_extent_list *el = &eb->h_list; find_leaf_ins()
2516 struct ocfs2_extent_block *eb; ocfs2_update_edge_lengths() local
2541 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_update_edge_lengths()
2542 BUG_ON(eb->h_next_leaf_blk != 0ULL); ocfs2_update_edge_lengths()
2544 el = &eb->h_list; ocfs2_update_edge_lengths()
2570 struct ocfs2_extent_block *eb; ocfs2_unlink_path() local
2577 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_unlink_path()
2582 el = &eb->h_list; ocfs2_unlink_path()
2588 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_unlink_path()
2601 ret = ocfs2_cache_extent_block_free(dealloc, eb); ocfs2_unlink_path()
2620 struct ocfs2_extent_block *eb; ocfs2_unlink_subtree() local
2624 eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; ocfs2_unlink_subtree()
2627 if (root_el->l_recs[i].e_blkno == eb->h_blkno) ocfs2_unlink_subtree()
2635 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_unlink_subtree()
2636 eb->h_next_leaf_blk = 0; ocfs2_unlink_subtree()
2656 struct ocfs2_extent_block *eb; ocfs2_rotate_subtree_left() local
2668 eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; ocfs2_rotate_subtree_left()
2683 if (eb->h_next_leaf_blk != 0ULL) ocfs2_rotate_subtree_left()
2700 if (eb->h_next_leaf_blk == 0ULL && ocfs2_rotate_subtree_left()
2756 if (eb->h_next_leaf_blk == 0ULL) { ocfs2_rotate_subtree_left()
2779 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_rotate_subtree_left()
2780 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_rotate_subtree_left()
3029 struct ocfs2_extent_block *eb; ocfs2_remove_rightmost_path() local
3096 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_remove_rightmost_path()
3097 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_remove_rightmost_path()
3170 struct ocfs2_extent_block *eb; ocfs2_rotate_tree_left() local
3202 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_rotate_tree_left()
3203 el = &eb->h_list; ocfs2_rotate_tree_left()
3204 if (eb->h_next_leaf_blk == 0) { ocfs2_rotate_tree_left()
3218 (unsigned long long)le64_to_cpu(eb->h_blkno)); ocfs2_rotate_tree_left()
4341 struct ocfs2_extent_block *eb; ocfs2_figure_merge_contig_type() local
4369 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_figure_merge_contig_type()
4372 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_figure_merge_contig_type()
4424 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_figure_merge_contig_type()
4427 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_figure_merge_contig_type()
4558 struct ocfs2_extent_block *eb; ocfs2_figure_insert_type() local
4582 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_figure_insert_type()
4583 el = &eb->h_list; ocfs2_figure_insert_type()
4898 struct ocfs2_extent_block *eb; ocfs2_split_and_insert() local
4912 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; ocfs2_split_and_insert()
4913 rightmost_el = &eb->h_list; ocfs2_split_and_insert()
5075 struct ocfs2_extent_block *eb; ocfs2_split_extent() local
5085 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; ocfs2_split_extent()
5086 rightmost_el = &eb->h_list; ocfs2_split_extent()
5267 struct ocfs2_extent_block *eb; ocfs2_split_tree() local
5291 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; ocfs2_split_tree()
5292 rightmost_el = &eb->h_list; ocfs2_split_tree()
5342 struct ocfs2_extent_block *eb; ocfs2_truncate_rec() local
5362 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_truncate_rec()
5363 if (eb->h_next_leaf_blk == 0) ocfs2_truncate_rec()
6595 struct ocfs2_extent_block *eb) ocfs2_cache_extent_block_free()
6598 le16_to_cpu(eb->h_suballoc_slot), ocfs2_cache_extent_block_free()
6599 le64_to_cpu(eb->h_suballoc_loc), ocfs2_cache_extent_block_free()
6600 le64_to_cpu(eb->h_blkno), ocfs2_cache_extent_block_free()
6601 le16_to_cpu(eb->h_suballoc_bit)); ocfs2_cache_extent_block_free()
6594 ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb) ocfs2_cache_extent_block_free() argument
H A Drefcounttree.c966 struct ocfs2_extent_block *eb, ocfs2_get_refcount_cpos_end()
987 if (!eb || (eb && !eb->h_next_leaf_blk)) { ocfs2_get_refcount_cpos_end()
1011 cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); ocfs2_get_refcount_cpos_end()
1075 struct ocfs2_extent_block *eb = NULL; ocfs2_get_refcount_rec() local
1099 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_get_refcount_rec()
1100 el = &eb->h_list; ocfs2_get_refcount_rec()
1123 eb, el, i, &cpos_end); ocfs2_get_refcount_rec()
2648 struct ocfs2_extent_block *eb = NULL; ocfs2_refcount_cal_cow_clusters() local
2663 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_refcount_cal_cow_clusters()
2664 el = &eb->h_list; ocfs2_refcount_cal_cow_clusters()
2812 eb && eb->h_next_leaf_blk) { ocfs2_refcount_cal_cow_clusters()
2817 le64_to_cpu(eb->h_next_leaf_blk), ocfs2_refcount_cal_cow_clusters()
2824 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_refcount_cal_cow_clusters()
2825 el = &eb->h_list; ocfs2_refcount_cal_cow_clusters()
964 ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct ocfs2_extent_block *eb, struct ocfs2_extent_list *el, int index, u32 *cpos_end) ocfs2_get_refcount_cpos_end() argument
H A Docfs2_fs.h556 eb belongs to. Only valid
H A Ddir.c791 struct ocfs2_extent_block *eb; ocfs2_dx_dir_lookup_rec() local
802 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_dx_dir_lookup_rec()
803 el = &eb->h_list; ocfs2_dx_dir_lookup_rec()
H A Dxattr.c3675 struct ocfs2_extent_block *eb; ocfs2_xattr_get_rec() local
3687 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_xattr_get_rec()
3688 el = &eb->h_list; ocfs2_xattr_get_rec()
/linux-4.4.14/arch/x86/mm/
H A Dnuma_emulation.c49 struct numa_memblk *eb = &ei->blk[ei->nr_blks]; emu_setup_memblk() local
58 eb->start = pb->start; emu_setup_memblk()
59 eb->end = pb->start + size; emu_setup_memblk()
60 eb->nid = nid; emu_setup_memblk()
72 nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); emu_setup_memblk()
/linux-4.4.14/arch/powerpc/crypto/
H A Daes-tab-4k.S48 .long R(ef, fa, fa, 15), R(b2, 59, 59, eb)
68 .long R(cd, eb, eb, 26), R(4e, 27, 27, 69)
150 .long R(d9, e1, e1, 38), R(eb, f8, f8, 13)
182 .long R(bf, 6d, 7a, eb), R(95, 52, 59, da)
197 .long R(b2, eb, 28, 07), R(2f, b5, c2, 03)
206 .long R(05, 8a, e1, 32), R(a4, f6, eb, 75)
282 .long R(59, f8, 14, 8e), R(eb, 13, 3c, 89)
/linux-4.4.14/arch/arm/mach-realview/include/mach/
H A Dirqs.h25 #include <mach/irqs-eb.h>
H A Duncompress.h23 #include <mach/board-eb.h>
H A Dboard-eb.h2 * arch/arm/mach-realview/include/mach/board-eb.h
H A Dirqs-eb.h2 * arch/arm/mach-realview/include/mach/irqs-eb.h
/linux-4.4.14/drivers/power/reset/
H A Darm-versatile-reboot.c49 .compatible = "arm,realview-eb-syscon",
/linux-4.4.14/drivers/soc/versatile/
H A Dsoc-realview.c24 { .compatible = "arm,realview-eb-soc", },
/linux-4.4.14/arch/unicore32/include/asm/
H A Dassembler.h83 .else; .ifc \cond, eb
/linux-4.4.14/include/media/
H A Dv4l2-mem2mem.h129 struct v4l2_exportbuffer *eb);
269 struct v4l2_exportbuffer *eb);
H A Dvideobuf2-v4l2.h60 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
/linux-4.4.14/arch/ia64/kernel/
H A Dmca_drv.c413 * Bus_Check structure with Bus_Check.eb (external bus error) flag set is_mca_global()
423 if (pbci->eb) is_mca_global()
583 if (psp->bc && pbci->eb && pbci->bsi == 0) { recover_from_platform_error()
701 if (pbci->eb && pbci->bsi > 0) recover_from_processor_error()
/linux-4.4.14/arch/sparc/kernel/
H A Dbtext.c267 unsigned int *eb = (int *)expand_bits_16; draw_byte_16() local
272 base[0] = (eb[bits >> 6] & fg) ^ bg; draw_byte_16()
273 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; draw_byte_16()
274 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; draw_byte_16()
275 base[3] = (eb[bits & 3] & fg) ^ bg; draw_byte_16()
285 unsigned int *eb = (int *)expand_bits_8; draw_byte_8() local
290 base[0] = (eb[bits >> 4] & fg) ^ bg; draw_byte_8()
291 base[1] = (eb[bits & 0xf] & fg) ^ bg; draw_byte_8()
/linux-4.4.14/drivers/scsi/aic94xx/
H A Daic94xx_hwi.c366 struct sg_el *eb = &escb->eb[k]; asd_assign_edbs2escbs() local
369 memset(eb, 0, sizeof(*eb)); asd_assign_edbs2escbs()
370 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); asd_assign_edbs2escbs()
371 eb->size = cpu_to_le32(((u32) edb->size)); asd_assign_edbs2escbs()
H A Daic94xx_scb.c386 struct sg_el *eb = &escb->eb[edb_id]; asd_invalidate_edb() local
390 eb->flags |= ELEMENT_NOT_VALID; asd_invalidate_edb()
406 escb->eb[i].flags = 0; asd_invalidate_edb()
H A Daic94xx_sas.h480 struct sg_el eb[ASD_EDBS_PER_SCB]; member in struct:empty_scb
/linux-4.4.14/drivers/media/v4l2-core/
H A Dvideobuf2-v4l2.c675 * @eb: export buffer structure passed from userspace to vidioc_expbuf
681 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) vb2_expbuf() argument
683 return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index, vb2_expbuf()
684 eb->plane, eb->flags); vb2_expbuf()
H A Dv4l2-mem2mem.c474 struct v4l2_exportbuffer *eb) v4l2_m2m_expbuf()
478 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); v4l2_m2m_expbuf()
479 return vb2_expbuf(vq, eb); v4l2_m2m_expbuf()
845 struct v4l2_exportbuffer *eb) v4l2_m2m_ioctl_expbuf()
849 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); v4l2_m2m_ioctl_expbuf()
473 v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_exportbuffer *eb) v4l2_m2m_expbuf() argument
844 v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) v4l2_m2m_ioctl_expbuf() argument
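
vb2_expbuf() and v4l2_m2m_expbuf() above service the VIDIOC_EXPBUF ioctl, which returns a dmabuf file descriptor in eb->fd for one plane of an already-requested buffer. A userspace-side sketch follows; the device node, buffer index and plane are assumptions for illustration, and the buffer is presumed to have been set up with VIDIOC_REQBUFS beforehand.

/* Sketch of exporting buffer 0 of a capture queue as a dmabuf fd. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_exportbuffer eb;
        int fd = open("/dev/video0", O_RDWR);   /* hypothetical device */

        if (fd < 0) {
                perror("open /dev/video0");
                return 1;
        }

        memset(&eb, 0, sizeof(eb));
        eb.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* queue to export from */
        eb.index = 0;                           /* buffer from REQBUFS */
        eb.plane = 0;
        eb.flags = O_CLOEXEC | O_RDWR;

        if (ioctl(fd, VIDIOC_EXPBUF, &eb) == 0)
                printf("exported dmabuf fd %d\n", eb.fd);
        else
                perror("VIDIOC_EXPBUF");

        close(fd);
        return 0;
}
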
/linux-4.4.14/drivers/media/platform/s5p-mfc/
H A Ds5p_mfc_dec.c668 struct v4l2_exportbuffer *eb) vidioc_expbuf()
672 if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) vidioc_expbuf()
673 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
674 if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) vidioc_expbuf()
675 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
667 vidioc_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) vidioc_expbuf() argument
H A Ds5p_mfc_enc.c1301 struct v4l2_exportbuffer *eb) vidioc_expbuf()
1305 if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) vidioc_expbuf()
1306 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
1307 if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) vidioc_expbuf()
1308 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
1300 vidioc_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) vidioc_expbuf() argument
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_hsi.h1713 /* eb is the bitwidth of a single element */
1714 #define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
1715 #define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
1741 #define SHMEM_ARRAY_BITPOS(i, eb, fb) \
1742 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
1743 (((i)%((fb)/(eb))) * (eb)))
1745 #define SHMEM_ARRAY_GET(a, i, eb, fb) \
1746 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
1747 SHMEM_ARRAY_MASK(eb))
1749 #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
1751 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
1752 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1753 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
1754 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
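
The SHMEM_ARRAY_* macros above pack elements of eb bits into u32 words, grouped into fields of fb bits that are laid out from the most significant field of each word downwards. The standalone demo below is lightly adapted from the macro bodies shown (unsigned constants added); the eb = 4, fb = 8 parameters are just example values.

/* Round-trip demo of the SHMEM_ARRAY_* packing macros. */
#include <stdio.h>
#include <stdint.h>

#define SHMEM_ARRAY_MASK(eb)            ((1u << (eb)) - 1)
#define SHMEM_ARRAY_ENTRY(i, eb)        ((i) / (32 / (eb)))
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
        ((((32 / (fb)) - 1 - ((i) / ((fb) / (eb))) % (32 / (fb))) * (fb)) + \
         (((i) % ((fb) / (eb))) * (eb)))
#define SHMEM_ARRAY_GET(a, i, eb, fb) \
        ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
         SHMEM_ARRAY_MASK(eb))
#define SHMEM_ARRAY_SET(a, i, eb, fb, val)                               \
do {                                                                     \
        a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) <<         \
                                         SHMEM_ARRAY_BITPOS(i, eb, fb)); \
        a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
                                        SHMEM_ARRAY_BITPOS(i, eb, fb));  \
} while (0)

int main(void)
{
        uint32_t a[2] = { 0, 0 };
        int i;

        /* pack 4-bit elements into 8-bit fields and read them back */
        for (i = 0; i < 8; i++)
                SHMEM_ARRAY_SET(a, i, 4, 8, i + 1);
        for (i = 0; i < 8; i++)
                printf("element %d = %u (word %d, bit %d)\n", i,
                       SHMEM_ARRAY_GET(a, i, 4, 8),
                       SHMEM_ARRAY_ENTRY(i, 4), SHMEM_ARRAY_BITPOS(i, 4, 8));
        return 0;
}
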
/linux-4.4.14/arch/powerpc/kernel/
H A Dbtext.c451 unsigned int *eb = (int *)expand_bits_16; draw_byte_16() local
456 base[0] = (eb[bits >> 6] & fg) ^ bg; draw_byte_16()
457 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; draw_byte_16()
458 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; draw_byte_16()
459 base[3] = (eb[bits & 3] & fg) ^ bg; draw_byte_16()
469 unsigned int *eb = (int *)expand_bits_8; draw_byte_8() local
474 base[0] = (eb[bits >> 4] & fg) ^ bg; draw_byte_8()
475 base[1] = (eb[bits & 0xf] & fg) ^ bg; draw_byte_8()
/linux-4.4.14/drivers/media/platform/exynos-gsc/
H A Dgsc-m2m.c383 struct v4l2_exportbuffer *eb) gsc_m2m_expbuf()
386 return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb); gsc_m2m_expbuf()
382 gsc_m2m_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *eb) gsc_m2m_expbuf() argument
/linux-4.4.14/drivers/isdn/i4l/
H A Disdn_tty.c3622 char eb[2]; isdn_tty_edit_at() local
3632 eb[0] = c; isdn_tty_edit_at()
3633 eb[1] = 0; isdn_tty_edit_at()
3634 isdn_tty_at_cout(eb, info); isdn_tty_edit_at()
3653 eb[0] = c; isdn_tty_edit_at()
3654 eb[1] = 0; isdn_tty_edit_at()
3655 isdn_tty_at_cout(eb, info); isdn_tty_edit_at()
/linux-4.4.14/drivers/misc/cxl/
H A Dsysfs.c605 "Unable to create eb attr for the afu. Err(%d)\n", cxl_sysfs_afu_add()
/linux-4.4.14/tools/lib/traceevent/
H A Devent-parse.c5403 struct event_format * const * eb = b; events_id_cmp() local
5405 if ((*ea)->id < (*eb)->id) events_id_cmp()
5408 if ((*ea)->id > (*eb)->id) events_id_cmp()
5417 struct event_format * const * eb = b; events_name_cmp() local
5420 res = strcmp((*ea)->name, (*eb)->name); events_name_cmp()
5424 res = strcmp((*ea)->system, (*eb)->system); events_name_cmp()
5434 struct event_format * const * eb = b; events_system_cmp() local
5437 res = strcmp((*ea)->system, (*eb)->system); events_system_cmp()
5441 res = strcmp((*ea)->name, (*eb)->name); events_system_cmp()
H A Dparse-filter.c107 const struct filter_type *eb = b; filter_cmp() local
109 if (ea->event_id < eb->event_id) filter_cmp()
112 if (ea->event_id > eb->event_id) filter_cmp()
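
events_id_cmp(), events_name_cmp() and filter_cmp() above compare through a "struct event_format * const *": when qsort() sorts an array of pointers, the comparator is handed pointers to the array elements, which are themselves pointers, so two dereferences are needed. A standalone sketch with a hypothetical struct:

/* Sketch of the double-indirection comparator pattern. */
#include <stdio.h>
#include <stdlib.h>

struct event_like {
        int id;
        const char *name;
};

static int events_id_cmp(const void *a, const void *b)
{
        struct event_like * const *ea = a;
        struct event_like * const *eb = b;

        if ((*ea)->id < (*eb)->id)
                return -1;
        if ((*ea)->id > (*eb)->id)
                return 1;
        return 0;
}

int main(void)
{
        struct event_like e1 = { 42, "sched_switch" };
        struct event_like e2 = {  7, "sched_wakeup" };
        struct event_like e3 = { 19, "irq_handler_entry" };
        struct event_like *events[] = { &e1, &e2, &e3 };
        int i;

        qsort(events, 3, sizeof(events[0]), events_id_cmp);
        for (i = 0; i < 3; i++)
                printf("%d %s\n", events[i]->id, events[i]->name);
        return 0;
}
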
/linux-4.4.14/drivers/media/platform/s5p-tv/
H A Dmixer_video.c722 struct v4l2_exportbuffer *eb) mxr_expbuf()
727 return vb2_expbuf(&layer->vb_queue, eb); mxr_expbuf()
721 mxr_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) mxr_expbuf() argument
/linux-4.4.14/drivers/media/usb/em28xx/
H A Dem28xx.h484 u8 id[4]; /* 1a eb 67 95 */
/linux-4.4.14/drivers/net/fddi/skfp/h/
H A Dsmt.h363 u_int eb_error_ct ; /* # of eb overflows */
/linux-4.4.14/arch/ia64/include/asm/
H A Dpal.h575 eb : 1, /* External bus error */ member in struct:pal_bus_check_info_s
743 #define pmci_bus_external_error pme_bus.eb
/linux-4.4.14/net/netfilter/
H A Dx_tables.c39 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
66 [NFPROTO_BRIDGE] = "eb",
/linux-4.4.14/arch/x86/kvm/
H A Dvmx.c1641 u32 eb; update_exception_bitmap() local
1643 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | update_exception_bitmap()
1648 eb |= 1u << BP_VECTOR; update_exception_bitmap()
1650 eb = ~0; update_exception_bitmap()
1652 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ update_exception_bitmap()
1654 eb &= ~(1u << NM_VECTOR); update_exception_bitmap()
1662 eb |= get_vmcs12(vcpu)->exception_bitmap; update_exception_bitmap()
1664 vmcs_write32(EXCEPTION_BITMAP, eb); update_exception_bitmap()
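
update_exception_bitmap() above assembles a 32-bit mask with one bit per x86 exception vector before writing it to the VMCS; setting a bit arms a VM exit for that exception. The sketch below reproduces only the bitmask bookkeeping: the vector numbers are the standard architectural ones, while the two policy toggles are illustrative assumptions, not the full KVM logic.

/* Sketch of per-vector bitmask bookkeeping. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DB_VECTOR  1    /* debug */
#define BP_VECTOR  3    /* breakpoint */
#define UD_VECTOR  6    /* invalid opcode */
#define NM_VECTOR  7    /* device not available */
#define PF_VECTOR 14    /* page fault */
#define MC_VECTOR 18    /* machine check */

static uint32_t build_exception_bitmap(bool guest_debug, bool hw_paging)
{
        uint32_t eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) |
                      (1u << MC_VECTOR) | (1u << NM_VECTOR) |
                      (1u << DB_VECTOR);

        if (guest_debug)
                eb |= 1u << BP_VECTOR;          /* intercept breakpoints */
        if (hw_paging)
                eb &= ~(1u << PF_VECTOR);       /* stop intercepting #PF */

        return eb;
}

int main(void)
{
        printf("no debug, hw paging : 0x%08x\n", build_exception_bitmap(false, true));
        printf("debug, sw paging    : 0x%08x\n", build_exception_bitmap(true, false));
        return 0;
}
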
/linux-4.4.14/drivers/media/usb/gspca/
H A Dspca508.c394 /* READ { 0x0000, 0x860e } -> 0000: eb */
H A Dzc3xx.c3981 {0xa0, 0xeb, ZC3XX_R020_HSYNC_3}, /* 00,20,eb,cc */
4886 {0xaa, 0x0f, 0x00eb}, /* 00,0f,eb,aa */
/linux-4.4.14/drivers/mtd/onenand/
H A Donenand_base.c2305 /* loop over 64 eb batches */ onenand_multiblock_erase()
2345 /* last block of 64-eb series */ onenand_multiblock_erase()
/linux-4.4.14/drivers/mtd/nand/
H A Dnandsim.c161 " separated by commas e.g. 113:2 means eb 113"
