Searched refs:eb (Results 1 - 82 of 82) sorted by relevance

/linux-4.1.27/fs/btrfs/
locking.c
27 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
34 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) btrfs_set_lock_blocking_rw() argument
42 if (eb->lock_nested && current->pid == eb->lock_owner) btrfs_set_lock_blocking_rw()
45 if (atomic_read(&eb->blocking_writers) == 0) { btrfs_set_lock_blocking_rw()
46 WARN_ON(atomic_read(&eb->spinning_writers) != 1); btrfs_set_lock_blocking_rw()
47 atomic_dec(&eb->spinning_writers); btrfs_set_lock_blocking_rw()
48 btrfs_assert_tree_locked(eb); btrfs_set_lock_blocking_rw()
49 atomic_inc(&eb->blocking_writers); btrfs_set_lock_blocking_rw()
50 write_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
53 btrfs_assert_tree_read_locked(eb); btrfs_set_lock_blocking_rw()
54 atomic_inc(&eb->blocking_readers); btrfs_set_lock_blocking_rw()
55 WARN_ON(atomic_read(&eb->spinning_readers) == 0); btrfs_set_lock_blocking_rw()
56 atomic_dec(&eb->spinning_readers); btrfs_set_lock_blocking_rw()
57 read_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
66 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) btrfs_clear_lock_blocking_rw() argument
74 if (eb->lock_nested && current->pid == eb->lock_owner) btrfs_clear_lock_blocking_rw()
78 BUG_ON(atomic_read(&eb->blocking_writers) != 1); btrfs_clear_lock_blocking_rw()
79 write_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
80 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_clear_lock_blocking_rw()
81 atomic_inc(&eb->spinning_writers); btrfs_clear_lock_blocking_rw()
82 if (atomic_dec_and_test(&eb->blocking_writers) && btrfs_clear_lock_blocking_rw()
83 waitqueue_active(&eb->write_lock_wq)) btrfs_clear_lock_blocking_rw()
84 wake_up(&eb->write_lock_wq); btrfs_clear_lock_blocking_rw()
86 BUG_ON(atomic_read(&eb->blocking_readers) == 0); btrfs_clear_lock_blocking_rw()
87 read_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
88 atomic_inc(&eb->spinning_readers); btrfs_clear_lock_blocking_rw()
89 if (atomic_dec_and_test(&eb->blocking_readers) && btrfs_clear_lock_blocking_rw()
90 waitqueue_active(&eb->read_lock_wq)) btrfs_clear_lock_blocking_rw()
91 wake_up(&eb->read_lock_wq); btrfs_clear_lock_blocking_rw()
100 void btrfs_tree_read_lock(struct extent_buffer *eb) btrfs_tree_read_lock() argument
103 BUG_ON(!atomic_read(&eb->blocking_writers) && btrfs_tree_read_lock()
104 current->pid == eb->lock_owner); btrfs_tree_read_lock()
106 read_lock(&eb->lock); btrfs_tree_read_lock()
107 if (atomic_read(&eb->blocking_writers) && btrfs_tree_read_lock()
108 current->pid == eb->lock_owner) { btrfs_tree_read_lock()
115 BUG_ON(eb->lock_nested); btrfs_tree_read_lock()
116 eb->lock_nested = 1; btrfs_tree_read_lock()
117 read_unlock(&eb->lock); btrfs_tree_read_lock()
120 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_read_lock()
121 read_unlock(&eb->lock); btrfs_tree_read_lock()
122 wait_event(eb->write_lock_wq, btrfs_tree_read_lock()
123 atomic_read(&eb->blocking_writers) == 0); btrfs_tree_read_lock()
126 atomic_inc(&eb->read_locks); btrfs_tree_read_lock()
127 atomic_inc(&eb->spinning_readers); btrfs_tree_read_lock()
135 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) btrfs_tree_read_lock_atomic() argument
137 if (atomic_read(&eb->blocking_writers)) btrfs_tree_read_lock_atomic()
140 read_lock(&eb->lock); btrfs_tree_read_lock_atomic()
141 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_read_lock_atomic()
142 read_unlock(&eb->lock); btrfs_tree_read_lock_atomic()
145 atomic_inc(&eb->read_locks); btrfs_tree_read_lock_atomic()
146 atomic_inc(&eb->spinning_readers); btrfs_tree_read_lock_atomic()
154 int btrfs_try_tree_read_lock(struct extent_buffer *eb) btrfs_try_tree_read_lock() argument
156 if (atomic_read(&eb->blocking_writers)) btrfs_try_tree_read_lock()
159 if (!read_trylock(&eb->lock)) btrfs_try_tree_read_lock()
162 if (atomic_read(&eb->blocking_writers)) { btrfs_try_tree_read_lock()
163 read_unlock(&eb->lock); btrfs_try_tree_read_lock()
166 atomic_inc(&eb->read_locks); btrfs_try_tree_read_lock()
167 atomic_inc(&eb->spinning_readers); btrfs_try_tree_read_lock()
175 int btrfs_try_tree_write_lock(struct extent_buffer *eb) btrfs_try_tree_write_lock() argument
177 if (atomic_read(&eb->blocking_writers) || btrfs_try_tree_write_lock()
178 atomic_read(&eb->blocking_readers)) btrfs_try_tree_write_lock()
181 write_lock(&eb->lock); btrfs_try_tree_write_lock()
182 if (atomic_read(&eb->blocking_writers) || btrfs_try_tree_write_lock()
183 atomic_read(&eb->blocking_readers)) { btrfs_try_tree_write_lock()
184 write_unlock(&eb->lock); btrfs_try_tree_write_lock()
187 atomic_inc(&eb->write_locks); btrfs_try_tree_write_lock()
188 atomic_inc(&eb->spinning_writers); btrfs_try_tree_write_lock()
189 eb->lock_owner = current->pid; btrfs_try_tree_write_lock()
196 void btrfs_tree_read_unlock(struct extent_buffer *eb) btrfs_tree_read_unlock() argument
204 if (eb->lock_nested && current->pid == eb->lock_owner) { btrfs_tree_read_unlock()
205 eb->lock_nested = 0; btrfs_tree_read_unlock()
208 btrfs_assert_tree_read_locked(eb); btrfs_tree_read_unlock()
209 WARN_ON(atomic_read(&eb->spinning_readers) == 0); btrfs_tree_read_unlock()
210 atomic_dec(&eb->spinning_readers); btrfs_tree_read_unlock()
211 atomic_dec(&eb->read_locks); btrfs_tree_read_unlock()
212 read_unlock(&eb->lock); btrfs_tree_read_unlock()
218 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) btrfs_tree_read_unlock_blocking() argument
226 if (eb->lock_nested && current->pid == eb->lock_owner) { btrfs_tree_read_unlock_blocking()
227 eb->lock_nested = 0; btrfs_tree_read_unlock_blocking()
230 btrfs_assert_tree_read_locked(eb); btrfs_tree_read_unlock_blocking()
231 WARN_ON(atomic_read(&eb->blocking_readers) == 0); btrfs_tree_read_unlock_blocking()
232 if (atomic_dec_and_test(&eb->blocking_readers) && btrfs_tree_read_unlock_blocking()
233 waitqueue_active(&eb->read_lock_wq)) btrfs_tree_read_unlock_blocking()
234 wake_up(&eb->read_lock_wq); btrfs_tree_read_unlock_blocking()
235 atomic_dec(&eb->read_locks); btrfs_tree_read_unlock_blocking()
242 void btrfs_tree_lock(struct extent_buffer *eb) btrfs_tree_lock() argument
245 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); btrfs_tree_lock()
246 wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0); btrfs_tree_lock()
247 write_lock(&eb->lock); btrfs_tree_lock()
248 if (atomic_read(&eb->blocking_readers)) { btrfs_tree_lock()
249 write_unlock(&eb->lock); btrfs_tree_lock()
250 wait_event(eb->read_lock_wq, btrfs_tree_lock()
251 atomic_read(&eb->blocking_readers) == 0); btrfs_tree_lock()
254 if (atomic_read(&eb->blocking_writers)) { btrfs_tree_lock()
255 write_unlock(&eb->lock); btrfs_tree_lock()
256 wait_event(eb->write_lock_wq, btrfs_tree_lock()
257 atomic_read(&eb->blocking_writers) == 0); btrfs_tree_lock()
260 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_tree_lock()
261 atomic_inc(&eb->spinning_writers); btrfs_tree_lock()
262 atomic_inc(&eb->write_locks); btrfs_tree_lock()
263 eb->lock_owner = current->pid; btrfs_tree_lock()
269 void btrfs_tree_unlock(struct extent_buffer *eb) btrfs_tree_unlock() argument
271 int blockers = atomic_read(&eb->blocking_writers); btrfs_tree_unlock()
275 btrfs_assert_tree_locked(eb); btrfs_tree_unlock()
276 eb->lock_owner = 0; btrfs_tree_unlock()
277 atomic_dec(&eb->write_locks); btrfs_tree_unlock()
280 WARN_ON(atomic_read(&eb->spinning_writers)); btrfs_tree_unlock()
281 atomic_dec(&eb->blocking_writers); btrfs_tree_unlock()
283 if (waitqueue_active(&eb->write_lock_wq)) btrfs_tree_unlock()
284 wake_up(&eb->write_lock_wq); btrfs_tree_unlock()
286 WARN_ON(atomic_read(&eb->spinning_writers) != 1); btrfs_tree_unlock()
287 atomic_dec(&eb->spinning_writers); btrfs_tree_unlock()
288 write_unlock(&eb->lock); btrfs_tree_unlock()
292 void btrfs_assert_tree_locked(struct extent_buffer *eb) btrfs_assert_tree_locked() argument
294 BUG_ON(!atomic_read(&eb->write_locks)); btrfs_assert_tree_locked()
297 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) btrfs_assert_tree_read_locked() argument
299 BUG_ON(!atomic_read(&eb->read_locks)); btrfs_assert_tree_read_locked()
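The locking.c hits above spell out the two-phase write-lock protocol: btrfs_tree_lock() takes the spinning write lock, btrfs_set_lock_blocking() converts it to the blocking form (releasing eb->lock and raising blocking_writers) before code that may sleep, btrfs_clear_lock_blocking() converts back, and btrfs_tree_unlock() releases whichever form is held. Below is a minimal usage sketch, assuming the usual fs/btrfs headers; do_sleeping_work() is a hypothetical placeholder, not a btrfs function.

#include "ctree.h"
#include "locking.h"

/* Hypothetical helper standing in for work that may schedule. */
static void do_sleeping_work(struct extent_buffer *eb);

static void modify_node_example(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);            /* spinning write lock, records lock_owner */
        btrfs_set_lock_blocking(eb);    /* drop eb->lock, raise blocking_writers */

        do_sleeping_work(eb);           /* safe to sleep while the lock is blocking */

        btrfs_clear_lock_blocking(eb);  /* reacquire eb->lock, back to spinning */
        btrfs_tree_unlock(eb);          /* wakes write_lock_wq waiters if needed */
}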
locking.h
27 void btrfs_tree_lock(struct extent_buffer *eb);
28 void btrfs_tree_unlock(struct extent_buffer *eb);
30 void btrfs_tree_read_lock(struct extent_buffer *eb);
31 void btrfs_tree_read_unlock(struct extent_buffer *eb);
32 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
33 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
34 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
35 void btrfs_assert_tree_locked(struct extent_buffer *eb);
36 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
37 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
38 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
41 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) btrfs_tree_unlock_rw() argument
44 btrfs_tree_unlock(eb); btrfs_tree_unlock_rw()
46 btrfs_tree_read_unlock_blocking(eb); btrfs_tree_unlock_rw()
48 btrfs_tree_read_unlock(eb); btrfs_tree_unlock_rw()
53 static inline void btrfs_set_lock_blocking(struct extent_buffer *eb) btrfs_set_lock_blocking() argument
55 btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK); btrfs_set_lock_blocking()
58 static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb) btrfs_clear_lock_blocking() argument
60 btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING); btrfs_clear_lock_blocking()
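locking.h also declares the read-side entry points and the _rw variants keyed by the BTRFS_READ_LOCK / BTRFS_WRITE_LOCK* constants (assumed to be defined next to these prototypes). A sketch of the read path, mirroring the pattern the backref.c hits further down use; not a verbatim excerpt.

static void read_node_example(struct extent_buffer *eb)
{
        btrfs_tree_read_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

        /* ... read fields from eb; other readers are still admitted ... */

        btrfs_tree_read_unlock_blocking(eb);
}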
print-tree.c
23 static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) print_chunk() argument
25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk); print_chunk()
29 btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk), print_chunk()
30 btrfs_chunk_type(eb, chunk), num_stripes); print_chunk()
33 btrfs_stripe_devid_nr(eb, chunk, i), print_chunk()
34 btrfs_stripe_offset_nr(eb, chunk, i)); print_chunk()
37 static void print_dev_item(struct extent_buffer *eb, print_dev_item() argument
42 btrfs_device_id(eb, dev_item), print_dev_item()
43 btrfs_device_total_bytes(eb, dev_item), print_dev_item()
44 btrfs_device_bytes_used(eb, dev_item)); print_dev_item()
46 static void print_extent_data_ref(struct extent_buffer *eb, print_extent_data_ref() argument
51 btrfs_extent_data_ref_root(eb, ref), print_extent_data_ref()
52 btrfs_extent_data_ref_objectid(eb, ref), print_extent_data_ref()
53 btrfs_extent_data_ref_offset(eb, ref), print_extent_data_ref()
54 btrfs_extent_data_ref_count(eb, ref)); print_extent_data_ref()
57 static void print_extent_item(struct extent_buffer *eb, int slot, int type) print_extent_item() argument
66 u32 item_size = btrfs_item_size_nr(eb, slot); print_extent_item()
74 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0); print_extent_item()
76 btrfs_extent_refs_v0(eb, ei0)); print_extent_item()
83 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item); print_extent_item()
84 flags = btrfs_extent_flags(eb, ei); print_extent_item()
87 btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei), print_extent_item()
94 btrfs_tree_block_key(eb, info, &key); print_extent_item()
99 btrfs_tree_block_level(eb, info)); print_extent_item()
109 type = btrfs_extent_inline_ref_type(eb, iref); print_extent_item()
110 offset = btrfs_extent_inline_ref_offset(eb, iref); print_extent_item()
122 print_extent_data_ref(eb, dref); print_extent_item()
128 offset, btrfs_shared_data_ref_count(eb, sref)); print_extent_item()
139 static void print_extent_ref_v0(struct extent_buffer *eb, int slot) print_extent_ref_v0() argument
143 ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0); print_extent_ref_v0()
146 btrfs_ref_root_v0(eb, ref0), print_extent_ref_v0()
147 btrfs_ref_generation_v0(eb, ref0), print_extent_ref_v0()
148 btrfs_ref_objectid_v0(eb, ref0), print_extent_ref_v0()
149 (unsigned long)btrfs_ref_count_v0(eb, ref0)); print_extent_ref_v0()
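print-tree.c never dereferences on-disk structures directly; every field goes through a btrfs_* accessor that takes the eb plus a pointer into it, because an item can straddle extent-buffer pages. A small sketch in the same style, using only accessors that appear in the print_chunk() hits above; the printk text is illustrative, not the file's exact output.

static void print_chunk_stripes(struct extent_buffer *eb,
                                struct btrfs_chunk *chunk)
{
        int i;
        int num_stripes = btrfs_chunk_num_stripes(eb, chunk);

        for (i = 0; i < num_stripes; i++)
                printk(KERN_INFO "\t\tstripe %d devid %llu offset %llu\n", i,
                       btrfs_stripe_devid_nr(eb, chunk, i),
                       btrfs_stripe_offset_nr(eb, chunk, i));
}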
struct-funcs.c
53 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
68 token->eb == eb && \
75 err = map_private_extent_buffer(eb, offset, size, \
80 read_extent_buffer(eb, &leres, offset, size); \
88 token->eb = eb; \
92 void btrfs_set_token_##bits(struct extent_buffer *eb, \
106 token->eb == eb && \
113 err = map_private_extent_buffer(eb, offset, size, \
119 write_extent_buffer(eb, &val2, offset, size); \
127 token->eb = eb; \
136 void btrfs_node_key(struct extent_buffer *eb, btrfs_node_key() argument
140 read_eb_member(eb, (struct btrfs_key_ptr *)ptr, btrfs_node_key()
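The struct-funcs.c hits are fragments of the macro that generates btrfs_get_token_##bits()/btrfs_set_token_##bits(): the token caches the last mapped page (token->eb plus a kernel address), and the slow path falls back to map_private_extent_buffer() or a plain read_extent_buffer() copy. Below is a simplified, non-macro sketch of the uncached 64-bit read path; it omits the token cache entirely and is not the macro's exact body.

static u64 get_u64_example(struct extent_buffer *eb, unsigned long offset)
{
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        __le64 leres;
        int err;

        err = map_private_extent_buffer(eb, offset, sizeof(u64),
                                        &kaddr, &map_start, &map_len);
        if (!err)       /* the whole value lives inside one mapped page */
                return le64_to_cpu(*(__le64 *)(kaddr + offset - map_start));

        /* value straddles a page boundary: copy it out instead */
        read_extent_buffer(eb, &leres, offset, sizeof(u64));
        return le64_to_cpu(leres);
}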
uuid-tree.c
39 struct extent_buffer *eb; btrfs_uuid_tree_lookup() local
65 eb = path->nodes[0]; btrfs_uuid_tree_lookup()
67 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_lookup()
68 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_lookup()
79 read_extent_buffer(eb, &data, offset, sizeof(data)); btrfs_uuid_tree_lookup()
100 struct extent_buffer *eb; btrfs_uuid_tree_add() local
126 eb = path->nodes[0]; btrfs_uuid_tree_add()
128 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_add()
135 eb = path->nodes[0]; btrfs_uuid_tree_add()
137 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_add()
138 offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le); btrfs_uuid_tree_add()
149 write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le)); btrfs_uuid_tree_add()
150 btrfs_mark_buffer_dirty(eb); btrfs_uuid_tree_add()
164 struct extent_buffer *eb; btrfs_uuid_tree_rem() local
196 eb = path->nodes[0]; btrfs_uuid_tree_rem()
198 offset = btrfs_item_ptr_offset(eb, slot); btrfs_uuid_tree_rem()
199 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_rem()
209 read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid)); btrfs_uuid_tree_rem()
221 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_tree_rem()
229 move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot)); btrfs_uuid_tree_rem()
230 memmove_extent_buffer(eb, move_dst, move_src, move_len); btrfs_uuid_tree_rem()
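The uuid-tree items seen here are flat arrays of little-endian subvolume ids: lookups copy candidates out with read_extent_buffer(), and additions append a subid_le with write_extent_buffer() followed by btrfs_mark_buffer_dirty(). A hedged sketch of the lookup scan, assuming the caller already positioned the path on the item with btrfs_search_slot(); error handling is omitted and uuid_item_contains() is not a btrfs function.

static int uuid_item_contains(struct btrfs_path *path, u64 subid)
{
        struct extent_buffer *eb = path->nodes[0];
        int slot = path->slots[0];
        u32 item_size = btrfs_item_size_nr(eb, slot);
        unsigned long offset = btrfs_item_ptr_offset(eb, slot);
        __le64 data;
        u32 cur;

        for (cur = 0; cur + sizeof(data) <= item_size; cur += sizeof(data)) {
                read_extent_buffer(eb, &data, offset + cur, sizeof(data));
                if (le64_to_cpu(data) == subid)
                        return 1;
        }
        return 0;
}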
extent_io.c
63 struct extent_buffer *eb; btrfs_leak_debug_check() local
76 eb = list_entry(buffers.next, struct extent_buffer, leak_list); btrfs_leak_debug_check()
79 eb->start, eb->len, atomic_read(&eb->refs)); btrfs_leak_debug_check()
80 list_del(&eb->leak_list); btrfs_leak_debug_check()
81 kmem_cache_free(extent_buffer_cache, eb); btrfs_leak_debug_check()
2084 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, repair_eb_io_failure() argument
2087 u64 start = eb->start; repair_eb_io_failure()
2088 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); repair_eb_io_failure()
2095 struct page *p = eb->pages[i]; repair_eb_io_failure()
2852 static void attach_extent_buffer_page(struct extent_buffer *eb, attach_extent_buffer_page() argument
2858 set_page_private(page, (unsigned long)eb); attach_extent_buffer_page()
2860 WARN_ON(page->private != (unsigned long)eb); attach_extent_buffer_page()
3601 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) wait_on_extent_buffer_writeback() argument
3603 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, wait_on_extent_buffer_writeback()
3608 lock_extent_buffer_for_io(struct extent_buffer *eb, lock_extent_buffer_for_io() argument
3616 if (!btrfs_try_tree_write_lock(eb)) { lock_extent_buffer_for_io()
3619 btrfs_tree_lock(eb); lock_extent_buffer_for_io()
3622 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { lock_extent_buffer_for_io()
3623 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3631 wait_on_extent_buffer_writeback(eb); lock_extent_buffer_for_io()
3632 btrfs_tree_lock(eb); lock_extent_buffer_for_io()
3633 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) lock_extent_buffer_for_io()
3635 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3640 * We need to do this to prevent races in people who check if the eb is lock_extent_buffer_for_io()
3644 spin_lock(&eb->refs_lock); lock_extent_buffer_for_io()
3645 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { lock_extent_buffer_for_io()
3646 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); lock_extent_buffer_for_io()
3647 spin_unlock(&eb->refs_lock); lock_extent_buffer_for_io()
3648 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); lock_extent_buffer_for_io()
3650 -eb->len, lock_extent_buffer_for_io()
3654 spin_unlock(&eb->refs_lock); lock_extent_buffer_for_io()
3657 btrfs_tree_unlock(eb); lock_extent_buffer_for_io()
3662 num_pages = num_extent_pages(eb->start, eb->len); lock_extent_buffer_for_io()
3664 struct page *p = eb->pages[i]; lock_extent_buffer_for_io()
3678 static void end_extent_buffer_writeback(struct extent_buffer *eb) end_extent_buffer_writeback() argument
3680 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); end_extent_buffer_writeback()
3682 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); end_extent_buffer_writeback()
3687 struct extent_buffer *eb = (struct extent_buffer *)page->private; set_btree_ioerr() local
3688 struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode); set_btree_ioerr()
3691 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) set_btree_ioerr()
3718 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is set_btree_ioerr()
3719 * not done and would not be reliable - the eb might have been released set_btree_ioerr()
3732 switch (eb->log_index) { set_btree_ioerr()
3750 struct extent_buffer *eb; end_bio_extent_buffer_writepage() local
3756 eb = (struct extent_buffer *)page->private; bio_for_each_segment_all()
3757 BUG_ON(!eb); bio_for_each_segment_all()
3758 done = atomic_dec_and_test(&eb->io_pages); bio_for_each_segment_all()
3760 if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { bio_for_each_segment_all()
3770 end_extent_buffer_writeback(eb); bio_for_each_segment_all()
3776 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, write_one_eb() argument
3783 u64 offset = eb->start; write_one_eb()
3789 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); write_one_eb()
3790 num_pages = num_extent_pages(eb->start, eb->len); write_one_eb()
3791 atomic_set(&eb->io_pages, num_pages); write_one_eb()
3792 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) write_one_eb()
3796 struct page *p = eb->pages[i]; write_one_eb()
3808 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) write_one_eb()
3809 end_extent_buffer_writeback(eb); write_one_eb()
3820 struct page *p = eb->pages[i]; write_one_eb()
3834 struct extent_buffer *eb, *prev_eb = NULL; btree_write_cache_pages() local
3891 eb = (struct extent_buffer *)page->private; btree_write_cache_pages()
3898 if (WARN_ON(!eb)) { btree_write_cache_pages()
3903 if (eb == prev_eb) { btree_write_cache_pages()
3908 ret = atomic_inc_not_zero(&eb->refs); btree_write_cache_pages()
3913 prev_eb = eb; btree_write_cache_pages()
3914 ret = lock_extent_buffer_for_io(eb, fs_info, &epd); btree_write_cache_pages()
3916 free_extent_buffer(eb); btree_write_cache_pages()
3920 ret = write_one_eb(eb, fs_info, wbc, &epd); btree_write_cache_pages()
3923 free_extent_buffer(eb); btree_write_cache_pages()
3926 free_extent_buffer(eb); btree_write_cache_pages()
4584 static void __free_extent_buffer(struct extent_buffer *eb) __free_extent_buffer() argument
4586 btrfs_leak_debug_del(&eb->leak_list); __free_extent_buffer()
4587 kmem_cache_free(extent_buffer_cache, eb); __free_extent_buffer()
4590 int extent_buffer_under_io(struct extent_buffer *eb) extent_buffer_under_io() argument
4592 return (atomic_read(&eb->io_pages) || extent_buffer_under_io()
4593 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || extent_buffer_under_io()
4594 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); extent_buffer_under_io()
4600 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) btrfs_release_extent_buffer_page() argument
4604 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); btrfs_release_extent_buffer_page()
4606 BUG_ON(extent_buffer_under_io(eb)); btrfs_release_extent_buffer_page()
4608 index = num_extent_pages(eb->start, eb->len); btrfs_release_extent_buffer_page()
4614 page = eb->pages[index]; btrfs_release_extent_buffer_page()
4621 * removed the eb from the radix tree, so we could race btrfs_release_extent_buffer_page()
4622 * and have this page now attached to the new eb. So btrfs_release_extent_buffer_page()
4624 * this eb. btrfs_release_extent_buffer_page()
4627 page->private == (unsigned long)eb) { btrfs_release_extent_buffer_page()
4628 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); btrfs_release_extent_buffer_page()
4633 * to a new eb. btrfs_release_extent_buffer_page()
4652 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) btrfs_release_extent_buffer() argument
4654 btrfs_release_extent_buffer_page(eb); btrfs_release_extent_buffer()
4655 __free_extent_buffer(eb); btrfs_release_extent_buffer()
4662 struct extent_buffer *eb = NULL; __alloc_extent_buffer() local
4664 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS); __alloc_extent_buffer()
4665 if (eb == NULL) __alloc_extent_buffer()
4667 eb->start = start; __alloc_extent_buffer()
4668 eb->len = len; __alloc_extent_buffer()
4669 eb->fs_info = fs_info; __alloc_extent_buffer()
4670 eb->bflags = 0; __alloc_extent_buffer()
4671 rwlock_init(&eb->lock); __alloc_extent_buffer()
4672 atomic_set(&eb->write_locks, 0); __alloc_extent_buffer()
4673 atomic_set(&eb->read_locks, 0); __alloc_extent_buffer()
4674 atomic_set(&eb->blocking_readers, 0); __alloc_extent_buffer()
4675 atomic_set(&eb->blocking_writers, 0); __alloc_extent_buffer()
4676 atomic_set(&eb->spinning_readers, 0); __alloc_extent_buffer()
4677 atomic_set(&eb->spinning_writers, 0); __alloc_extent_buffer()
4678 eb->lock_nested = 0; __alloc_extent_buffer()
4679 init_waitqueue_head(&eb->write_lock_wq); __alloc_extent_buffer()
4680 init_waitqueue_head(&eb->read_lock_wq); __alloc_extent_buffer()
4682 btrfs_leak_debug_add(&eb->leak_list, &buffers); __alloc_extent_buffer()
4684 spin_lock_init(&eb->refs_lock); __alloc_extent_buffer()
4685 atomic_set(&eb->refs, 1); __alloc_extent_buffer()
4686 atomic_set(&eb->io_pages, 0); __alloc_extent_buffer()
4695 return eb; __alloc_extent_buffer()
4731 struct extent_buffer *eb; alloc_dummy_extent_buffer() local
4747 eb = __alloc_extent_buffer(fs_info, start, len); alloc_dummy_extent_buffer()
4748 if (!eb) alloc_dummy_extent_buffer()
4752 eb->pages[i] = alloc_page(GFP_NOFS); alloc_dummy_extent_buffer()
4753 if (!eb->pages[i]) alloc_dummy_extent_buffer()
4756 set_extent_buffer_uptodate(eb); alloc_dummy_extent_buffer()
4757 btrfs_set_header_nritems(eb, 0); alloc_dummy_extent_buffer()
4758 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags); alloc_dummy_extent_buffer()
4760 return eb; alloc_dummy_extent_buffer()
4763 __free_page(eb->pages[i - 1]); alloc_dummy_extent_buffer()
4764 __free_extent_buffer(eb); alloc_dummy_extent_buffer()
4768 static void check_buffer_tree_ref(struct extent_buffer *eb) check_buffer_tree_ref() argument
4779 * eb bumped. check_buffer_tree_ref()
4782 * ref on the eb because free_extent_buffer might check_buffer_tree_ref()
4791 refs = atomic_read(&eb->refs); check_buffer_tree_ref()
4792 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) check_buffer_tree_ref()
4795 spin_lock(&eb->refs_lock); check_buffer_tree_ref()
4796 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) check_buffer_tree_ref()
4797 atomic_inc(&eb->refs); check_buffer_tree_ref()
4798 spin_unlock(&eb->refs_lock); check_buffer_tree_ref()
4801 static void mark_extent_buffer_accessed(struct extent_buffer *eb, mark_extent_buffer_accessed() argument
4806 check_buffer_tree_ref(eb); mark_extent_buffer_accessed()
4808 num_pages = num_extent_pages(eb->start, eb->len); mark_extent_buffer_accessed()
4810 struct page *p = eb->pages[i]; mark_extent_buffer_accessed()
4820 struct extent_buffer *eb; find_extent_buffer() local
4823 eb = radix_tree_lookup(&fs_info->buffer_radix, find_extent_buffer()
4825 if (eb && atomic_inc_not_zero(&eb->refs)) { find_extent_buffer()
4828 * Lock our eb's refs_lock to avoid races with find_extent_buffer()
4829 * free_extent_buffer. When we get our eb it might be flagged find_extent_buffer()
4832 * eb->refs == 2, that the buffer isn't under IO (dirty and find_extent_buffer()
4836 * So here we could race and increment the eb's reference count, find_extent_buffer()
4842 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { find_extent_buffer()
4843 spin_lock(&eb->refs_lock); find_extent_buffer()
4844 spin_unlock(&eb->refs_lock); find_extent_buffer()
4846 mark_extent_buffer_accessed(eb, NULL); find_extent_buffer()
4847 return eb; find_extent_buffer()
4858 struct extent_buffer *eb, *exists = NULL; alloc_test_extent_buffer() local
4861 eb = find_extent_buffer(fs_info, start); alloc_test_extent_buffer()
4862 if (eb) alloc_test_extent_buffer()
4863 return eb; alloc_test_extent_buffer()
4864 eb = alloc_dummy_extent_buffer(fs_info, start); alloc_test_extent_buffer()
4865 if (!eb) alloc_test_extent_buffer()
4867 eb->fs_info = fs_info; alloc_test_extent_buffer()
4874 start >> PAGE_CACHE_SHIFT, eb); alloc_test_extent_buffer()
4884 check_buffer_tree_ref(eb); alloc_test_extent_buffer()
4885 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); alloc_test_extent_buffer()
4893 atomic_inc(&eb->refs); alloc_test_extent_buffer()
4894 return eb; alloc_test_extent_buffer()
4896 btrfs_release_extent_buffer(eb); alloc_test_extent_buffer()
4908 struct extent_buffer *eb; alloc_extent_buffer() local
4915 eb = find_extent_buffer(fs_info, start); alloc_extent_buffer()
4916 if (eb) alloc_extent_buffer()
4917 return eb; alloc_extent_buffer()
4919 eb = __alloc_extent_buffer(fs_info, start, len); alloc_extent_buffer()
4920 if (!eb) alloc_extent_buffer()
4931 * We could have already allocated an eb for this page alloc_extent_buffer()
4933 * the existing eb, and if we can we know it's good and alloc_extent_buffer()
4955 attach_extent_buffer_page(eb, p); alloc_extent_buffer()
4958 eb->pages[i] = p; alloc_extent_buffer()
4968 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); alloc_extent_buffer()
4976 start >> PAGE_CACHE_SHIFT, eb); alloc_extent_buffer()
4987 check_buffer_tree_ref(eb); alloc_extent_buffer()
4988 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); alloc_extent_buffer()
4999 SetPageChecked(eb->pages[0]); alloc_extent_buffer()
5001 p = eb->pages[i]; alloc_extent_buffer()
5005 unlock_page(eb->pages[0]); alloc_extent_buffer()
5006 return eb; alloc_extent_buffer()
5009 WARN_ON(!atomic_dec_and_test(&eb->refs)); alloc_extent_buffer()
5011 if (eb->pages[i]) alloc_extent_buffer()
5012 unlock_page(eb->pages[i]); alloc_extent_buffer()
5015 btrfs_release_extent_buffer(eb); alloc_extent_buffer()
5021 struct extent_buffer *eb = btrfs_release_extent_buffer_rcu() local
5024 __free_extent_buffer(eb); btrfs_release_extent_buffer_rcu()
5027 /* Expects to have eb->eb_lock already held */ release_extent_buffer()
5028 static int release_extent_buffer(struct extent_buffer *eb) release_extent_buffer() argument
5030 WARN_ON(atomic_read(&eb->refs) == 0); release_extent_buffer()
5031 if (atomic_dec_and_test(&eb->refs)) { release_extent_buffer()
5032 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { release_extent_buffer()
5033 struct btrfs_fs_info *fs_info = eb->fs_info; release_extent_buffer()
5035 spin_unlock(&eb->refs_lock); release_extent_buffer()
5039 eb->start >> PAGE_CACHE_SHIFT); release_extent_buffer()
5042 spin_unlock(&eb->refs_lock); release_extent_buffer()
5046 btrfs_release_extent_buffer_page(eb); release_extent_buffer()
5048 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { release_extent_buffer()
5049 __free_extent_buffer(eb); release_extent_buffer()
5053 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); release_extent_buffer()
5056 spin_unlock(&eb->refs_lock); release_extent_buffer()
5061 void free_extent_buffer(struct extent_buffer *eb) free_extent_buffer() argument
5065 if (!eb) free_extent_buffer()
5069 refs = atomic_read(&eb->refs); free_extent_buffer()
5072 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); free_extent_buffer()
5077 spin_lock(&eb->refs_lock); free_extent_buffer()
5078 if (atomic_read(&eb->refs) == 2 && free_extent_buffer()
5079 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) free_extent_buffer()
5080 atomic_dec(&eb->refs); free_extent_buffer()
5082 if (atomic_read(&eb->refs) == 2 && free_extent_buffer()
5083 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && free_extent_buffer()
5084 !extent_buffer_under_io(eb) && free_extent_buffer()
5085 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) free_extent_buffer()
5086 atomic_dec(&eb->refs); free_extent_buffer()
5092 release_extent_buffer(eb); free_extent_buffer()
5095 void free_extent_buffer_stale(struct extent_buffer *eb) free_extent_buffer_stale() argument
5097 if (!eb) free_extent_buffer_stale()
5100 spin_lock(&eb->refs_lock); free_extent_buffer_stale()
5101 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); free_extent_buffer_stale()
5103 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && free_extent_buffer_stale()
5104 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) free_extent_buffer_stale()
5105 atomic_dec(&eb->refs); free_extent_buffer_stale()
5106 release_extent_buffer(eb); free_extent_buffer_stale()
5109 void clear_extent_buffer_dirty(struct extent_buffer *eb) clear_extent_buffer_dirty() argument
5115 num_pages = num_extent_pages(eb->start, eb->len); clear_extent_buffer_dirty()
5118 page = eb->pages[i]; clear_extent_buffer_dirty()
5136 WARN_ON(atomic_read(&eb->refs) == 0); clear_extent_buffer_dirty()
5139 int set_extent_buffer_dirty(struct extent_buffer *eb) set_extent_buffer_dirty() argument
5145 check_buffer_tree_ref(eb); set_extent_buffer_dirty()
5147 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); set_extent_buffer_dirty()
5149 num_pages = num_extent_pages(eb->start, eb->len); set_extent_buffer_dirty()
5150 WARN_ON(atomic_read(&eb->refs) == 0); set_extent_buffer_dirty()
5151 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); set_extent_buffer_dirty()
5154 set_page_dirty(eb->pages[i]); set_extent_buffer_dirty()
5158 int clear_extent_buffer_uptodate(struct extent_buffer *eb) clear_extent_buffer_uptodate() argument
5164 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); clear_extent_buffer_uptodate()
5165 num_pages = num_extent_pages(eb->start, eb->len); clear_extent_buffer_uptodate()
5167 page = eb->pages[i]; clear_extent_buffer_uptodate()
5174 int set_extent_buffer_uptodate(struct extent_buffer *eb) set_extent_buffer_uptodate() argument
5180 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); set_extent_buffer_uptodate()
5181 num_pages = num_extent_pages(eb->start, eb->len); set_extent_buffer_uptodate()
5183 page = eb->pages[i]; set_extent_buffer_uptodate()
5189 int extent_buffer_uptodate(struct extent_buffer *eb) extent_buffer_uptodate() argument
5191 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); extent_buffer_uptodate()
5195 struct extent_buffer *eb, u64 start, int wait, read_extent_buffer_pages()
5210 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) read_extent_buffer_pages()
5214 WARN_ON(start < eb->start); read_extent_buffer_pages()
5216 (eb->start >> PAGE_CACHE_SHIFT); read_extent_buffer_pages()
5221 num_pages = num_extent_pages(eb->start, eb->len); read_extent_buffer_pages()
5223 page = eb->pages[i]; read_extent_buffer_pages()
5238 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); read_extent_buffer_pages()
5242 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); read_extent_buffer_pages()
5243 eb->read_mirror = 0; read_extent_buffer_pages()
5244 atomic_set(&eb->io_pages, num_reads); read_extent_buffer_pages()
5246 page = eb->pages[i]; read_extent_buffer_pages()
5271 page = eb->pages[i]; read_extent_buffer_pages()
5282 page = eb->pages[i]; read_extent_buffer_pages()
5290 void read_extent_buffer(struct extent_buffer *eb, void *dstv, read_extent_buffer() argument
5299 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); read_extent_buffer()
5302 WARN_ON(start > eb->len); read_extent_buffer()
5303 WARN_ON(start + len > eb->start + eb->len); read_extent_buffer()
5308 page = eb->pages[i]; read_extent_buffer()
5321 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, read_extent_buffer_to_user() argument
5330 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); read_extent_buffer_to_user()
5334 WARN_ON(start > eb->len); read_extent_buffer_to_user()
5335 WARN_ON(start + len > eb->start + eb->len); read_extent_buffer_to_user()
5340 page = eb->pages[i]; read_extent_buffer_to_user()
5358 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, map_private_extent_buffer() argument
5366 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); map_private_extent_buffer()
5382 if (start + min_len > eb->len) { map_private_extent_buffer()
5383 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, " map_private_extent_buffer()
5385 eb->start, eb->len, start, min_len); map_private_extent_buffer()
5389 p = eb->pages[i]; map_private_extent_buffer()
5396 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, memcmp_extent_buffer() argument
5405 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); memcmp_extent_buffer()
5409 WARN_ON(start > eb->len); memcmp_extent_buffer()
5410 WARN_ON(start + len > eb->start + eb->len); memcmp_extent_buffer()
5415 page = eb->pages[i]; memcmp_extent_buffer()
5432 void write_extent_buffer(struct extent_buffer *eb, const void *srcv, write_extent_buffer() argument
5440 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); write_extent_buffer()
5443 WARN_ON(start > eb->len); write_extent_buffer()
5444 WARN_ON(start + len > eb->start + eb->len); write_extent_buffer()
5449 page = eb->pages[i]; write_extent_buffer()
5463 void memset_extent_buffer(struct extent_buffer *eb, char c, memset_extent_buffer() argument
5470 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); memset_extent_buffer()
5473 WARN_ON(start > eb->len); memset_extent_buffer()
5474 WARN_ON(start + len > eb->start + eb->len); memset_extent_buffer()
5479 page = eb->pages[i]; memset_extent_buffer()
5646 struct extent_buffer *eb; try_release_extent_buffer() local
5649 * We need to make sure nobody is attaching this page to an eb right try_release_extent_buffer()
5658 eb = (struct extent_buffer *)page->private; try_release_extent_buffer()
5659 BUG_ON(!eb); try_release_extent_buffer()
5663 * the eb doesn't disappear out from under us while we're looking at try_release_extent_buffer()
5666 spin_lock(&eb->refs_lock); try_release_extent_buffer()
5667 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { try_release_extent_buffer()
5668 spin_unlock(&eb->refs_lock); try_release_extent_buffer()
5675 * If tree ref isn't set then we know the ref on this eb is a real ref, try_release_extent_buffer()
5678 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { try_release_extent_buffer()
5679 spin_unlock(&eb->refs_lock); try_release_extent_buffer()
5683 return release_extent_buffer(eb); try_release_extent_buffer()
5194 read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, get_extent_t *get_extent, int mirror_num) read_extent_buffer_pages() argument
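Across the extent_io.c hits the reference discipline is consistent: find_extent_buffer() and the allocation paths hand back an eb with a reference held, free_extent_buffer() is the matching put, and extent_buffer_uptodate()/extent_buffer_under_io() query its state. A minimal sketch of a lookup that follows that discipline; eb_is_cached_and_uptodate() is an illustrative name, not a btrfs function.

static int eb_is_cached_and_uptodate(struct btrfs_fs_info *fs_info, u64 start)
{
        struct extent_buffer *eb;
        int uptodate;

        eb = find_extent_buffer(fs_info, start);        /* ref taken on hit */
        if (!eb)
                return 0;

        uptodate = extent_buffer_uptodate(eb);
        free_extent_buffer(eb);                         /* drop our ref */
        return uptodate;
}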
backref.c
37 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, check_extent_in_eb() argument
45 if (!btrfs_file_extent_compression(eb, fi) && check_extent_in_eb()
46 !btrfs_file_extent_encryption(eb, fi) && check_extent_in_eb()
47 !btrfs_file_extent_other_encoding(eb, fi)) { check_extent_in_eb()
51 data_offset = btrfs_file_extent_offset(eb, fi); check_extent_in_eb()
52 data_len = btrfs_file_extent_num_bytes(eb, fi); check_extent_in_eb()
82 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte, find_extent_in_eb() argument
99 nritems = btrfs_header_nritems(eb); find_extent_in_eb()
101 btrfs_item_key_to_cpu(eb, &key, slot); find_extent_in_eb()
104 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); find_extent_in_eb()
105 extent_type = btrfs_file_extent_type(eb, fi); find_extent_in_eb()
109 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); find_extent_in_eb()
113 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie); find_extent_in_eb()
231 struct extent_buffer *eb; add_all_parents() local
241 eb = path->nodes[level]; add_all_parents()
242 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); add_all_parents()
257 eb = path->nodes[0]; add_all_parents()
260 btrfs_item_key_to_cpu(eb, &key, slot); add_all_parents()
266 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); add_all_parents()
267 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); add_all_parents()
274 ret = check_extent_in_eb(&key, eb, fi, add_all_parents()
282 ret = ulist_add_merge_ptr(parents, eb->start, add_all_parents()
316 struct extent_buffer *eb; __resolve_indirect_ref() local
359 eb = path->nodes[level]; __resolve_indirect_ref()
360 while (!eb) { __resolve_indirect_ref()
366 eb = path->nodes[level]; __resolve_indirect_ref()
481 struct extent_buffer *eb; __add_missing_keys() local
492 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte, list_for_each()
494 if (!eb || !extent_buffer_uptodate(eb)) { list_for_each()
495 free_extent_buffer(eb); list_for_each()
498 btrfs_tree_read_lock(eb); list_for_each()
499 if (btrfs_header_level(eb) == 0) list_for_each()
500 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
502 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
503 btrfs_tree_read_unlock(eb); list_for_each()
504 free_extent_buffer(eb); list_for_each()
1033 struct extent_buffer *eb; local
1035 eb = read_tree_block(fs_info->extent_root,
1037 if (!eb || !extent_buffer_uptodate(eb)) {
1038 free_extent_buffer(eb);
1042 btrfs_tree_read_lock(eb);
1043 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1044 ret = find_extent_in_eb(eb, bytenr,
1046 btrfs_tree_read_unlock_blocking(eb);
1047 free_extent_buffer(eb);
1357 struct extent_buffer *eb = eb_in; btrfs_ref_to_path() local
1369 read_extent_buffer(eb, dest + bytes_left, btrfs_ref_to_path()
1371 if (eb != eb_in) { btrfs_ref_to_path()
1373 btrfs_tree_read_unlock_blocking(eb); btrfs_ref_to_path()
1374 free_extent_buffer(eb); btrfs_ref_to_path()
1390 eb = path->nodes[0]; btrfs_ref_to_path()
1391 /* make sure we can use eb after releasing the path */ btrfs_ref_to_path()
1392 if (eb != eb_in) { btrfs_ref_to_path()
1394 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_ref_to_path()
1399 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); btrfs_ref_to_path()
1401 name_len = btrfs_inode_ref_name_len(eb, iref); btrfs_ref_to_path()
1432 struct extent_buffer *eb; extent_from_logical() local
1465 eb = path->nodes[0]; extent_from_logical()
1466 item_size = btrfs_item_size_nr(eb, path->slots[0]); extent_from_logical()
1469 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); extent_from_logical()
1470 flags = btrfs_extent_flags(eb, ei); extent_from_logical()
1499 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb, __get_extent_inline_ref() argument
1511 flags = btrfs_extent_flags(eb, ei); __get_extent_inline_ref()
1533 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref); __get_extent_inline_ref()
1550 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, tree_backref_for_extent() argument
1562 ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size, tree_backref_for_extent()
1576 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); tree_backref_for_extent()
1582 *out_level = btrfs_tree_block_level(eb, info); tree_backref_for_extent()
1712 struct extent_buffer *eb, void *ctx);
1725 struct extent_buffer *eb; iterate_inode_refs() local
1745 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_refs()
1746 if (!eb) { iterate_inode_refs()
1750 extent_buffer_get(eb); iterate_inode_refs()
1751 btrfs_tree_read_lock(eb); iterate_inode_refs()
1752 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); iterate_inode_refs()
1756 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); iterate_inode_refs()
1758 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) { iterate_inode_refs()
1759 name_len = btrfs_inode_ref_name_len(eb, iref); iterate_inode_refs()
1765 (unsigned long)(iref + 1), eb, ctx); iterate_inode_refs()
1771 btrfs_tree_read_unlock_blocking(eb); iterate_inode_refs()
1772 free_extent_buffer(eb); iterate_inode_refs()
1789 struct extent_buffer *eb; iterate_inode_extrefs() local
1807 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_extrefs()
1808 if (!eb) { iterate_inode_extrefs()
1812 extent_buffer_get(eb); iterate_inode_extrefs()
1814 btrfs_tree_read_lock(eb); iterate_inode_extrefs()
1815 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); iterate_inode_extrefs()
1818 item_size = btrfs_item_size_nr(eb, slot); iterate_inode_extrefs()
1819 ptr = btrfs_item_ptr_offset(eb, slot); iterate_inode_extrefs()
1826 parent = btrfs_inode_extref_parent(eb, extref); iterate_inode_extrefs()
1827 name_len = btrfs_inode_extref_name_len(eb, extref); iterate_inode_extrefs()
1829 (unsigned long)&extref->name, eb, ctx); iterate_inode_extrefs()
1833 cur_offset += btrfs_inode_extref_name_len(eb, extref); iterate_inode_extrefs()
1836 btrfs_tree_read_unlock_blocking(eb); iterate_inode_extrefs()
1837 free_extent_buffer(eb); iterate_inode_extrefs()
1872 struct extent_buffer *eb, void *ctx) inode_to_path()
1886 name_off, eb, inum, fspath_min, bytes_left); inode_to_path()
1871 inode_to_path(u64 inum, u32 name_len, unsigned long name_off, struct extent_buffer *eb, void *ctx) inode_to_path() argument
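iterate_inode_refs() and iterate_inode_extrefs() share one framing: clone the leaf with btrfs_clone_extent_buffer() so the path can be released, take an extra reference, read-lock the private copy in blocking mode, walk the refs, then unlock and free. A reduced sketch of that framing with the per-ref walk elided; with_cloned_leaf() is an illustrative name, not a btrfs function.

static int with_cloned_leaf(struct btrfs_path *path)
{
        struct extent_buffer *eb;

        eb = btrfs_clone_extent_buffer(path->nodes[0]);
        if (!eb)
                return -ENOMEM;
        extent_buffer_get(eb);          /* matches the extra get in iterate_inode_refs() */
        btrfs_tree_read_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

        /* ... walk btrfs_inode_ref items in the cloned leaf here ... */

        btrfs_tree_read_unlock_blocking(eb);
        free_extent_buffer(eb);
        return 0;
}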
extent_io.h
143 /* >= 0 if eb belongs to a log tree, -1 otherwise */
271 void free_extent_buffer(struct extent_buffer *eb);
272 void free_extent_buffer_stale(struct extent_buffer *eb);
277 struct extent_buffer *eb, u64 start, int wait,
279 void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
287 static inline void extent_buffer_get(struct extent_buffer *eb) extent_buffer_get() argument
289 atomic_inc(&eb->refs); extent_buffer_get()
292 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
295 void read_extent_buffer(struct extent_buffer *eb, void *dst,
298 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
301 void write_extent_buffer(struct extent_buffer *eb, const void *src,
310 void memset_extent_buffer(struct extent_buffer *eb, char c,
312 void clear_extent_buffer_dirty(struct extent_buffer *eb);
313 int set_extent_buffer_dirty(struct extent_buffer *eb);
314 int set_extent_buffer_uptodate(struct extent_buffer *eb);
315 int clear_extent_buffer_uptodate(struct extent_buffer *eb);
316 int extent_buffer_uptodate(struct extent_buffer *eb);
317 int extent_buffer_under_io(struct extent_buffer *eb);
318 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
342 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
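These declarations treat an extent buffer as a flat byte range even though it is backed by multiple pages: read_extent_buffer(), write_extent_buffer(), memcmp_extent_buffer() and memset_extent_buffer() all take a start offset and length relative to the buffer. A tiny sketch; the offset 0 and the 4-byte payload are illustrative values, not anything btrfs-specific.

static void stamp_range_example(struct extent_buffer *eb)
{
        const char magic[4] = { 'e', 'b', '!', 0 };

        /* compare the first four bytes and rewrite them if they differ */
        if (memcmp_extent_buffer(eb, magic, 0, sizeof(magic)) != 0)
                write_extent_buffer(eb, magic, 0, sizeof(magic));
}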
dev-replace.c
60 struct extent_buffer *eb; btrfs_init_dev_replace() local
101 eb = path->nodes[0]; btrfs_init_dev_replace()
102 item_size = btrfs_item_size_nr(eb, slot); btrfs_init_dev_replace()
103 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item); btrfs_init_dev_replace()
111 src_devid = btrfs_dev_replace_src_devid(eb, ptr); btrfs_init_dev_replace()
113 btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr); btrfs_init_dev_replace()
114 dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr); btrfs_init_dev_replace()
115 dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr); btrfs_init_dev_replace()
117 btrfs_dev_replace_time_stopped(eb, ptr); btrfs_init_dev_replace()
119 btrfs_dev_replace_num_write_errors(eb, ptr)); btrfs_init_dev_replace()
121 btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr)); btrfs_init_dev_replace()
122 dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr); btrfs_init_dev_replace()
125 dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr); btrfs_init_dev_replace()
202 struct extent_buffer *eb; btrfs_run_dev_replace() local
264 eb = path->nodes[0]; btrfs_run_dev_replace()
265 ptr = btrfs_item_ptr(eb, path->slots[0], btrfs_run_dev_replace()
270 btrfs_set_dev_replace_src_devid(eb, ptr, btrfs_run_dev_replace()
273 btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1); btrfs_run_dev_replace()
274 btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr, btrfs_run_dev_replace()
276 btrfs_set_dev_replace_replace_state(eb, ptr, btrfs_run_dev_replace()
278 btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started); btrfs_run_dev_replace()
279 btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped); btrfs_run_dev_replace()
280 btrfs_set_dev_replace_num_write_errors(eb, ptr, btrfs_run_dev_replace()
282 btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr, btrfs_run_dev_replace()
286 btrfs_set_dev_replace_cursor_left(eb, ptr, btrfs_run_dev_replace()
288 btrfs_set_dev_replace_cursor_right(eb, ptr, btrfs_run_dev_replace()
293 btrfs_mark_buffer_dirty(eb); btrfs_run_dev_replace()
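dev-replace.c reads and writes the btrfs_dev_replace_item purely through generated accessors and then marks the leaf dirty so it is written back. A hedged sketch of updating one field, assuming the path is already positioned on the dev-replace item; bump_write_errors() is an illustrative name.

static void bump_write_errors(struct btrfs_path *path, u64 errors)
{
        struct extent_buffer *eb = path->nodes[0];
        struct btrfs_dev_replace_item *ptr;

        ptr = btrfs_item_ptr(eb, path->slots[0],
                             struct btrfs_dev_replace_item);
        btrfs_set_dev_replace_num_write_errors(eb, ptr, errors);
        btrfs_mark_buffer_dirty(eb);    /* schedule the leaf for writeback */
}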
relocation.c
62 struct extent_buffer *eb; member in struct:backref_node
394 btrfs_tree_unlock(node->eb); unlock_node_buffer()
401 if (node->eb) { drop_node_buffer()
403 free_extent_buffer(node->eb); drop_node_buffer()
404 node->eb = NULL; drop_node_buffer()
686 struct extent_buffer *eb; local
767 eb = path1->nodes[0];
770 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
778 eb = path1->nodes[0];
781 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
789 ret = find_inline_backref(eb, path1->slots[0],
800 key.type = btrfs_extent_inline_ref_type(eb, iref);
801 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
820 ref0 = btrfs_item_ptr(eb, path1->slots[0],
823 root = find_tree_root(rc, eb, ref0);
830 if (is_cowonly_root(btrfs_ref_root_v0(eb,
923 eb = path2->nodes[level];
924 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
946 eb = path2->nodes[level];
947 rb_node = tree_search(&cache->rb_root, eb->start);
955 upper->bytenr = eb->start;
956 upper->owner = btrfs_header_owner(eb);
966 if (btrfs_block_can_be_shared(root, eb))
992 upper->owner = btrfs_header_owner(eb);
1370 struct extent_buffer *eb; create_reloc_root() local
1385 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, create_reloc_root()
1400 ret = btrfs_copy_root(trans, root, root->node, &eb, create_reloc_root()
1406 btrfs_set_root_bytenr(root_item, eb->start); create_reloc_root()
1407 btrfs_set_root_level(root_item, btrfs_header_level(eb)); create_reloc_root()
1423 btrfs_tree_unlock(eb); create_reloc_root()
1424 free_extent_buffer(eb); create_reloc_root()
1741 int memcmp_node_keys(struct extent_buffer *eb, int slot, memcmp_node_keys() argument
1746 btrfs_node_key(eb, &key1, slot); memcmp_node_keys()
1766 struct extent_buffer *eb; replace_path() local
1788 eb = btrfs_lock_root_node(dest); replace_path()
1789 btrfs_set_lock_blocking(eb); replace_path()
1790 level = btrfs_header_level(eb); replace_path()
1793 btrfs_tree_unlock(eb); replace_path()
1794 free_extent_buffer(eb); replace_path()
1799 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); replace_path()
1802 btrfs_set_lock_blocking(eb); replace_path()
1810 parent = eb; replace_path()
1827 eb = path->nodes[level]; replace_path()
1828 new_bytenr = btrfs_node_blockptr(eb, replace_path()
1830 new_ptr_gen = btrfs_node_ptr_generation(eb, replace_path()
1849 eb = read_tree_block(dest, old_bytenr, old_ptr_gen); replace_path()
1850 if (!eb || !extent_buffer_uptodate(eb)) { replace_path()
1851 ret = (!eb) ? -ENOMEM : -EIO; replace_path()
1852 free_extent_buffer(eb); replace_path()
1855 btrfs_tree_lock(eb); replace_path()
1857 ret = btrfs_cow_block(trans, dest, eb, parent, replace_path()
1858 slot, &eb); replace_path()
1861 btrfs_set_lock_blocking(eb); replace_path()
1866 parent = eb; replace_path()
1937 struct extent_buffer *eb; walk_up_reloc_tree() local
1950 eb = path->nodes[i]; walk_up_reloc_tree()
1951 nritems = btrfs_header_nritems(eb); walk_up_reloc_tree()
1954 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= walk_up_reloc_tree()
1974 struct extent_buffer *eb = NULL; walk_down_reloc_tree() local
1984 eb = path->nodes[i]; walk_down_reloc_tree()
1985 nritems = btrfs_header_nritems(eb); walk_down_reloc_tree()
1987 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); walk_down_reloc_tree()
2003 bytenr = btrfs_node_blockptr(eb, path->slots[i]); walk_down_reloc_tree()
2004 eb = read_tree_block(root, bytenr, ptr_gen); walk_down_reloc_tree()
2005 if (!eb || !extent_buffer_uptodate(eb)) { walk_down_reloc_tree()
2006 free_extent_buffer(eb); walk_down_reloc_tree()
2009 BUG_ON(btrfs_header_level(eb) != i - 1); walk_down_reloc_tree()
2010 path->nodes[i - 1] = eb; walk_down_reloc_tree()
2645 struct extent_buffer *eb; do_relocation() local
2653 BUG_ON(lowest && node->eb); do_relocation()
2664 if (upper->eb && !upper->locked) { do_relocation()
2666 ret = btrfs_bin_search(upper->eb, key, do_relocation()
2669 bytenr = btrfs_node_blockptr(upper->eb, slot); do_relocation()
2670 if (node->eb->start == bytenr) do_relocation()
2676 if (!upper->eb) { do_relocation()
2684 if (!upper->eb) { do_relocation()
2685 upper->eb = path->nodes[upper->level]; do_relocation()
2688 BUG_ON(upper->eb != path->nodes[upper->level]); do_relocation()
2697 ret = btrfs_bin_search(upper->eb, key, upper->level, do_relocation()
2702 bytenr = btrfs_node_blockptr(upper->eb, slot); do_relocation()
2706 if (node->eb->start == bytenr) do_relocation()
2711 generation = btrfs_node_ptr_generation(upper->eb, slot); do_relocation()
2712 eb = read_tree_block(root, bytenr, generation); do_relocation()
2713 if (!eb || !extent_buffer_uptodate(eb)) { do_relocation()
2714 free_extent_buffer(eb); do_relocation()
2718 btrfs_tree_lock(eb); do_relocation()
2719 btrfs_set_lock_blocking(eb); do_relocation()
2721 if (!node->eb) { do_relocation()
2722 ret = btrfs_cow_block(trans, root, eb, upper->eb, do_relocation()
2723 slot, &eb); do_relocation()
2724 btrfs_tree_unlock(eb); do_relocation()
2725 free_extent_buffer(eb); do_relocation()
2730 BUG_ON(node->eb != eb); do_relocation()
2732 btrfs_set_node_blockptr(upper->eb, slot, do_relocation()
2733 node->eb->start); do_relocation()
2734 btrfs_set_node_ptr_generation(upper->eb, slot, do_relocation()
2736 btrfs_mark_buffer_dirty(upper->eb); do_relocation()
2739 node->eb->start, blocksize, do_relocation()
2740 upper->eb->start, do_relocation()
2741 btrfs_header_owner(upper->eb), do_relocation()
2745 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); do_relocation()
2775 btrfs_node_key_to_cpu(node->eb, &key, 0); link_to_upper()
2871 struct extent_buffer *eb; get_tree_block_key() local
2874 eb = read_tree_block(rc->extent_root, block->bytenr, get_tree_block_key()
2876 if (!eb || !extent_buffer_uptodate(eb)) { get_tree_block_key()
2877 free_extent_buffer(eb); get_tree_block_key()
2880 WARN_ON(btrfs_header_level(eb) != block->level); get_tree_block_key()
2882 btrfs_item_key_to_cpu(eb, &block->key, 0); get_tree_block_key()
2884 btrfs_node_key_to_cpu(eb, &block->key, 0); get_tree_block_key()
2885 free_extent_buffer(eb); get_tree_block_key()
3280 struct extent_buffer *eb; add_tree_block() local
3289 eb = path->nodes[0]; add_tree_block()
3290 item_size = btrfs_item_size_nr(eb, path->slots[0]); add_tree_block()
3294 ei = btrfs_item_ptr(eb, path->slots[0], add_tree_block()
3298 level = btrfs_tree_block_level(eb, bi); add_tree_block()
3302 generation = btrfs_extent_generation(eb, ei); add_tree_block()
3411 struct extent_buffer *eb) block_use_full_backref()
3416 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || block_use_full_backref()
3417 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) block_use_full_backref()
3421 eb->start, btrfs_header_level(eb), 1, block_use_full_backref()
3656 struct extent_buffer *eb; local
3665 eb = path->nodes[0];
3666 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3667 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3677 key.type = btrfs_extent_inline_ref_type(eb, iref);
3679 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3685 eb, dref, blocks);
3699 eb = path->nodes[0];
3700 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3708 eb = path->nodes[0];
3711 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3725 dref = btrfs_item_ptr(eb, path->slots[0],
3728 eb, dref, blocks);
4563 node->eb = cow; btrfs_reloc_cow_block()
3410 block_use_full_backref(struct reloc_control *rc, struct extent_buffer *eb) block_use_full_backref() argument
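Nearly every read_tree_block() call in the relocation.c hits is followed by the same defensive check: treat both a NULL return and a buffer that fails extent_buffer_uptodate() as an error, and free the buffer before bailing out (free_extent_buffer() tolerates NULL). A condensed sketch of that idiom; read_block_checked() is an illustrative wrapper, not a btrfs function.

static struct extent_buffer *read_block_checked(struct btrfs_root *root,
                                                u64 bytenr, u64 generation)
{
        struct extent_buffer *eb;

        eb = read_tree_block(root, bytenr, generation);
        if (!eb || !extent_buffer_uptodate(eb)) {
                free_extent_buffer(eb);         /* handles a NULL eb */
                return NULL;
        }
        return eb;
}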
ctree.c
43 struct extent_buffer *eb);
150 struct extent_buffer *eb; btrfs_root_node() local
154 eb = rcu_dereference(root->node); btrfs_root_node()
162 if (atomic_inc_not_zero(&eb->refs)) { btrfs_root_node()
169 return eb; btrfs_root_node()
178 struct extent_buffer *eb; btrfs_lock_root_node() local
181 eb = btrfs_root_node(root); btrfs_lock_root_node()
182 btrfs_tree_lock(eb); btrfs_lock_root_node()
183 if (eb == root->node) btrfs_lock_root_node()
185 btrfs_tree_unlock(eb); btrfs_lock_root_node()
186 free_extent_buffer(eb); btrfs_lock_root_node()
188 return eb; btrfs_lock_root_node()
197 struct extent_buffer *eb; btrfs_read_lock_root_node() local
200 eb = btrfs_root_node(root); btrfs_read_lock_root_node()
201 btrfs_tree_read_lock(eb); btrfs_read_lock_root_node()
202 if (eb == root->node) btrfs_read_lock_root_node()
204 btrfs_tree_read_unlock(eb); btrfs_read_lock_root_node()
205 free_extent_buffer(eb); btrfs_read_lock_root_node()
207 return eb; btrfs_read_lock_root_node()
487 struct extent_buffer *eb) { tree_mod_dont_log()
491 if (eb && btrfs_header_level(eb) == 0) tree_mod_dont_log()
505 struct extent_buffer *eb) tree_mod_need_log()
510 if (eb && btrfs_header_level(eb) == 0) tree_mod_need_log()
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot, alloc_tree_mod_elem() argument
526 tm->index = eb->start >> PAGE_CACHE_SHIFT; alloc_tree_mod_elem()
528 btrfs_node_key(eb, &tm->key, slot); alloc_tree_mod_elem()
529 tm->blockptr = btrfs_node_blockptr(eb, slot); alloc_tree_mod_elem()
533 tm->generation = btrfs_node_ptr_generation(eb, slot); alloc_tree_mod_elem()
541 struct extent_buffer *eb, int slot, tree_mod_log_insert_key()
547 if (!tree_mod_need_log(fs_info, eb)) tree_mod_log_insert_key()
550 tm = alloc_tree_mod_elem(eb, slot, op, flags); tree_mod_log_insert_key()
554 if (tree_mod_dont_log(fs_info, eb)) { tree_mod_log_insert_key()
569 struct extent_buffer *eb, int dst_slot, int src_slot, tree_mod_log_insert_move()
578 if (!tree_mod_need_log(fs_info, eb)) tree_mod_log_insert_move()
591 tm->index = eb->start >> PAGE_CACHE_SHIFT; tree_mod_log_insert_move()
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, tree_mod_log_insert_move()
606 if (tree_mod_dont_log(fs_info, eb)) tree_mod_log_insert_move()
883 struct extent_buffer *eb, int slot, int atomic) tree_mod_log_set_node_key()
887 ret = tree_mod_log_insert_key(fs_info, eb, slot, tree_mod_log_set_node_key()
894 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_log_free_eb() argument
901 if (btrfs_header_level(eb) == 0) tree_mod_log_free_eb()
907 nritems = btrfs_header_nritems(eb); tree_mod_log_free_eb()
913 tm_list[i] = alloc_tree_mod_elem(eb, i, tree_mod_log_free_eb()
921 if (tree_mod_dont_log(fs_info, eb)) tree_mod_log_free_eb()
1269 * tm is a pointer to the first operation to rewind within eb. then, all
1274 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, __tree_mod_log_rewind() argument
1284 n = btrfs_header_nritems(eb); __tree_mod_log_rewind()
1298 btrfs_set_node_key(eb, &tm->key, tm->slot); __tree_mod_log_rewind()
1299 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); __tree_mod_log_rewind()
1300 btrfs_set_node_ptr_generation(eb, tm->slot, __tree_mod_log_rewind()
1306 btrfs_set_node_key(eb, &tm->key, tm->slot); __tree_mod_log_rewind()
1307 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); __tree_mod_log_rewind()
1308 btrfs_set_node_ptr_generation(eb, tm->slot, __tree_mod_log_rewind()
1318 memmove_extent_buffer(eb, o_dst, o_src, __tree_mod_log_rewind()
1341 btrfs_set_header_nritems(eb, n); __tree_mod_log_rewind()
1345 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
1353 struct extent_buffer *eb, u64 time_seq) tree_mod_log_rewind()
1359 return eb; tree_mod_log_rewind()
1361 if (btrfs_header_level(eb) == 0) tree_mod_log_rewind()
1362 return eb; tree_mod_log_rewind()
1364 tm = tree_mod_log_search(fs_info, eb->start, time_seq); tree_mod_log_rewind()
1366 return eb; tree_mod_log_rewind()
1369 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); tree_mod_log_rewind()
1373 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); tree_mod_log_rewind()
1375 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1376 free_extent_buffer(eb); tree_mod_log_rewind()
1379 btrfs_set_header_bytenr(eb_rewin, eb->start); tree_mod_log_rewind()
1381 btrfs_header_backref_rev(eb)); tree_mod_log_rewind()
1382 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); tree_mod_log_rewind()
1383 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); tree_mod_log_rewind()
1385 eb_rewin = btrfs_clone_extent_buffer(eb); tree_mod_log_rewind()
1387 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1388 free_extent_buffer(eb); tree_mod_log_rewind()
1394 btrfs_tree_read_unlock_blocking(eb); tree_mod_log_rewind()
1395 free_extent_buffer(eb); tree_mod_log_rewind()
1417 struct extent_buffer *eb = NULL; get_old_root() local
1447 eb = btrfs_clone_extent_buffer(old); get_old_root()
1453 eb = alloc_dummy_extent_buffer(root->fs_info, logical); get_old_root()
1456 eb = btrfs_clone_extent_buffer(eb_root); get_old_root()
1461 if (!eb) get_old_root()
1463 extent_buffer_get(eb); get_old_root()
1464 btrfs_tree_read_lock(eb); get_old_root()
1466 btrfs_set_header_bytenr(eb, eb->start); get_old_root()
1467 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); get_old_root()
1468 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); get_old_root()
1469 btrfs_set_header_level(eb, old_root->level); get_old_root()
1470 btrfs_set_header_generation(eb, old_generation); get_old_root()
1473 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); get_old_root()
1475 WARN_ON(btrfs_header_level(eb) != 0); get_old_root()
1476 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); get_old_root()
1478 return eb; get_old_root()
1748 static noinline int generic_bin_search(struct extent_buffer *eb, generic_bin_search() argument
1773 err = map_private_extent_buffer(eb, offset, generic_bin_search()
1781 read_extent_buffer(eb, &unaligned, generic_bin_search()
1809 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, bin_search() argument
1813 return generic_bin_search(eb, bin_search()
1816 key, btrfs_header_nritems(eb), bin_search()
1819 return generic_bin_search(eb, bin_search()
1822 key, btrfs_header_nritems(eb), bin_search()
1826 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, btrfs_bin_search() argument
1829 return bin_search(eb, key, level, slot); btrfs_bin_search()
1856 struct extent_buffer *eb; read_node_slot() local
1865 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot), read_node_slot()
1867 if (eb && !extent_buffer_uptodate(eb)) { read_node_slot()
1868 free_extent_buffer(eb); read_node_slot()
1869 eb = NULL; read_node_slot()
1872 return eb; read_node_slot()
2246 struct extent_buffer *eb; reada_for_search() local
2261 eb = btrfs_find_tree_block(root->fs_info, search); reada_for_search()
2262 if (eb) { reada_for_search()
2263 free_extent_buffer(eb); reada_for_search()
2306 struct extent_buffer *eb; reada_for_balance() local
2321 eb = btrfs_find_tree_block(root->fs_info, block1); reada_for_balance()
2327 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) reada_for_balance()
2329 free_extent_buffer(eb); reada_for_balance()
2334 eb = btrfs_find_tree_block(root->fs_info, block2); reada_for_balance()
2335 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) reada_for_balance()
2337 free_extent_buffer(eb); reada_for_balance()
2622 struct extent_buffer *eb; btrfs_find_item() local
2635 eb = path->nodes[0]; btrfs_find_item()
2636 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { btrfs_find_item()
2640 eb = path->nodes[0]; btrfs_find_item()
2643 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); btrfs_find_item()
2983 * Since we can unwind eb's, we want to do a real search every btrfs_search_old_slot()
3159 struct extent_buffer *eb; btrfs_set_item_key_safe() local
3162 eb = path->nodes[0]; btrfs_set_item_key_safe()
3165 btrfs_item_key(eb, &disk_key, slot - 1); btrfs_set_item_key_safe()
3168 if (slot < btrfs_header_nritems(eb) - 1) { btrfs_set_item_key_safe()
3169 btrfs_item_key(eb, &disk_key, slot + 1); btrfs_set_item_key_safe()
3174 btrfs_set_item_key(eb, &disk_key, slot); btrfs_set_item_key_safe()
3175 btrfs_mark_buffer_dirty(eb); btrfs_set_item_key_safe()
486 tree_mod_dont_log(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_dont_log() argument
504 tree_mod_need_log(const struct btrfs_fs_info *fs_info, struct extent_buffer *eb) tree_mod_need_log() argument
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int slot, enum mod_log_op op, gfp_t flags) tree_mod_log_insert_key() argument
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items, gfp_t flags) tree_mod_log_insert_move() argument
882 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int slot, int atomic) tree_mod_log_set_node_key() argument
1352 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct extent_buffer *eb, u64 time_seq) tree_mod_log_rewind() argument
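
The bin_search()/generic_bin_search() hits above follow the usual btrfs convention of returning 0 plus the matching slot on a hit, or a non-zero value plus the would-be insertion slot on a miss. Below is a minimal userspace sketch of that convention only; the demo_* names and the in-memory key array are stand-ins for the extent-buffer page accesses (map_private_extent_buffer()/read_extent_buffer()) that the real code performs, and the comparison order is a simplification of the on-disk key ordering.

#include <stdio.h>

struct demo_key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

/* compare two keys in (objectid, type, offset) order */
static int demo_comp_keys(const struct demo_key *a, const struct demo_key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}

/* return 0 and *slot on a hit, 1 and the insertion slot on a miss */
static int demo_bin_search(const struct demo_key *items, int nritems,
			   const struct demo_key *key, int *slot)
{
	int low = 0, high = nritems;

	while (low < high) {
		int mid = low + (high - low) / 2;
		int ret = demo_comp_keys(&items[mid], key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

int main(void)
{
	struct demo_key items[] = {
		{ 256, 1, 0 }, { 256, 12, 256 }, { 257, 1, 0 },
	};
	struct demo_key key = { 257, 1, 0 };
	int slot;
	int ret = demo_bin_search(items, 3, &key, &slot);

	printf("ret=%d slot=%d\n", ret, slot);
	return 0;
}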
H A Dctree.h
2241 struct extent_buffer *eb; member in struct:btrfs_map_token
2259 #define read_eb_member(eb, ptr, type, member, result) ( \
2260 read_extent_buffer(eb, (char *)(result), \
2265 #define write_eb_member(eb, ptr, type, member, result) ( \
2266 write_extent_buffer(eb, (char *)(result), \
2272 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
2275 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
2278 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
2281 return btrfs_get_token_##bits(eb, ptr, off, NULL); \
2283 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
2286 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
2295 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
2298 return btrfs_get_##bits(eb, s, offsetof(type, member)); \
2300 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
2304 btrfs_set_##bits(eb, s, offsetof(type, member), val); \
2306 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
2310 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
2312 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
2317 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
2321 static inline u##bits btrfs_##name(struct extent_buffer *eb) \
2323 type *p = page_address(eb->pages[0]); \
2327 static inline void btrfs_set_##name(struct extent_buffer *eb, \
2330 type *p = page_address(eb->pages[0]); \
2438 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, btrfs_stripe_offset_nr() argument
2441 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); btrfs_stripe_offset_nr()
2444 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, btrfs_stripe_devid_nr() argument
2447 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); btrfs_stripe_devid_nr()
2536 static inline void btrfs_tree_block_key(struct extent_buffer *eb, btrfs_tree_block_key() argument
2540 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); btrfs_tree_block_key()
2543 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, btrfs_set_tree_block_key() argument
2547 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); btrfs_set_tree_block_key()
2596 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) btrfs_node_blockptr() argument
2601 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); btrfs_node_blockptr()
2604 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, btrfs_set_node_blockptr() argument
2610 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); btrfs_set_node_blockptr()
2613 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) btrfs_node_ptr_generation() argument
2618 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); btrfs_node_ptr_generation()
2621 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, btrfs_set_node_ptr_generation() argument
2627 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); btrfs_set_node_ptr_generation()
2636 void btrfs_node_key(struct extent_buffer *eb,
2639 static inline void btrfs_set_node_key(struct extent_buffer *eb, btrfs_set_node_key() argument
2644 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, btrfs_set_node_key()
2665 static inline u32 btrfs_item_end(struct extent_buffer *eb, btrfs_item_end() argument
2668 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); btrfs_item_end()
2671 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) btrfs_item_end_nr() argument
2673 return btrfs_item_end(eb, btrfs_item_nr(nr)); btrfs_item_end_nr()
2676 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) btrfs_item_offset_nr() argument
2678 return btrfs_item_offset(eb, btrfs_item_nr(nr)); btrfs_item_offset_nr()
2681 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) btrfs_item_size_nr() argument
2683 return btrfs_item_size(eb, btrfs_item_nr(nr)); btrfs_item_size_nr()
2686 static inline void btrfs_item_key(struct extent_buffer *eb, btrfs_item_key() argument
2690 read_eb_member(eb, item, struct btrfs_item, key, disk_key); btrfs_item_key()
2693 static inline void btrfs_set_item_key(struct extent_buffer *eb, btrfs_set_item_key() argument
2697 write_eb_member(eb, item, struct btrfs_item, key, disk_key); btrfs_set_item_key()
2722 static inline void btrfs_dir_item_key(struct extent_buffer *eb, btrfs_dir_item_key() argument
2726 read_eb_member(eb, item, struct btrfs_dir_item, location, key); btrfs_dir_item_key()
2729 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, btrfs_set_dir_item_key() argument
2733 write_eb_member(eb, item, struct btrfs_dir_item, location, key); btrfs_set_dir_item_key()
2743 static inline void btrfs_free_space_key(struct extent_buffer *eb, btrfs_free_space_key() argument
2747 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); btrfs_free_space_key()
2750 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, btrfs_set_free_space_key() argument
2754 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); btrfs_set_free_space_key()
2779 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, btrfs_node_key_to_cpu() argument
2783 btrfs_node_key(eb, &disk_key, nr); btrfs_node_key_to_cpu()
2787 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, btrfs_item_key_to_cpu() argument
2791 btrfs_item_key(eb, &disk_key, nr); btrfs_item_key_to_cpu()
2795 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, btrfs_dir_item_key_to_cpu() argument
2800 btrfs_dir_item_key(eb, item, &disk_key); btrfs_dir_item_key_to_cpu()
2830 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) btrfs_header_flag() argument
2832 return (btrfs_header_flags(eb) & flag) == flag; btrfs_header_flag()
2835 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) btrfs_set_header_flag() argument
2837 u64 flags = btrfs_header_flags(eb); btrfs_set_header_flag()
2838 btrfs_set_header_flags(eb, flags | flag); btrfs_set_header_flag()
2842 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) btrfs_clear_header_flag() argument
2844 u64 flags = btrfs_header_flags(eb); btrfs_clear_header_flag()
2845 btrfs_set_header_flags(eb, flags & ~flag); btrfs_clear_header_flag()
2849 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) btrfs_header_backref_rev() argument
2851 u64 flags = btrfs_header_flags(eb); btrfs_header_backref_rev()
2855 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, btrfs_set_header_backref_rev() argument
2858 u64 flags = btrfs_header_flags(eb); btrfs_set_header_backref_rev()
2861 btrfs_set_header_flags(eb, flags); btrfs_set_header_backref_rev()
2869 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) btrfs_header_chunk_tree_uuid() argument
2874 static inline int btrfs_is_leaf(struct extent_buffer *eb) btrfs_is_leaf() argument
2876 return btrfs_header_level(eb) == 0; btrfs_is_leaf()
2970 static inline void btrfs_balance_data(struct extent_buffer *eb, btrfs_balance_data() argument
2974 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); btrfs_balance_data()
2977 static inline void btrfs_set_balance_data(struct extent_buffer *eb, btrfs_set_balance_data() argument
2981 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); btrfs_set_balance_data()
2984 static inline void btrfs_balance_meta(struct extent_buffer *eb, btrfs_balance_meta() argument
2988 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); btrfs_balance_meta()
2991 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, btrfs_set_balance_meta() argument
2995 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); btrfs_set_balance_meta()
2998 static inline void btrfs_balance_sys(struct extent_buffer *eb, btrfs_balance_sys() argument
3002 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); btrfs_balance_sys()
3005 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, btrfs_set_balance_sys() argument
3009 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); btrfs_set_balance_sys()
3162 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, btrfs_file_extent_inline_item_len() argument
3165 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; btrfs_file_extent_inline_item_len()
3171 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, btrfs_file_extent_inline_len() argument
3182 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && btrfs_file_extent_inline_len()
3183 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && btrfs_file_extent_inline_len()
3184 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { btrfs_file_extent_inline_len()
3185 return btrfs_file_extent_inline_item_len(eb, btrfs_file_extent_inline_len()
3190 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); btrfs_file_extent_inline_len()
3195 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, btrfs_dev_stats_value() argument
3201 read_extent_buffer(eb, &val, btrfs_dev_stats_value()
3208 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, btrfs_set_dev_stats_value() argument
3212 write_extent_buffer(eb, &val, btrfs_set_dev_stats_value()
3368 struct extent_buffer *eb);
3519 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
4217 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
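
The accessor macros quoted above from ctree.h stamp out typed btrfs_<name>()/btrfs_set_<name>() helper pairs from a (type, member) description. The following is a simplified, self-contained sketch of that generation pattern using token pasting and offsetof(); the demo_* structures, the plain memcpy byte access, and the absence of endian conversion are deliberate simplifications rather than the kernel helpers, which go through the extent buffer pages.

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct demo_buffer {
	unsigned char data[64];	/* stand-in for an extent buffer */
};

static uint64_t demo_get_64(const struct demo_buffer *eb, size_t off)
{
	uint64_t v;

	memcpy(&v, eb->data + off, sizeof(v));
	return v;
}

static void demo_set_64(struct demo_buffer *eb, size_t off, uint64_t val)
{
	memcpy(eb->data + off, &val, sizeof(val));
}

/* one macro generates a get/set pair for a named struct member */
#define DEMO_SETGET_FUNCS(name, type, member) \
static uint64_t demo_##name(const struct demo_buffer *eb, size_t base) \
{ \
	return demo_get_64(eb, base + offsetof(type, member)); \
} \
static void demo_set_##name(struct demo_buffer *eb, size_t base, uint64_t val) \
{ \
	demo_set_64(eb, base + offsetof(type, member), val); \
}

struct demo_item {
	uint64_t blockptr;
	uint64_t generation;
};

DEMO_SETGET_FUNCS(item_blockptr, struct demo_item, blockptr)
DEMO_SETGET_FUNCS(item_generation, struct demo_item, generation)

int main(void)
{
	struct demo_buffer eb = { { 0 } };

	demo_set_item_blockptr(&eb, 0, 8192);
	demo_set_item_generation(&eb, 0, 1234);
	printf("blockptr=%llu generation=%llu\n",
	       (unsigned long long)demo_item_blockptr(&eb, 0),
	       (unsigned long long)demo_item_generation(&eb, 0));
	return 0;
}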
H A Dtree-log.c
289 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
297 struct extent_buffer *eb, process_one_buffer()
307 ret = btrfs_read_buffer(eb, gen); process_one_buffer()
314 eb->start, eb->len); process_one_buffer()
316 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { process_one_buffer()
317 if (wc->pin && btrfs_header_level(eb) == 0) process_one_buffer()
318 ret = btrfs_exclude_logged_extents(log, eb); process_one_buffer()
320 btrfs_write_tree_block(eb); process_one_buffer()
322 btrfs_wait_tree_block_writeback(eb); process_one_buffer()
328 * Item overwrite used by replay and tree logging. eb, slot and key all refer
344 struct extent_buffer *eb, int slot, overwrite_item()
359 item_size = btrfs_item_size_nr(eb, slot); overwrite_item()
360 src_ptr = btrfs_item_ptr_offset(eb, slot); overwrite_item()
388 read_extent_buffer(eb, src_copy, src_ptr, item_size); overwrite_item()
420 item = btrfs_item_ptr(eb, slot, overwrite_item()
422 btrfs_set_inode_nbytes(eb, item, nbytes); overwrite_item()
429 mode = btrfs_inode_mode(eb, item); overwrite_item()
431 btrfs_set_inode_size(eb, item, 0); overwrite_item()
441 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); overwrite_item()
442 btrfs_set_inode_nbytes(eb, item, 0); overwrite_item()
449 mode = btrfs_inode_mode(eb, item); overwrite_item()
451 btrfs_set_inode_size(eb, item, 0); overwrite_item()
493 if (btrfs_inode_generation(eb, src_item) == 0) { overwrite_item()
495 const u64 ino_size = btrfs_inode_size(eb, src_item); overwrite_item()
504 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && overwrite_item()
517 S_ISDIR(btrfs_inode_mode(eb, src_item)) && overwrite_item()
525 copy_extent_buffer(path->nodes[0], eb, dst_ptr, overwrite_item()
572 /* replays a single extent in 'eb' at 'slot' with 'key' into the
587 struct extent_buffer *eb, int slot, replay_one_extent()
599 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); replay_one_extent()
600 found_type = btrfs_file_extent_type(eb, item); replay_one_extent()
604 nbytes = btrfs_file_extent_num_bytes(eb, item); replay_one_extent()
611 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) replay_one_extent()
614 size = btrfs_file_extent_inline_len(eb, slot, item); replay_one_extent()
615 nbytes = btrfs_file_extent_ram_bytes(eb, item); replay_one_extent()
648 read_extent_buffer(eb, &cmp1, (unsigned long)item, replay_one_extent()
681 copy_extent_buffer(path->nodes[0], eb, dest_offset, replay_one_extent()
684 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); replay_one_extent()
685 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); replay_one_extent()
687 offset = key->offset - btrfs_file_extent_offset(eb, item); replay_one_extent()
719 if (btrfs_file_extent_compression(eb, item)) { replay_one_extent()
724 btrfs_file_extent_offset(eb, item); replay_one_extent()
726 btrfs_file_extent_num_bytes(eb, item); replay_one_extent()
753 ret = overwrite_item(trans, root, path, eb, slot, key); replay_one_extent()
925 struct extent_buffer *eb, __add_inode_ref()
1101 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, extref_get_fields() argument
1109 *namelen = btrfs_inode_extref_name_len(eb, extref); extref_get_fields()
1114 read_extent_buffer(eb, *name, (unsigned long)&extref->name, extref_get_fields()
1117 *index = btrfs_inode_extref_index(eb, extref); extref_get_fields()
1119 *parent_objectid = btrfs_inode_extref_parent(eb, extref); extref_get_fields()
1124 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, ref_get_fields() argument
1131 *namelen = btrfs_inode_ref_name_len(eb, ref); ref_get_fields()
1136 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); ref_get_fields()
1138 *index = btrfs_inode_ref_index(eb, ref); ref_get_fields()
1145 * eb, slot and key refer to the buffer and key found in the log tree.
1153 struct extent_buffer *eb, int slot, add_inode_ref()
1170 ref_ptr = btrfs_item_ptr_offset(eb, slot); add_inode_ref()
1171 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); add_inode_ref()
1179 parent_objectid = btrfs_inode_extref_parent(eb, r); add_inode_ref()
1206 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, add_inode_ref()
1219 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, add_inode_ref()
1238 dir, inode, eb, add_inode_ref()
1269 ret = overwrite_item(trans, root, path, eb, slot, key); add_inode_ref()
1620 struct extent_buffer *eb, replay_one_name()
1639 name_len = btrfs_dir_name_len(eb, di); replay_one_name()
1646 log_type = btrfs_dir_type(eb, di); replay_one_name()
1647 read_extent_buffer(eb, name, (unsigned long)(di + 1), replay_one_name()
1650 btrfs_dir_item_key_to_cpu(eb, di, &log_key); replay_one_name()
1740 struct extent_buffer *eb, int slot, replay_one_dir_item()
1744 u32 item_size = btrfs_item_size_nr(eb, slot); replay_one_dir_item()
1750 ptr = btrfs_item_ptr_offset(eb, slot); replay_one_dir_item()
1754 if (verify_dir_item(root, eb, di)) replay_one_dir_item()
1756 name_len = btrfs_dir_name_len(eb, di); replay_one_dir_item()
1757 ret = replay_one_name(trans, root, path, eb, di, key); replay_one_dir_item()
1863 struct extent_buffer *eb; check_item_in_log() local
1876 eb = path->nodes[0]; check_item_in_log()
1878 item_size = btrfs_item_size_nr(eb, slot); check_item_in_log()
1879 ptr = btrfs_item_ptr_offset(eb, slot); check_item_in_log()
1883 if (verify_dir_item(root, eb, di)) { check_item_in_log()
1888 name_len = btrfs_dir_name_len(eb, di); check_item_in_log()
1894 read_extent_buffer(eb, name, (unsigned long)(di + 1), check_item_in_log()
1909 btrfs_dir_item_key_to_cpu(eb, di, &location); check_item_in_log()
2177 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, replay_one_buffer() argument
2188 ret = btrfs_read_buffer(eb, gen); replay_one_buffer()
2192 level = btrfs_header_level(eb); replay_one_buffer()
2201 nritems = btrfs_header_nritems(eb); replay_one_buffer()
2203 btrfs_item_key_to_cpu(eb, &key, i); replay_one_buffer()
2211 inode_item = btrfs_item_ptr(eb, i, replay_one_buffer()
2217 mode = btrfs_inode_mode(eb, inode_item); replay_one_buffer()
2225 eb, i, &key); replay_one_buffer()
2249 eb, i, &key); replay_one_buffer()
2260 eb, i, &key); replay_one_buffer()
2266 eb, i, &key); replay_one_buffer()
2272 eb, i, &key); replay_one_buffer()
2277 eb, i, &key); replay_one_buffer()
4349 static int btrfs_check_ref_name_override(struct extent_buffer *eb, btrfs_check_ref_name_override() argument
4358 u32 item_size = btrfs_item_size_nr(eb, slot); btrfs_check_ref_name_override()
4360 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); btrfs_check_ref_name_override()
4380 this_name_len = btrfs_inode_ref_name_len(eb, iref); btrfs_check_ref_name_override()
4388 parent = btrfs_inode_extref_parent(eb, extref); btrfs_check_ref_name_override()
4389 this_name_len = btrfs_inode_extref_name_len(eb, extref); btrfs_check_ref_name_override()
4406 read_extent_buffer(eb, name, name_ptr, this_name_len); btrfs_check_ref_name_override()
296 process_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen) process_one_buffer() argument
341 overwrite_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) overwrite_item() argument
584 replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_extent() argument
920 __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_root *log_root, struct inode *dir, struct inode *inode, struct extent_buffer *eb, u64 inode_objectid, u64 parent_objectid, u64 ref_index, char *name, int namelen, int *search_done) __add_inode_ref() argument
1149 add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) add_inode_ref() argument
1617 replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, struct btrfs_dir_item *di, struct btrfs_key *key) replay_one_name() argument
1737 replay_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_dir_item() argument
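
replay_one_buffer() above walks every item of a replayed log-tree leaf and dispatches on the key type (inode items, inode refs, dir items, file extents). The loop below is only a rough userspace sketch of that dispatch shape; the demo_* enum values and handlers are placeholders, not the real on-disk key-type constants or the replay helpers.

#include <stdio.h>

enum demo_key_type {
	DEMO_INODE_ITEM,
	DEMO_INODE_REF,
	DEMO_DIR_ITEM,
	DEMO_EXTENT_DATA,
};

struct demo_key {
	unsigned long long objectid;
	enum demo_key_type type;
	unsigned long long offset;
};

/* walk every slot of a "leaf" and pick a handler per key type */
static int demo_replay_leaf(const struct demo_key *keys, int nritems)
{
	int i;

	for (i = 0; i < nritems; i++) {
		switch (keys[i].type) {
		case DEMO_INODE_ITEM:
			printf("slot %d: overwrite inode item\n", i);
			break;
		case DEMO_INODE_REF:
			printf("slot %d: add inode ref\n", i);
			break;
		case DEMO_DIR_ITEM:
			printf("slot %d: replay dir item\n", i);
			break;
		case DEMO_EXTENT_DATA:
			printf("slot %d: replay file extent\n", i);
			break;
		}
	}
	return 0;
}

int main(void)
{
	struct demo_key keys[] = {
		{ 257, DEMO_INODE_ITEM, 0 },
		{ 257, DEMO_INODE_REF, 256 },
		{ 257, DEMO_EXTENT_DATA, 0 },
	};

	return demo_replay_leaf(keys, 3);
}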
H A Ddisk-io.c
135 * eb, the lockdep key is determined by the btrfs_root it belongs to and
136 * the level the eb occupies in the tree.
195 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, btrfs_set_buffer_lockdep_class() argument
207 lockdep_set_class_and_name(&eb->lock, btrfs_set_buffer_lockdep_class()
346 struct extent_buffer *eb, u64 parent_transid, verify_parent_transid()
353 if (!parent_transid || btrfs_header_generation(eb) == parent_transid) verify_parent_transid()
360 btrfs_tree_read_lock(eb); verify_parent_transid()
361 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); verify_parent_transid()
364 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, verify_parent_transid()
366 if (extent_buffer_uptodate(eb) && verify_parent_transid()
367 btrfs_header_generation(eb) == parent_transid) { verify_parent_transid()
373 eb->fs_info->sb->s_id, eb->start, verify_parent_transid()
374 parent_transid, btrfs_header_generation(eb)); verify_parent_transid()
381 * if we find an eb that is under IO (dirty/writeback) because we could verify_parent_transid()
385 if (!extent_buffer_under_io(eb)) verify_parent_transid()
386 clear_extent_buffer_uptodate(eb); verify_parent_transid()
388 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, verify_parent_transid()
391 btrfs_tree_read_unlock_blocking(eb); verify_parent_transid()
438 struct extent_buffer *eb, btree_read_extent_buffer_pages()
448 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); btree_read_extent_buffer_pages()
451 ret = read_extent_buffer_pages(io_tree, eb, start, btree_read_extent_buffer_pages()
455 if (!verify_parent_transid(io_tree, eb, btree_read_extent_buffer_pages()
467 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) btree_read_extent_buffer_pages()
471 eb->start, eb->len); btree_read_extent_buffer_pages()
477 failed_mirror = eb->read_mirror; btree_read_extent_buffer_pages()
489 repair_eb_io_failure(root, eb, failed_mirror); btree_read_extent_buffer_pages()
503 struct extent_buffer *eb; csum_dirty_buffer() local
505 eb = (struct extent_buffer *)page->private; csum_dirty_buffer()
506 if (page != eb->pages[0]) csum_dirty_buffer()
508 found_start = btrfs_header_bytenr(eb); csum_dirty_buffer()
511 csum_tree_block(fs_info, eb, 0); csum_dirty_buffer()
516 struct extent_buffer *eb) check_tree_block_fsid()
522 read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); check_tree_block_fsid()
533 #define CORRUPT(reason, eb, root, slot) \
536 btrfs_header_bytenr(eb), root->objectid, slot)
605 struct extent_buffer *eb; btree_readpage_end_io_hook() local
613 eb = (struct extent_buffer *)page->private; btree_readpage_end_io_hook()
618 extent_buffer_get(eb); btree_readpage_end_io_hook()
620 reads_done = atomic_dec_and_test(&eb->io_pages); btree_readpage_end_io_hook()
624 eb->read_mirror = mirror; btree_readpage_end_io_hook()
625 if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) { btree_readpage_end_io_hook()
630 found_start = btrfs_header_bytenr(eb); btree_readpage_end_io_hook()
631 if (found_start != eb->start) { btree_readpage_end_io_hook()
634 eb->fs_info->sb->s_id, found_start, eb->start); btree_readpage_end_io_hook()
638 if (check_tree_block_fsid(root->fs_info, eb)) { btree_readpage_end_io_hook()
640 eb->fs_info->sb->s_id, eb->start); btree_readpage_end_io_hook()
644 found_level = btrfs_header_level(eb); btree_readpage_end_io_hook()
647 (int)btrfs_header_level(eb)); btree_readpage_end_io_hook()
652 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), btree_readpage_end_io_hook()
653 eb, found_level); btree_readpage_end_io_hook() local
655 ret = csum_tree_block(root->fs_info, eb, 1); btree_readpage_end_io_hook()
666 if (found_level == 0 && check_leaf(root, eb)) { btree_readpage_end_io_hook()
667 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); btree_readpage_end_io_hook()
672 set_extent_buffer_uptodate(eb); btree_readpage_end_io_hook()
675 test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readpage_end_io_hook()
676 btree_readahead_hook(root, eb, eb->start, ret); btree_readpage_end_io_hook()
684 atomic_inc(&eb->io_pages); btree_readpage_end_io_hook()
685 clear_extent_buffer_uptodate(eb); btree_readpage_end_io_hook()
687 free_extent_buffer(eb); btree_readpage_end_io_hook()
694 struct extent_buffer *eb; btree_io_failed_hook() local
697 eb = (struct extent_buffer *)page->private; btree_io_failed_hook()
698 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); btree_io_failed_hook()
699 eb->read_mirror = failed_mirror; btree_io_failed_hook()
700 atomic_dec(&eb->io_pages); btree_io_failed_hook()
701 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_io_failed_hook()
702 btree_readahead_hook(root, eb, eb->start, -EIO); btree_io_failed_hook()
1049 struct extent_buffer *eb; btree_set_page_dirty() local
1052 eb = (struct extent_buffer *)page->private; btree_set_page_dirty()
1053 BUG_ON(!eb); btree_set_page_dirty()
1054 BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); btree_set_page_dirty()
1055 BUG_ON(!atomic_read(&eb->refs)); btree_set_page_dirty()
1056 btrfs_assert_tree_locked(eb); btree_set_page_dirty()
1086 int mirror_num, struct extent_buffer **eb) reada_tree_block_flagged()
1110 *eb = buf; reada_tree_block_flagged()
4200 struct extent_buffer *eb; btrfs_destroy_marked_extents() local
4212 eb = btrfs_find_tree_block(root->fs_info, start); btrfs_destroy_marked_extents()
4214 if (!eb) btrfs_destroy_marked_extents()
4216 wait_on_extent_buffer_writeback(eb); btrfs_destroy_marked_extents()
4219 &eb->bflags)) btrfs_destroy_marked_extents()
4220 clear_extent_buffer_dirty(eb); btrfs_destroy_marked_extents()
4221 free_extent_buffer_stale(eb); btrfs_destroy_marked_extents()
345 verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid, int atomic) verify_parent_transid() argument
437 btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) btree_read_extent_buffer_pages() argument
515 check_tree_block_fsid(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) check_tree_block_fsid() argument
1085 reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb) reada_tree_block_flagged() argument
H A Dsend.c
451 struct extent_buffer *eb, fs_path_add_from_extent_buffer()
461 read_extent_buffer(eb, prepared, off, len); fs_path_add_from_extent_buffer()
584 struct extent_buffer *eb, tlv_put_btrfs_timespec()
588 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); tlv_put_btrfs_timespec()
630 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
632 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
854 struct extent_buffer *eb = path->nodes[0]; iterate_inode_ref() local
885 ptr = (unsigned long)btrfs_item_ptr(eb, slot, iterate_inode_ref()
888 total = btrfs_item_size(eb, item); iterate_inode_ref()
891 ptr = btrfs_item_ptr_offset(eb, slot); iterate_inode_ref()
892 total = btrfs_item_size_nr(eb, slot); iterate_inode_ref()
901 name_len = btrfs_inode_ref_name_len(eb, iref); iterate_inode_ref()
903 index = btrfs_inode_ref_index(eb, iref); iterate_inode_ref()
907 name_len = btrfs_inode_extref_name_len(eb, extref); iterate_inode_ref()
909 index = btrfs_inode_extref_index(eb, extref); iterate_inode_ref()
910 dir = btrfs_inode_extref_parent(eb, extref); iterate_inode_ref()
915 name_off, eb, dir, iterate_inode_ref()
929 eb, dir, iterate_inode_ref()
939 ret = fs_path_add_from_extent_buffer(p, eb, name_off, iterate_inode_ref()
975 struct extent_buffer *eb; iterate_dir_item() local
1003 eb = path->nodes[0]; iterate_dir_item()
1006 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); iterate_dir_item()
1009 total = btrfs_item_size(eb, item); iterate_dir_item()
1013 name_len = btrfs_dir_name_len(eb, di); iterate_dir_item()
1014 data_len = btrfs_dir_data_len(eb, di); iterate_dir_item()
1015 type = btrfs_dir_type(eb, di); iterate_dir_item()
1016 btrfs_dir_item_key_to_cpu(eb, di, &di_key); iterate_dir_item()
1059 read_extent_buffer(eb, buf, (unsigned long)(di + 1), iterate_dir_item()
1287 struct extent_buffer *eb = path->nodes[0]; find_extent_clone() local
1320 fi = btrfs_item_ptr(eb, path->slots[0], find_extent_clone()
1322 extent_type = btrfs_file_extent_type(eb, fi); find_extent_clone()
1327 compressed = btrfs_file_extent_compression(eb, fi); find_extent_clone()
1329 num_bytes = btrfs_file_extent_num_bytes(eb, fi); find_extent_clone()
1330 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); find_extent_clone()
1335 logical = disk_byte + btrfs_file_extent_offset(eb, fi); find_extent_clone()
1427 cur_clone_root->offset += btrfs_file_extent_offset(eb, find_extent_clone()
2454 struct extent_buffer *eb; send_utimes() local
2477 eb = path->nodes[0]; send_utimes()
2479 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); send_utimes()
2489 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); send_utimes()
2490 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); send_utimes()
2491 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); send_utimes()
2599 struct extent_buffer *eb; did_create_dir() local
2617 eb = path->nodes[0]; did_create_dir()
2619 if (slot >= btrfs_header_nritems(eb)) { did_create_dir()
2630 btrfs_item_key_to_cpu(eb, &found_key, slot); did_create_dir()
2637 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); did_create_dir()
2638 btrfs_dir_item_key_to_cpu(eb, di, &di_key); did_create_dir()
3992 struct extent_buffer *eb; process_all_refs() local
4022 eb = path->nodes[0]; process_all_refs()
4024 if (slot >= btrfs_header_nritems(eb)) { process_all_refs()
4033 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_refs()
4313 struct extent_buffer *eb; process_all_new_xattrs() local
4330 eb = path->nodes[0]; process_all_new_xattrs()
4332 if (slot >= btrfs_header_nritems(eb)) { process_all_new_xattrs()
4343 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_new_xattrs()
4673 struct extent_buffer *eb; is_extent_unchanged() local
4693 eb = left_path->nodes[0]; is_extent_unchanged()
4695 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); is_extent_unchanged()
4696 left_type = btrfs_file_extent_type(eb, ei); is_extent_unchanged()
4702 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); is_extent_unchanged()
4703 left_len = btrfs_file_extent_num_bytes(eb, ei); is_extent_unchanged()
4704 left_offset = btrfs_file_extent_offset(eb, ei); is_extent_unchanged()
4705 left_gen = btrfs_file_extent_generation(eb, ei); is_extent_unchanged()
4742 eb = path->nodes[0]; is_extent_unchanged()
4744 btrfs_item_key_to_cpu(eb, &found_key, slot); is_extent_unchanged()
4757 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); is_extent_unchanged()
4758 right_type = btrfs_file_extent_type(eb, ei); is_extent_unchanged()
4764 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); is_extent_unchanged()
4765 right_len = btrfs_file_extent_num_bytes(eb, ei); is_extent_unchanged()
4766 right_offset = btrfs_file_extent_offset(eb, ei); is_extent_unchanged()
4767 right_gen = btrfs_file_extent_generation(eb, ei); is_extent_unchanged()
4805 eb = path->nodes[0]; is_extent_unchanged()
4807 btrfs_item_key_to_cpu(eb, &found_key, slot); is_extent_unchanged()
4998 struct extent_buffer *eb; process_all_extents() local
5014 eb = path->nodes[0]; process_all_extents()
5017 if (slot >= btrfs_header_nritems(eb)) { process_all_extents()
5028 btrfs_item_key_to_cpu(eb, &found_key, slot); process_all_extents()
5539 struct extent_buffer *eb; full_send_tree() local
5557 eb = path->nodes[0]; full_send_tree()
5559 btrfs_item_key_to_cpu(eb, &found_key, slot); full_send_tree()
450 fs_path_add_from_extent_buffer(struct fs_path *p, struct extent_buffer *eb, unsigned long off, int len) fs_path_add_from_extent_buffer() argument
583 tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, struct extent_buffer *eb, struct btrfs_timespec *ts) tlv_put_btrfs_timespec() argument
H A Dreada.c
107 /* in case of err, eb might be NULL */ __readahead_hook()
108 static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, __readahead_hook() argument
122 if (eb) __readahead_hook()
123 level = btrfs_header_level(eb); __readahead_hook()
146 nritems = level ? btrfs_header_nritems(eb) : 0; __readahead_hook()
147 generation = btrfs_header_generation(eb); __readahead_hook()
171 btrfs_node_key_to_cpu(eb, &key, i); __readahead_hook()
173 btrfs_node_key_to_cpu(eb, &next_key, i + 1); __readahead_hook()
176 bytenr = btrfs_node_blockptr(eb, i); __readahead_hook()
177 n_gen = btrfs_node_ptr_generation(eb, i); __readahead_hook()
233 * start is passed separately in case eb is NULL, which may be the case with
236 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, btree_readahead_hook() argument
241 ret = __readahead_hook(root, eb, start, err); btree_readahead_hook()
659 struct extent_buffer *eb = NULL; reada_start_machine_dev() local
724 mirror_num, &eb); reada_start_machine_dev()
727 else if (eb) reada_start_machine_dev()
728 __readahead_hook(fs_info->extent_root, eb, eb->start, ret); reada_start_machine_dev()
730 if (eb) reada_start_machine_dev()
731 free_extent_buffer(eb); reada_start_machine_dev()
H A Ddisk-io.h
51 int mirror_num, struct extent_buffer **eb);
150 struct extent_buffer *eb, int level);
155 struct extent_buffer *eb, int level) btrfs_set_buffer_lockdep_class()
154 btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) btrfs_set_buffer_lockdep_class() argument
H A Dbackref.h
39 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
H A Dextent-tree.c
5856 struct extent_buffer *eb) btrfs_exclude_logged_extents()
5866 for (i = 0; i < btrfs_header_nritems(eb); i++) { btrfs_exclude_logged_extents()
5867 btrfs_item_key_to_cpu(eb, &key, i); btrfs_exclude_logged_extents()
5870 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); btrfs_exclude_logged_extents()
5871 found_type = btrfs_file_extent_type(eb, item); btrfs_exclude_logged_extents()
5874 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) btrfs_exclude_logged_extents()
5876 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); btrfs_exclude_logged_extents()
5877 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); btrfs_exclude_logged_extents()
7695 struct extent_buffer *eb; reada_walk_down() local
7709 eb = path->nodes[wc->level]; reada_walk_down()
7710 nritems = btrfs_header_nritems(eb); reada_walk_down()
7718 bytenr = btrfs_node_blockptr(eb, slot); reada_walk_down()
7719 generation = btrfs_node_ptr_generation(eb, slot); reada_walk_down()
7747 btrfs_node_key_to_cpu(eb, &key, slot); reada_walk_down()
7766 struct extent_buffer *eb) account_leaf_items()
7768 int nr = btrfs_header_nritems(eb); account_leaf_items()
7775 btrfs_item_key_to_cpu(eb, &key, i); account_leaf_items()
7780 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); account_leaf_items()
7782 extent_type = btrfs_file_extent_type(eb, fi); account_leaf_items()
7787 bytenr = btrfs_file_extent_disk_bytenr(eb, fi); account_leaf_items()
7791 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); account_leaf_items()
7822 struct extent_buffer *eb; adjust_slots_upwards() local
7828 eb = path->nodes[level]; adjust_slots_upwards()
7829 nr = btrfs_header_nritems(eb); adjust_slots_upwards()
7839 btrfs_tree_unlock_rw(eb, path->locks[level]); adjust_slots_upwards()
7842 free_extent_buffer(eb); adjust_slots_upwards()
7858 eb = path->nodes[root_level]; adjust_slots_upwards()
7859 if (path->slots[root_level] >= btrfs_header_nritems(eb)) adjust_slots_upwards()
7876 struct extent_buffer *eb = root_eb; account_shared_subtree() local
7923 eb = path->nodes[level + 1]; account_shared_subtree()
7925 child_bytenr = btrfs_node_blockptr(eb, parent_slot); account_shared_subtree()
7926 child_gen = btrfs_node_ptr_generation(eb, parent_slot); account_shared_subtree()
7928 eb = read_tree_block(root, child_bytenr, child_gen); account_shared_subtree()
7929 if (!eb || !extent_buffer_uptodate(eb)) { account_shared_subtree()
7934 path->nodes[level] = eb; account_shared_subtree()
7937 btrfs_tree_read_lock(eb); account_shared_subtree()
7938 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); account_shared_subtree()
7990 struct extent_buffer *eb = path->nodes[level]; walk_down_proc() local
7995 btrfs_header_owner(eb) != root->root_key.objectid) walk_down_proc()
8007 eb->start, level, 1, walk_down_proc()
8021 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8030 ret = btrfs_inc_ref(trans, root, eb, 1); walk_down_proc()
8032 ret = btrfs_dec_ref(trans, root, eb, 0); walk_down_proc()
8034 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, walk_down_proc()
8035 eb->len, flag, walk_down_proc()
8036 btrfs_header_level(eb), 0); walk_down_proc()
8046 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8228 struct extent_buffer *eb = path->nodes[level]; walk_up_proc() local
8251 btrfs_tree_lock(eb); walk_up_proc()
8252 btrfs_set_lock_blocking(eb); walk_up_proc()
8256 eb->start, level, 1, walk_up_proc()
8260 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8266 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8279 ret = btrfs_dec_ref(trans, root, eb, 1); walk_up_proc()
8281 ret = btrfs_dec_ref(trans, root, eb, 0); walk_up_proc()
8283 ret = account_leaf_items(trans, root, eb); walk_up_proc()
8293 btrfs_header_generation(eb) == trans->transid) { walk_up_proc()
8294 btrfs_tree_lock(eb); walk_up_proc()
8295 btrfs_set_lock_blocking(eb); walk_up_proc()
8298 clean_tree_block(trans, root->fs_info, eb); walk_up_proc()
8301 if (eb == root->node) { walk_up_proc()
8303 parent = eb->start; walk_up_proc()
8306 btrfs_header_owner(eb)); walk_up_proc()
8315 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); walk_up_proc()
5855 btrfs_exclude_logged_extents(struct btrfs_root *log, struct extent_buffer *eb) btrfs_exclude_logged_extents() argument
7764 account_leaf_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *eb) account_leaf_items() argument
H A Droot-tree.c
33 static void btrfs_read_root_item(struct extent_buffer *eb, int slot, btrfs_read_root_item() argument
40 len = btrfs_item_size_nr(eb, slot); btrfs_read_root_item()
41 read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot), btrfs_read_root_item()
H A Dtransaction.c
1067 struct extent_buffer *eb; commit_cowonly_roots() local
1070 eb = btrfs_lock_root_node(fs_info->tree_root); commit_cowonly_roots()
1071 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, commit_cowonly_roots()
1072 0, &eb); commit_cowonly_roots()
1073 btrfs_tree_unlock(eb); commit_cowonly_roots()
1074 free_extent_buffer(eb); commit_cowonly_roots()
H A Dvolumes.c
3703 struct extent_buffer *eb; btrfs_uuid_scan_kthread() local
3737 eb = path->nodes[0]; btrfs_uuid_scan_kthread()
3739 item_size = btrfs_item_size_nr(eb, slot); btrfs_uuid_scan_kthread()
3743 read_extent_buffer(eb, &root_item, btrfs_uuid_scan_kthread()
3744 btrfs_item_ptr_offset(eb, slot), btrfs_uuid_scan_kthread()
6455 struct extent_buffer *eb; btrfs_init_dev_stats() local
6484 eb = path->nodes[0]; btrfs_init_dev_stats()
6485 btrfs_item_key_to_cpu(eb, &found_key, slot); btrfs_init_dev_stats()
6486 item_size = btrfs_item_size_nr(eb, slot); btrfs_init_dev_stats()
6488 ptr = btrfs_item_ptr(eb, slot, btrfs_init_dev_stats()
6494 btrfs_dev_stats_value(eb, ptr, i)); btrfs_init_dev_stats()
6516 struct extent_buffer *eb; update_dev_stat_item() local
6561 eb = path->nodes[0]; update_dev_stat_item()
6562 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); update_dev_stat_item()
6564 btrfs_set_dev_stats_value(eb, ptr, i, update_dev_stat_item()
6566 btrfs_mark_buffer_dirty(eb); update_dev_stat_item()
H A Dscrub.c
532 struct extent_buffer *eb; scrub_print_warning_inode() local
563 eb = swarn->path->nodes[0]; scrub_print_warning_inode()
564 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], scrub_print_warning_inode()
566 isize = btrfs_inode_size(eb, inode_item); scrub_print_warning_inode()
567 nlink = btrfs_inode_nlink(eb, inode_item); scrub_print_warning_inode()
614 struct extent_buffer *eb; scrub_print_warning() local
646 eb = path->nodes[0]; scrub_print_warning()
647 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); scrub_print_warning()
648 item_size = btrfs_item_size_nr(eb, path->slots[0]); scrub_print_warning()
652 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, scrub_print_warning()
/linux-4.1.27/drivers/mtd/
H A Dmtdswap.c
199 static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eb_offset() argument
201 return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; mtdswap_eb_offset()
204 static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eb_detach() argument
209 if (eb->root) { mtdswap_eb_detach()
210 tp = container_of(eb->root, struct mtdswap_tree, root); mtdswap_eb_detach()
214 rb_erase(&eb->rb, eb->root); mtdswap_eb_detach()
218 static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) __mtdswap_rb_add() argument
227 if (eb->erase_count > cur->erase_count) __mtdswap_rb_add()
233 rb_link_node(&eb->rb, parent, p); __mtdswap_rb_add()
234 rb_insert_color(&eb->rb, root); __mtdswap_rb_add()
237 static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) mtdswap_rb_add() argument
241 if (eb->root == &d->trees[idx].root) mtdswap_rb_add()
244 mtdswap_eb_detach(d, eb); mtdswap_rb_add()
246 __mtdswap_rb_add(root, eb); mtdswap_rb_add()
247 eb->root = root; mtdswap_rb_add()
266 static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_handle_badblock() argument
272 eb->flags |= EBLOCK_BAD; mtdswap_handle_badblock()
273 mtdswap_eb_detach(d, eb); mtdswap_handle_badblock()
274 eb->root = NULL; mtdswap_handle_badblock()
280 offset = mtdswap_eb_offset(d, eb); mtdswap_handle_badblock()
294 static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_handle_write_error() argument
296 unsigned int marked = eb->flags & EBLOCK_FAILED; mtdswap_handle_write_error()
299 eb->flags |= EBLOCK_FAILED; mtdswap_handle_write_error()
300 if (curr_write == eb) { mtdswap_handle_write_error()
304 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_handle_write_error()
309 return mtdswap_handle_badblock(d, eb); mtdswap_handle_write_error()
336 static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_read_markers() argument
343 offset = mtdswap_eb_offset(d, eb); mtdswap_read_markers()
365 eb->erase_count = le32_to_cpu(data->count); mtdswap_read_markers()
375 eb->flags |= EBLOCK_NOMAGIC; mtdswap_read_markers()
382 static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, mtdswap_write_marker() argument
397 n.count = cpu_to_le32(eb->erase_count); mtdswap_write_marker()
399 offset = mtdswap_eb_offset(d, eb); mtdswap_write_marker()
403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; mtdswap_write_marker()
412 mtdswap_handle_write_error(d, eb); mtdswap_write_marker()
435 struct swap_eb *eb; mtdswap_check_counts() local
440 eb = d->eb_data + i; mtdswap_check_counts()
442 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) mtdswap_check_counts()
445 __mtdswap_rb_add(&hist_root, eb); mtdswap_check_counts()
458 eb = d->eb_data + i; mtdswap_check_counts()
460 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR)) mtdswap_check_counts()
461 eb->erase_count = median; mtdswap_check_counts()
463 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) mtdswap_check_counts()
466 rb_erase(&eb->rb, &hist_root); mtdswap_check_counts()
474 struct swap_eb *eb; mtdswap_scan_eblks() local
477 eb = d->eb_data + i; mtdswap_scan_eblks()
479 status = mtdswap_read_markers(d, eb); mtdswap_scan_eblks()
481 eb->flags |= EBLOCK_READERR; mtdswap_scan_eblks()
483 eb->flags |= EBLOCK_BAD; mtdswap_scan_eblks()
499 eb->flags |= (idx << EBLOCK_IDX_SHIFT); mtdswap_scan_eblks()
505 eb = d->eb_data + i; mtdswap_scan_eblks()
507 if (eb->flags & EBLOCK_BAD) mtdswap_scan_eblks()
510 idx = eb->flags >> EBLOCK_IDX_SHIFT; mtdswap_scan_eblks()
511 mtdswap_rb_add(d, eb, idx); mtdswap_scan_eblks()
519 static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_store_eb() argument
521 unsigned int weight = eb->active_count; mtdswap_store_eb()
524 if (eb == d->curr_write) mtdswap_store_eb()
527 if (eb->flags & EBLOCK_BITFLIP) mtdswap_store_eb()
528 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); mtdswap_store_eb()
529 else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) mtdswap_store_eb()
530 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_store_eb()
532 mtdswap_rb_add(d, eb, MTDSWAP_USED); mtdswap_store_eb()
534 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); mtdswap_store_eb()
536 mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); mtdswap_store_eb()
538 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); mtdswap_store_eb()
548 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_erase_block() argument
556 eb->erase_count++; mtdswap_erase_block()
557 if (eb->erase_count > d->max_erase_count) mtdswap_erase_block()
558 d->max_erase_count = eb->erase_count; mtdswap_erase_block()
566 erase.addr = mtdswap_eb_offset(d, eb); mtdswap_erase_block()
583 mtdswap_handle_badblock(d, eb); mtdswap_erase_block()
604 mtdswap_handle_badblock(d, eb); mtdswap_erase_block()
617 struct swap_eb *eb; mtdswap_map_free_block() local
625 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); mtdswap_map_free_block()
626 rb_erase(&eb->rb, clean_root); mtdswap_map_free_block()
627 eb->root = NULL; mtdswap_map_free_block()
630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); mtdswap_map_free_block()
637 d->curr_write = eb; mtdswap_map_free_block()
667 struct swap_eb *eb; mtdswap_write_block() local
679 eb = d->eb_data + (*bp / d->pages_per_eblk); mtdswap_write_block()
683 eb->active_count--; mtdswap_write_block()
695 eb->active_count--; mtdswap_write_block()
697 mtdswap_handle_write_error(d, eb); mtdswap_write_block()
718 eb->active_count--; mtdswap_write_block()
728 struct swap_eb *eb, *oldeb; mtdswap_move_block() local
768 eb = d->eb_data + *newblock / d->pages_per_eblk; mtdswap_move_block()
771 eb = d->eb_data + oldblock / d->pages_per_eblk; mtdswap_move_block()
772 eb->active_count--; mtdswap_move_block()
782 static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_gc_eblock() argument
788 eblk_base = (eb - d->eb_data) * d->pages_per_eblk; mtdswap_gc_eblock()
898 struct swap_eb *eb = NULL; mtdswap_pick_gc_eblk() local
910 eb = rb_entry(rb_first(rp), struct swap_eb, rb); mtdswap_pick_gc_eblk()
912 rb_erase(&eb->rb, rp); mtdswap_pick_gc_eblk()
913 eb->root = NULL; mtdswap_pick_gc_eblk()
915 return eb; mtdswap_pick_gc_eblk()
924 struct swap_eb *eb) mtdswap_eblk_passes()
940 base = mtdswap_eb_offset(d, eb); mtdswap_eblk_passes()
974 ret = mtdswap_erase_block(d, eb); mtdswap_eblk_passes()
979 eb->flags &= ~EBLOCK_READERR; mtdswap_eblk_passes()
983 mtdswap_handle_badblock(d, eb); mtdswap_eblk_passes()
989 struct swap_eb *eb; mtdswap_gc() local
995 eb = mtdswap_pick_gc_eblk(d, background); mtdswap_gc()
996 if (!eb) mtdswap_gc()
999 ret = mtdswap_gc_eblock(d, eb); mtdswap_gc()
1003 if (eb->flags & EBLOCK_FAILED) { mtdswap_gc()
1004 mtdswap_handle_badblock(d, eb); mtdswap_gc()
1008 eb->flags &= ~EBLOCK_BITFLIP; mtdswap_gc()
1009 ret = mtdswap_erase_block(d, eb); mtdswap_gc()
1010 if ((eb->flags & EBLOCK_READERR) && mtdswap_gc()
1011 (ret || !mtdswap_eblk_passes(d, eb))) mtdswap_gc()
1015 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN); mtdswap_gc()
1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); mtdswap_gc()
1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); mtdswap_gc()
1074 struct swap_eb *eb; mtdswap_writesect() local
1092 eb = d->eb_data + (mapped / d->pages_per_eblk); mtdswap_writesect()
1093 eb->active_count--; mtdswap_writesect()
1094 mtdswap_store_eb(d, eb); mtdswap_writesect()
1105 eb = d->eb_data + (newblock / d->pages_per_eblk); mtdswap_writesect()
1134 struct swap_eb *eb; mtdswap_readsect() local
1156 eb = d->eb_data + (realblock / d->pages_per_eblk); mtdswap_readsect()
1167 eb->flags |= EBLOCK_BITFLIP; mtdswap_readsect()
1168 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); mtdswap_readsect()
1174 eb->flags |= EBLOCK_READERR; mtdswap_readsect()
1175 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); mtdswap_readsect()
1196 struct swap_eb *eb; mtdswap_discard() local
1204 eb = d->eb_data + (mapped / d->pages_per_eblk); mtdswap_discard()
1205 eb->active_count--; mtdswap_discard()
1206 mtdswap_store_eb(d, eb); mtdswap_discard()
923 mtdswap_eblk_passes(struct mtdswap_dev *d, struct swap_eb *eb) mtdswap_eblk_passes() argument
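
mtdswap_map_free_block() above always takes the first entry of the clean tree, and that tree is ordered by erase count, so the least-worn eraseblock is handed out first. Here is a small userspace sketch of that wear-levelling choice with the red-black tree replaced by a linear scan; the demo_* names and the flag value are simplified stand-ins, not the driver's data structures.

#include <stdio.h>

#define DEMO_EB_BAD	0x1

struct demo_eb {
	unsigned int erase_count;
	unsigned int flags;
};

/* return the usable eraseblock with the lowest erase count, or NULL */
static struct demo_eb *demo_pick_least_worn(struct demo_eb *ebs, int count)
{
	struct demo_eb *best = NULL;
	int i;

	for (i = 0; i < count; i++) {
		if (ebs[i].flags & DEMO_EB_BAD)
			continue;
		if (!best || ebs[i].erase_count < best->erase_count)
			best = &ebs[i];
	}
	return best;
}

int main(void)
{
	struct demo_eb ebs[] = {
		{ 12, 0 }, { 3, DEMO_EB_BAD }, { 7, 0 }, { 9, 0 },
	};
	struct demo_eb *eb = demo_pick_least_worn(ebs, 4);

	if (eb)
		printf("picked block %ld (erase_count=%u)\n",
		       (long)(eb - ebs), eb->erase_count);
	return 0;
}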
/linux-4.1.27/fs/btrfs/tests/
H A Dextent-buffer-tests.c
29 struct extent_buffer *eb; test_btrfs_split_item() local
56 path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); test_btrfs_split_item()
57 if (!eb) { test_btrfs_split_item()
71 write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
91 btrfs_item_key_to_cpu(eb, &key, 0); test_btrfs_split_item()
100 if (btrfs_item_size(eb, item) != strlen(split1)) { test_btrfs_split_item()
106 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
116 btrfs_item_key_to_cpu(eb, &key, 1); test_btrfs_split_item()
125 if (btrfs_item_size(eb, item) != strlen(split2)) { test_btrfs_split_item()
131 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), test_btrfs_split_item()
148 btrfs_item_key_to_cpu(eb, &key, 0); test_btrfs_split_item()
157 if (btrfs_item_size(eb, item) != strlen(split3)) { test_btrfs_split_item()
163 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), test_btrfs_split_item()
172 btrfs_item_key_to_cpu(eb, &key, 1); test_btrfs_split_item()
181 if (btrfs_item_size(eb, item) != strlen(split4)) { test_btrfs_split_item()
187 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), test_btrfs_split_item()
196 btrfs_item_key_to_cpu(eb, &key, 2); test_btrfs_split_item()
205 if (btrfs_item_size(eb, item) != strlen(split2)) { test_btrfs_split_item()
211 read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2), test_btrfs_split_item()
H A Dbtrfs-tests.c
136 struct extent_buffer *eb; btrfs_free_dummy_fs_info() local
138 eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); btrfs_free_dummy_fs_info()
139 if (!eb) btrfs_free_dummy_fs_info()
142 if (radix_tree_exception(eb)) { btrfs_free_dummy_fs_info()
143 if (radix_tree_deref_retry(eb)) btrfs_free_dummy_fs_info()
148 free_extent_buffer_stale(eb); btrfs_free_dummy_fs_info()
/linux-4.1.27/drivers/mtd/tests/
H A Dmtd_test.h
17 unsigned int eb, int ebcnt);
19 unsigned int eb, int ebcnt);
H A Dstresstest.c
57 unsigned int eb; rand_eb() local
60 eb = prandom_u32(); rand_eb()
62 eb %= (ebcnt - 1); rand_eb()
63 if (bbt[eb]) rand_eb()
65 return eb; rand_eb()
88 int eb = rand_eb(); do_read() local
93 if (bbt[eb + 1]) { do_read()
99 addr = (loff_t)eb * mtd->erasesize + offs; do_read()
105 int eb = rand_eb(), offs, err, len; do_write() local
108 offs = offsets[eb]; do_write()
110 err = mtdtest_erase_eraseblock(mtd, eb); do_write()
113 offs = offsets[eb] = 0; do_write()
118 if (bbt[eb + 1]) do_write()
121 err = mtdtest_erase_eraseblock(mtd, eb + 1); do_write()
124 offsets[eb + 1] = 0; do_write()
127 addr = (loff_t)eb * mtd->erasesize + offs; do_write()
133 offsets[eb++] = mtd->erasesize; do_write()
136 offsets[eb] = offs; do_write()
H A Dmtd_test.c
46 unsigned int eb, int ebcnt) mtdtest_scan_for_bad_eraseblocks()
55 bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0; mtdtest_scan_for_bad_eraseblocks()
66 unsigned int eb, int ebcnt) mtdtest_erase_good_eraseblocks()
74 err = mtdtest_erase_eraseblock(mtd, eb + i); mtdtest_erase_good_eraseblocks()
45 mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, unsigned int eb, int ebcnt) mtdtest_scan_for_bad_eraseblocks() argument
65 mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, unsigned int eb, int ebcnt) mtdtest_erase_good_eraseblocks() argument
H A Dtorturetest.c
39 static int eb = 8; variable
40 module_param(eb, int, S_IRUGO);
41 MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
198 ebcnt, eb, eb + ebcnt - 1, dev); tort_init()
258 err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt); tort_init()
267 err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); tort_init()
273 for (i = eb; i < eb + ebcnt; i++) { tort_init()
274 if (bad_ebs[i - eb]) tort_init()
290 for (i = eb; i < eb + ebcnt; i++) { tort_init()
291 if (bad_ebs[i - eb]) tort_init()
293 if ((eb + erase_cycles) & 1) tort_init()
308 for (i = eb; i < eb + ebcnt; i++) { tort_init()
309 if (bad_ebs[i - eb]) tort_init()
311 if ((eb + erase_cycles) & 1) tort_init()
319 ((eb + erase_cycles) & 1) ? tort_init()
/linux-4.1.27/drivers/staging/ozwpan/
H A Dozusbsvc1.c
29 static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei, oz_usb_submit_elt() argument
40 spin_lock_bh(&eb->lock); oz_usb_submit_elt()
46 ret = oz_queue_elt_info(eb, isoc, strid, ei); oz_usb_submit_elt()
48 oz_elt_info_free(eb, ei); oz_usb_submit_elt()
49 spin_unlock_bh(&eb->lock); oz_usb_submit_elt()
63 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_get_desc_req() local
87 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); oz_usb_get_desc_req()
98 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_config_req() local
110 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); oz_usb_set_config_req()
121 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_interface_req() local
134 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); oz_usb_set_interface_req()
146 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_set_clear_feature_req() local
160 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); oz_usb_set_clear_feature_req()
172 struct oz_elt_buf *eb = &pd->elt_buff; oz_usb_vendor_class_req() local
189 return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0); oz_usb_vendor_class_req()
249 struct oz_elt_buf *eb; oz_usb_send_isoc() local
267 eb = &pd->elt_buff; oz_usb_send_isoc()
270 struct oz_elt_info *ei = oz_elt_info_alloc(eb); oz_usb_send_isoc()
310 oz_usb_submit_elt(eb, ei, usb_ctx, ep_num, oz_usb_send_isoc()
H A Dozcdev.c152 struct oz_elt_buf *eb; oz_cdev_write() local
170 eb = &pd->elt_buff; oz_cdev_write()
171 ei = oz_elt_info_alloc(eb); oz_cdev_write()
191 spin_lock(&eb->lock); oz_cdev_write()
192 if (oz_queue_elt_info(eb, 0, 0, ei) == 0) oz_cdev_write()
194 spin_unlock(&eb->lock); oz_cdev_write()
200 spin_lock_bh(&eb->lock); oz_cdev_write()
201 oz_elt_info_free(eb, ei); oz_cdev_write()
202 spin_unlock_bh(&eb->lock); oz_cdev_write()
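
The ozwpan hits share one shape: allocate an element info from the element buffer, try to queue it while holding the buffer's lock, and free it back if queueing fails. A minimal pthread-based sketch of that queue-or-free pattern (the names here are hypothetical, not the ozwpan API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct elt_info { int data; };

struct elt_buf {
    pthread_mutex_t lock;
    struct elt_info *queued;          /* toy one-slot "queue" */
};

static struct elt_info *elt_info_alloc(void)
{
    return calloc(1, sizeof(struct elt_info));
}

/* returns 0 on success, nonzero if the queue is full */
static int queue_elt_info(struct elt_buf *eb, struct elt_info *ei)
{
    if (eb->queued)
        return -1;
    eb->queued = ei;
    return 0;
}

/* mirrors the oz_usb_submit_elt() shape: queue under the lock, free on failure */
static int submit_elt(struct elt_buf *eb, struct elt_info *ei)
{
    int ret;

    pthread_mutex_lock(&eb->lock);
    ret = queue_elt_info(eb, ei);
    if (ret)
        free(ei);                     /* give the element back on failure */
    pthread_mutex_unlock(&eb->lock);
    return ret;
}

int main(void)
{
    struct elt_buf eb = { PTHREAD_MUTEX_INITIALIZER, NULL };

    printf("first submit:  %d\n", submit_elt(&eb, elt_info_alloc()));
    printf("second submit: %d\n", submit_elt(&eb, elt_info_alloc()));
    free(eb.queued);
    return 0;
}
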
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_execbuffer.c56 struct eb_vmas *eb = NULL; eb_create() local
62 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); eb_create()
65 if (eb == NULL) { eb_create()
71 eb = kzalloc(count*sizeof(struct hlist_head) + eb_create()
74 if (eb == NULL) eb_create()
75 return eb; eb_create()
77 eb->and = count - 1; eb_create()
79 eb->and = -args->buffer_count; eb_create()
81 INIT_LIST_HEAD(&eb->vmas); eb_create()
82 return eb; eb_create()
86 eb_reset(struct eb_vmas *eb) eb_reset() argument
88 if (eb->and >= 0) eb_reset()
89 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); eb_reset()
93 eb_lookup_vmas(struct eb_vmas *eb, eb_lookup_vmas() argument
154 list_add_tail(&vma->exec_list, &eb->vmas); eb_lookup_vmas()
158 if (eb->and < 0) { eb_lookup_vmas()
159 eb->lut[i] = vma; eb_lookup_vmas()
164 &eb->buckets[handle & eb->and]); eb_lookup_vmas()
188 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) eb_get_vma() argument
190 if (eb->and < 0) { eb_get_vma()
191 if (handle >= -eb->and) eb_get_vma()
193 return eb->lut[handle]; eb_get_vma()
198 head = &eb->buckets[handle & eb->and]; hlist_for_each()
235 static void eb_destroy(struct eb_vmas *eb) eb_destroy() argument
237 while (!list_empty(&eb->vmas)) { eb_destroy()
240 vma = list_first_entry(&eb->vmas, eb_destroy()
247 kfree(eb); eb_destroy()
386 struct eb_vmas *eb, i915_gem_execbuffer_relocate_entry()
397 target_vma = eb_get_vma(eb, reloc->target_handle); i915_gem_execbuffer_relocate_entry()
493 struct eb_vmas *eb) i915_gem_execbuffer_relocate_vma()
517 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); i915_gem_execbuffer_relocate_vma()
539 struct eb_vmas *eb, i915_gem_execbuffer_relocate_vma_slow()
546 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); i915_gem_execbuffer_relocate_vma_slow()
555 i915_gem_execbuffer_relocate(struct eb_vmas *eb) i915_gem_execbuffer_relocate() argument
568 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate()
569 ret = i915_gem_execbuffer_relocate_vma(vma, eb); i915_gem_execbuffer_relocate()
783 struct eb_vmas *eb, i915_gem_execbuffer_relocate_slow()
794 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; i915_gem_execbuffer_relocate_slow()
797 while (!list_empty(&eb->vmas)) { i915_gem_execbuffer_relocate_slow()
798 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); i915_gem_execbuffer_relocate_slow()
864 eb_reset(eb); i915_gem_execbuffer_relocate_slow()
865 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_execbuffer_relocate_slow()
870 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); i915_gem_execbuffer_relocate_slow()
874 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate_slow()
876 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, i915_gem_execbuffer_relocate_slow()
1139 struct eb_vmas *eb, i915_gem_execbuffer_parse()
1174 list_add_tail(&vma->exec_list, &eb->vmas); i915_gem_execbuffer_parse()
1383 eb_get_batch(struct eb_vmas *eb) eb_get_batch() argument
1385 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); eb_get_batch()
1408 struct eb_vmas *eb; i915_gem_do_execbuffer() local
1508 eb = eb_create(args); i915_gem_do_execbuffer()
1509 if (eb == NULL) { i915_gem_do_execbuffer()
1517 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_do_execbuffer()
1522 batch_obj = eb_get_batch(eb); i915_gem_do_execbuffer()
1526 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); i915_gem_do_execbuffer()
1532 ret = i915_gem_execbuffer_relocate(eb); i915_gem_do_execbuffer()
1536 eb, exec); i915_gem_do_execbuffer()
1553 eb, i915_gem_do_execbuffer()
1605 &eb->vmas, batch_obj, exec_start, i915_gem_do_execbuffer()
1619 eb_destroy(eb); i915_gem_do_execbuffer()
385 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *reloc) i915_gem_execbuffer_relocate_entry() argument
492 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct eb_vmas *eb) i915_gem_execbuffer_relocate_vma() argument
538 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *relocs) i915_gem_execbuffer_relocate_vma_slow() argument
779 i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, struct intel_engine_cs *ring, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec) i915_gem_execbuffer_relocate_slow() argument
1137 i915_gem_execbuffer_parse(struct intel_engine_cs *ring, struct drm_i915_gem_exec_object2 *shadow_exec_entry, struct eb_vmas *eb, struct drm_i915_gem_object *batch_obj, u32 batch_start_offset, u32 batch_len, bool is_master) i915_gem_execbuffer_parse() argument
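
eb_create() and eb_get_vma() above implement a hybrid lookup: eb->and either holds the negative buffer count, in which case handles index eb->lut[] directly, or a power-of-two mask, in which case entries are chained into hash buckets keyed by handle & eb->and; the sign of eb->and selects the path. A standalone sketch of that dispatch, with a toy value type in place of i915_vma:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long handle; struct entry *next; };

struct eb_table {
    int and;                          /* >= 0: hash mask; < 0: -count for a direct LUT */
    union {
        struct entry **lut;           /* direct, indexed by handle */
        struct entry **buckets;       /* hash chains, index = handle & and */
    };
};

static struct eb_table *eb_create(int count, int use_hash)
{
    struct eb_table *eb = calloc(1, sizeof(*eb));

    if (use_hash) {
        eb->and = count - 1;          /* count must be a power of two */
        eb->buckets = calloc(count, sizeof(*eb->buckets));
    } else {
        eb->and = -count;
        eb->lut = calloc(count, sizeof(*eb->lut));
    }
    return eb;
}

static void eb_add(struct eb_table *eb, struct entry *e)
{
    if (eb->and < 0) {
        eb->lut[e->handle] = e;
    } else {
        struct entry **head = &eb->buckets[e->handle & eb->and];
        e->next = *head;
        *head = e;
    }
}

static struct entry *eb_get(struct eb_table *eb, unsigned long handle)
{
    if (eb->and < 0)
        return handle < (unsigned long)-eb->and ? eb->lut[handle] : NULL;
    for (struct entry *e = eb->buckets[handle & eb->and]; e; e = e->next)
        if (e->handle == handle)
            return e;
    return NULL;
}

int main(void)
{
    struct eb_table *eb = eb_create(8, 1);
    struct entry e = { .handle = 42 };

    eb_add(eb, &e);
    printf("lookup 42: %s\n", eb_get(eb, 42) ? "found" : "missing");
    printf("lookup 7:  %s\n", eb_get(eb, 7) ? "found" : "missing");
    free(eb->buckets);
    free(eb);
    return 0;
}
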
H A Di915_drv.h3006 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, intel_display_crc_init()
3010 struct drm_i915_error_state_buf *eb) i915_error_state_buf_release()
3012 kfree(eb->buf); i915_error_state_buf_release()
3009 i915_error_state_buf_release( struct drm_i915_error_state_buf *eb) i915_error_state_buf_release() argument
/linux-4.1.27/lib/
H A Dtest-string_helpers.c155 .in = "\eb \\C\007\"\x90\r]",
157 .out = "\eb \\C\007\"\x90\\r]",
160 .out = "\\eb \\\\C\\a\"\x90\r]",
163 .out = "\\eb \\\\C\\a\"\x90\\r]",
178 .out = "\eb \\C\007\"\x90\r]",
181 .out = "\eb \\C\007\"\x90\\r]",
184 .out = "\\eb \\C\\a\"\x90\r]",
187 .out = "\\eb \\C\\a\"\x90\\r]",
196 .out = "\\eb \\C\\a\"\\220\\r]",
230 .in = "\eb \\C\007\"\x90\r]",
/linux-4.1.27/arch/sh/include/asm/
H A Dbugs.h71 /* 'eb' means 'Endian Big' */ check_bugs()
/linux-4.1.27/arch/arm/mach-realview/
H A Drealview-dt.c17 "arm,realview-eb",
H A Dplatsmp.c20 #include <mach/board-eb.h>
H A Drealview_eb.c46 #include <mach/board-eb.h>
/linux-4.1.27/drivers/input/mouse/
H A Dsynaptics.h114 #define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115 #define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116 #define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
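
The synaptics.h hits are simple capability-bit tests: !!((eb) & 0x01) collapses the masked bit into 0 or 1. A two-minute usage sketch, assuming a sample extended-buttons byte:

#include <stdio.h>

#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))

int main(void)
{
    unsigned char eb = 0x05;          /* hypothetical byte: left and right bits set */

    printf("L=%d M=%d R=%d\n",
           SYN_CAP_EXT_BUTTON_STICK_L(eb),
           SYN_CAP_EXT_BUTTON_STICK_M(eb),
           SYN_CAP_EXT_BUTTON_STICK_R(eb));
    return 0;
}
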
/linux-4.1.27/fs/ocfs2/
H A Dextent_map.c294 struct ocfs2_extent_block *eb; ocfs2_last_eb_is_empty() local
303 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_last_eb_is_empty()
304 el = &eb->h_list; ocfs2_last_eb_is_empty()
365 struct ocfs2_extent_block *eb, *next_eb; ocfs2_figure_hole_clusters() local
370 eb = (struct ocfs2_extent_block *)eb_bh->b_data; ocfs2_figure_hole_clusters()
376 if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) ocfs2_figure_hole_clusters()
380 le64_to_cpu(eb->h_next_leaf_blk), ocfs2_figure_hole_clusters()
418 struct ocfs2_extent_block *uninitialized_var(eb); ocfs2_get_clusters_nocache()
439 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_get_clusters_nocache()
440 el = &eb->h_list; ocfs2_get_clusters_nocache()
507 else if (eb->h_blkno == di->i_last_eb_blk) ocfs2_get_clusters_nocache()
509 else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { ocfs2_get_clusters_nocache()
550 struct ocfs2_extent_block *eb; ocfs2_xattr_get_clusters() local
562 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_xattr_get_clusters()
563 el = &eb->h_list; ocfs2_xattr_get_clusters()
H A Dalloc.c570 struct ocfs2_extent_block *eb);
673 struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data; ocfs2_path_insert_eb() local
684 path->p_node[index].el = &eb->h_list; ocfs2_path_insert_eb()
887 struct ocfs2_extent_block *eb = ocfs2_validate_extent_block() local
899 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); ocfs2_validate_extent_block()
910 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { ocfs2_validate_extent_block()
914 eb->h_signature); ocfs2_validate_extent_block()
918 if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) { ocfs2_validate_extent_block()
923 (unsigned long long)le64_to_cpu(eb->h_blkno)); ocfs2_validate_extent_block()
927 if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) { ocfs2_validate_extent_block()
932 le32_to_cpu(eb->h_fs_generation)); ocfs2_validate_extent_block()
964 struct ocfs2_extent_block *eb; ocfs2_num_free_extents() local
978 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_num_free_extents()
979 el = &eb->h_list; ocfs2_num_free_extents()
1009 struct ocfs2_extent_block *eb; ocfs2_create_new_meta_bhs() local
1043 eb = (struct ocfs2_extent_block *) bhs[i]->b_data; ocfs2_create_new_meta_bhs()
1045 strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); ocfs2_create_new_meta_bhs()
1046 eb->h_blkno = cpu_to_le64(first_blkno); ocfs2_create_new_meta_bhs()
1047 eb->h_fs_generation = cpu_to_le32(osb->fs_generation); ocfs2_create_new_meta_bhs()
1048 eb->h_suballoc_slot = ocfs2_create_new_meta_bhs()
1050 eb->h_suballoc_loc = cpu_to_le64(suballoc_loc); ocfs2_create_new_meta_bhs()
1051 eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); ocfs2_create_new_meta_bhs()
1052 eb->h_list.l_count = ocfs2_create_new_meta_bhs()
1167 struct ocfs2_extent_block *eb; ocfs2_add_branch() local
1175 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_add_branch()
1176 el = &eb->h_list; ocfs2_add_branch()
1185 eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data; ocfs2_add_branch()
1186 new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list); ocfs2_add_branch()
1209 /* allocate the number of new eb blocks we need */ ocfs2_add_branch()
1235 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_add_branch()
1237 BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); ocfs2_add_branch()
1238 eb_el = &eb->h_list; ocfs2_add_branch()
1247 eb->h_next_leaf_blk = 0; ocfs2_add_branch()
1263 new_last_eb_blk = le64_to_cpu(eb->h_blkno); ocfs2_add_branch()
1266 next_blkno = le64_to_cpu(eb->h_blkno); ocfs2_add_branch()
1308 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; ocfs2_add_branch()
1309 eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk); ocfs2_add_branch()
1348 struct ocfs2_extent_block *eb; ocfs2_shift_tree_depth() local
1359 eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; ocfs2_shift_tree_depth()
1361 BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); ocfs2_shift_tree_depth()
1363 eb_el = &eb->h_list; ocfs2_shift_tree_depth()
1393 root_el->l_recs[0].e_blkno = eb->h_blkno; ocfs2_shift_tree_depth()
1402 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_shift_tree_depth()
1437 struct ocfs2_extent_block *eb; ocfs2_find_branch_target() local
1476 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_find_branch_target()
1477 el = &eb->h_list; ocfs2_find_branch_target()
1783 struct ocfs2_extent_block *eb; __ocfs2_find_path() local
1833 eb = (struct ocfs2_extent_block *) bh->b_data; __ocfs2_find_path()
1834 el = &eb->h_list; __ocfs2_find_path()
1898 struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data; find_leaf_ins() local
1899 struct ocfs2_extent_list *el = &eb->h_list; find_leaf_ins()
2526 struct ocfs2_extent_block *eb; ocfs2_update_edge_lengths() local
2551 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_update_edge_lengths()
2552 BUG_ON(eb->h_next_leaf_blk != 0ULL); ocfs2_update_edge_lengths()
2554 el = &eb->h_list; ocfs2_update_edge_lengths()
2580 struct ocfs2_extent_block *eb; ocfs2_unlink_path() local
2587 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_unlink_path()
2592 el = &eb->h_list; ocfs2_unlink_path()
2598 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_unlink_path()
2611 ret = ocfs2_cache_extent_block_free(dealloc, eb); ocfs2_unlink_path()
2630 struct ocfs2_extent_block *eb; ocfs2_unlink_subtree() local
2634 eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; ocfs2_unlink_subtree()
2637 if (root_el->l_recs[i].e_blkno == eb->h_blkno) ocfs2_unlink_subtree()
2645 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_unlink_subtree()
2646 eb->h_next_leaf_blk = 0; ocfs2_unlink_subtree()
2666 struct ocfs2_extent_block *eb; ocfs2_rotate_subtree_left() local
2678 eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; ocfs2_rotate_subtree_left()
2693 if (eb->h_next_leaf_blk != 0ULL) ocfs2_rotate_subtree_left()
2710 if (eb->h_next_leaf_blk == 0ULL && ocfs2_rotate_subtree_left()
2766 if (eb->h_next_leaf_blk == 0ULL) { ocfs2_rotate_subtree_left()
2789 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_rotate_subtree_left()
2790 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_rotate_subtree_left()
3039 struct ocfs2_extent_block *eb; ocfs2_remove_rightmost_path() local
3106 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; ocfs2_remove_rightmost_path()
3107 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); ocfs2_remove_rightmost_path()
3156 struct ocfs2_extent_block *eb; ocfs2_rotate_tree_left() local
3188 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_rotate_tree_left()
3189 el = &eb->h_list; ocfs2_rotate_tree_left()
3190 if (eb->h_next_leaf_blk == 0) { ocfs2_rotate_tree_left()
3204 (unsigned long long)le64_to_cpu(eb->h_blkno)); ocfs2_rotate_tree_left()
4327 struct ocfs2_extent_block *eb; ocfs2_figure_merge_contig_type() local
4352 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_figure_merge_contig_type()
4358 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_figure_merge_contig_type()
4407 eb = (struct ocfs2_extent_block *)bh->b_data; ocfs2_figure_merge_contig_type()
4411 (unsigned long long)le64_to_cpu(eb->h_blkno), ocfs2_figure_merge_contig_type()
4539 struct ocfs2_extent_block *eb; ocfs2_figure_insert_type() local
4563 eb = (struct ocfs2_extent_block *) bh->b_data; ocfs2_figure_insert_type()
4564 el = &eb->h_list; ocfs2_figure_insert_type()
4879 struct ocfs2_extent_block *eb; ocfs2_split_and_insert() local
4893 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; ocfs2_split_and_insert()
4894 rightmost_el = &eb->h_list; ocfs2_split_and_insert()
5052 struct ocfs2_extent_block *eb; ocfs2_split_extent() local
5062 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; ocfs2_split_extent()
5063 rightmost_el = &eb->h_list; ocfs2_split_extent()
5247 struct ocfs2_extent_block *eb; ocfs2_split_tree() local
5271 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; ocfs2_split_tree()
5272 rightmost_el = &eb->h_list; ocfs2_split_tree()
5322 struct ocfs2_extent_block *eb; ocfs2_truncate_rec() local
5342 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_truncate_rec()
5343 if (eb->h_next_leaf_blk == 0) ocfs2_truncate_rec()
6577 struct ocfs2_extent_block *eb) ocfs2_cache_extent_block_free()
6580 le16_to_cpu(eb->h_suballoc_slot), ocfs2_cache_extent_block_free()
6581 le64_to_cpu(eb->h_suballoc_loc), ocfs2_cache_extent_block_free()
6582 le64_to_cpu(eb->h_blkno), ocfs2_cache_extent_block_free()
6583 le16_to_cpu(eb->h_suballoc_bit)); ocfs2_cache_extent_block_free()
6576 ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb) ocfs2_cache_extent_block_free() argument
H A Drefcounttree.c968 struct ocfs2_extent_block *eb, ocfs2_get_refcount_cpos_end()
989 if (!eb || (eb && !eb->h_next_leaf_blk)) { ocfs2_get_refcount_cpos_end()
1013 cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); ocfs2_get_refcount_cpos_end()
1077 struct ocfs2_extent_block *eb = NULL; ocfs2_get_refcount_rec() local
1101 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_get_refcount_rec()
1102 el = &eb->h_list; ocfs2_get_refcount_rec()
1127 eb, el, i, &cpos_end); ocfs2_get_refcount_rec()
2658 struct ocfs2_extent_block *eb = NULL; ocfs2_refcount_cal_cow_clusters() local
2673 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_refcount_cal_cow_clusters()
2674 el = &eb->h_list; ocfs2_refcount_cal_cow_clusters()
2823 eb && eb->h_next_leaf_blk) { ocfs2_refcount_cal_cow_clusters()
2828 le64_to_cpu(eb->h_next_leaf_blk), ocfs2_refcount_cal_cow_clusters()
2835 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_refcount_cal_cow_clusters()
2836 el = &eb->h_list; ocfs2_refcount_cal_cow_clusters()
966 ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct ocfs2_extent_block *eb, struct ocfs2_extent_list *el, int index, u32 *cpos_end) ocfs2_get_refcount_cpos_end() argument
H A Docfs2_fs.h556 eb belongs to. Only valid
H A Ddir.c800 struct ocfs2_extent_block *eb; ocfs2_dx_dir_lookup_rec() local
811 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_dx_dir_lookup_rec()
812 el = &eb->h_list; ocfs2_dx_dir_lookup_rec()
H A Dxattr.c3681 struct ocfs2_extent_block *eb; ocfs2_xattr_get_rec() local
3693 eb = (struct ocfs2_extent_block *) eb_bh->b_data; ocfs2_xattr_get_rec()
3694 el = &eb->h_list; ocfs2_xattr_get_rec()
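
The ocfs2 hits repeat one idiom: cast a buffer_head's data to struct ocfs2_extent_block, work on its embedded extent list (el = &eb->h_list), and move rightward through the leaves via the little-endian h_next_leaf_blk pointer (hence the le64_to_cpu calls) until it reads 0. A heavily simplified sketch of that walk, with hypothetical in-memory stand-ins for the on-disk structures and the block reads:

#include <stdio.h>
#include <stdint.h>

/* hypothetical, simplified shapes -- not the real on-disk ocfs2 layout */
struct extent_list  { int l_count; };
struct extent_block {
    uint64_t h_blkno;
    uint64_t h_next_leaf_blk;         /* 0 terminates the leaf chain */
    struct extent_list h_list;
};

/* stand-in for reading an extent block by block number */
static struct extent_block blocks[] = {
    { .h_blkno = 100, .h_next_leaf_blk = 200, .h_list = { 8 } },
    { .h_blkno = 200, .h_next_leaf_blk = 300, .h_list = { 8 } },
    { .h_blkno = 300, .h_next_leaf_blk = 0,   .h_list = { 8 } },
};

static struct extent_block *read_extent_block(uint64_t blkno)
{
    for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
        if (blocks[i].h_blkno == blkno)
            return &blocks[i];
    return NULL;
}

int main(void)
{
    struct extent_block *eb = read_extent_block(100);

    while (eb) {
        struct extent_list *el = &eb->h_list;

        printf("leaf %llu: %d records max\n",
               (unsigned long long)eb->h_blkno, el->l_count);
        if (!eb->h_next_leaf_blk)
            break;                    /* rightmost leaf reached */
        eb = read_extent_block(eb->h_next_leaf_blk);
    }
    return 0;
}
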
/linux-4.1.27/arch/x86/mm/
H A Dnuma_emulation.c49 struct numa_memblk *eb = &ei->blk[ei->nr_blks]; emu_setup_memblk() local
58 eb->start = pb->start; emu_setup_memblk()
59 eb->end = pb->start + size; emu_setup_memblk()
60 eb->nid = nid; emu_setup_memblk()
72 nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); emu_setup_memblk()
/linux-4.1.27/arch/powerpc/crypto/
H A Daes-tab-4k.S48 .long R(ef, fa, fa, 15), R(b2, 59, 59, eb)
68 .long R(cd, eb, eb, 26), R(4e, 27, 27, 69)
150 .long R(d9, e1, e1, 38), R(eb, f8, f8, 13)
182 .long R(bf, 6d, 7a, eb), R(95, 52, 59, da)
197 .long R(b2, eb, 28, 07), R(2f, b5, c2, 03)
206 .long R(05, 8a, e1, 32), R(a4, f6, eb, 75)
282 .long R(59, f8, 14, 8e), R(eb, 13, 3c, 89)
/linux-4.1.27/arch/arm/mach-realview/include/mach/
H A Dirqs.h25 #include <mach/irqs-eb.h>
H A Duncompress.h23 #include <mach/board-eb.h>
H A Dboard-eb.h2 * arch/arm/mach-realview/include/mach/board-eb.h
H A Dirqs-eb.h2 * arch/arm/mach-realview/include/mach/irqs-eb.h
/linux-4.1.27/drivers/power/reset/
H A Darm-versatile-reboot.c49 .compatible = "arm,realview-eb-syscon",
/linux-4.1.27/drivers/soc/versatile/
H A Dsoc-realview.c24 { .compatible = "arm,realview-eb-soc", },
/linux-4.1.27/arch/unicore32/include/asm/
H A Dassembler.h83 .else; .ifc \cond, eb
/linux-4.1.27/include/media/
H A Dv4l2-mem2mem.h123 struct v4l2_exportbuffer *eb);
246 struct v4l2_exportbuffer *eb);
H A Dvideobuf2-core.h462 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-mem2mem.c448 struct v4l2_exportbuffer *eb) v4l2_m2m_expbuf()
452 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); v4l2_m2m_expbuf()
453 return vb2_expbuf(vq, eb); v4l2_m2m_expbuf()
807 struct v4l2_exportbuffer *eb) v4l2_m2m_ioctl_expbuf()
811 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); v4l2_m2m_ioctl_expbuf()
447 v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_exportbuffer *eb) v4l2_m2m_expbuf() argument
806 v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) v4l2_m2m_ioctl_expbuf() argument
H A Dvideobuf2-core.c2391 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2397 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) vb2_expbuf() argument
2414 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) { vb2_expbuf()
2419 if (eb->type != q->type) { vb2_expbuf()
2424 if (eb->index >= q->num_buffers) { vb2_expbuf()
2429 vb = q->bufs[eb->index]; vb2_expbuf()
2431 if (eb->plane >= vb->num_planes) { vb2_expbuf()
2441 vb_plane = &vb->planes[eb->plane]; vb2_expbuf()
2443 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE); vb2_expbuf()
2446 eb->index, eb->plane); vb2_expbuf()
2450 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE); vb2_expbuf()
2453 eb->index, eb->plane, ret); vb2_expbuf()
2459 eb->index, eb->plane, ret); vb2_expbuf()
2460 eb->fd = ret; vb2_expbuf()
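
vb2_expbuf() above validates the flags, type, index and plane of the v4l2_exportbuffer request and hands back a dma-buf file descriptor in eb->fd. From userspace this is reached through the VIDIOC_EXPBUF ioctl; a minimal sketch, assuming an already-configured capture device at /dev/video0 with at least one MMAP buffer set up via VIDIOC_REQBUFS:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    struct v4l2_exportbuffer eb;
    int fd = open("/dev/video0", O_RDWR);

    if (fd < 0) {
        perror("open /dev/video0");
        return 1;
    }

    memset(&eb, 0, sizeof(eb));
    eb.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;   /* must match the queue type */
    eb.index = 0;                             /* buffer 0 */
    eb.plane = 0;                             /* first (only) plane */
    eb.flags = O_CLOEXEC | O_RDWR;            /* only O_CLOEXEC and O_ACCMODE bits allowed */

    if (ioctl(fd, VIDIOC_EXPBUF, &eb) < 0) {
        perror("VIDIOC_EXPBUF");
        close(fd);
        return 1;
    }

    printf("exported buffer 0 as dma-buf fd %d\n", eb.fd);
    close(eb.fd);
    close(fd);
    return 0;
}
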
/linux-4.1.27/arch/ia64/kernel/
H A Dmca_drv.c413 * Bus_Check structure with Bus_Check.eb (external bus error) flag set is_mca_global()
423 if (pbci->eb) is_mca_global()
583 if (psp->bc && pbci->eb && pbci->bsi == 0) { recover_from_platform_error()
701 if (pbci->eb && pbci->bsi > 0) recover_from_processor_error()
/linux-4.1.27/arch/sparc/kernel/
H A Dbtext.c267 unsigned int *eb = (int *)expand_bits_16; draw_byte_16() local
272 base[0] = (eb[bits >> 6] & fg) ^ bg; draw_byte_16()
273 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; draw_byte_16()
274 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; draw_byte_16()
275 base[3] = (eb[bits & 3] & fg) ^ bg; draw_byte_16()
285 unsigned int *eb = (int *)expand_bits_8; draw_byte_8() local
290 base[0] = (eb[bits >> 4] & fg) ^ bg; draw_byte_8()
291 base[1] = (eb[bits & 0xf] & fg) ^ bg; draw_byte_8()
/linux-4.1.27/drivers/scsi/aic94xx/
H A Daic94xx_hwi.c366 struct sg_el *eb = &escb->eb[k]; asd_assign_edbs2escbs() local
369 memset(eb, 0, sizeof(*eb)); asd_assign_edbs2escbs()
370 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); asd_assign_edbs2escbs()
371 eb->size = cpu_to_le32(((u32) edb->size)); asd_assign_edbs2escbs()
H A Daic94xx_scb.c386 struct sg_el *eb = &escb->eb[edb_id]; asd_invalidate_edb() local
390 eb->flags |= ELEMENT_NOT_VALID; asd_invalidate_edb()
406 escb->eb[i].flags = 0; asd_invalidate_edb()
H A Daic94xx_sas.h480 struct sg_el eb[ASD_EDBS_PER_SCB]; member in struct:empty_scb
/linux-4.1.27/drivers/media/platform/s5p-mfc/
H A Ds5p_mfc_dec.c663 struct v4l2_exportbuffer *eb) vidioc_expbuf()
667 if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) vidioc_expbuf()
668 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
669 if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) vidioc_expbuf()
670 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
662 vidioc_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) vidioc_expbuf() argument
H A Ds5p_mfc_enc.c1295 struct v4l2_exportbuffer *eb) vidioc_expbuf()
1299 if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) vidioc_expbuf()
1300 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
1301 if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) vidioc_expbuf()
1302 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
1294 vidioc_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) vidioc_expbuf() argument
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_hsi.h1708 /* eb is the bitwidth of a single element */
1709 #define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
1710 #define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
1736 #define SHMEM_ARRAY_BITPOS(i, eb, fb) \
1737 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
1738 (((i)%((fb)/(eb))) * (eb)))
1740 #define SHMEM_ARRAY_GET(a, i, eb, fb) \
1741 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
1742 SHMEM_ARRAY_MASK(eb))
1744 #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
1746 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
1747 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1748 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
1749 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
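
The bnx2x SHMEM_ARRAY_* macros pack fixed-width elements (eb bits each) into an array of 32-bit words, grouped into fb-bit fields; the bit-position math places fields from the most-significant end of each word while elements grow upward within a field. The listing does not show the do/while wrapper of the SET macro, so it is reconstructed below; the demo packs 4-bit elements into 8-bit fields and reads one back:

#include <stdio.h>
#include <stdint.h>

/* copied from the bnx2x_hsi.h lines above; eb = element bitwidth,
 * fb = field bitwidth (a multiple of eb that divides 32) */
#define SHMEM_ARRAY_MASK(eb)        ((1 << (eb)) - 1)
#define SHMEM_ARRAY_ENTRY(i, eb)    ((i) / (32 / (eb)))
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
    ((((32 / (fb)) - 1 - ((i) / ((fb) / (eb))) % (32 / (fb))) * (fb)) + \
     (((i) % ((fb) / (eb))) * (eb)))
#define SHMEM_ARRAY_GET(a, i, eb, fb) \
    ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
     SHMEM_ARRAY_MASK(eb))
/* do/while wrapper reconstructed -- the listing only shows the body */
#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
do { \
    a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
        SHMEM_ARRAY_BITPOS(i, eb, fb)); \
    a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
        SHMEM_ARRAY_BITPOS(i, eb, fb)); \
} while (0)

int main(void)
{
    uint32_t a[2] = { 0, 0 };

    /* 4-bit elements, two per 8-bit field, eight per 32-bit word */
    SHMEM_ARRAY_SET(a, 3, 4, 8, 0xA);
    printf("element 3 = 0x%x (word 0 = 0x%08x)\n",
           (unsigned int)SHMEM_ARRAY_GET(a, 3, 4, 8), a[0]);
    return 0;
}
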
/linux-4.1.27/arch/powerpc/kernel/
H A Dbtext.c451 unsigned int *eb = (int *)expand_bits_16; draw_byte_16() local
456 base[0] = (eb[bits >> 6] & fg) ^ bg; draw_byte_16()
457 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; draw_byte_16()
458 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; draw_byte_16()
459 base[3] = (eb[bits & 3] & fg) ^ bg; draw_byte_16()
469 unsigned int *eb = (int *)expand_bits_8; draw_byte_8() local
474 base[0] = (eb[bits >> 4] & fg) ^ bg; draw_byte_8()
475 base[1] = (eb[bits & 0xf] & fg) ^ bg; draw_byte_8()
/linux-4.1.27/drivers/media/platform/exynos-gsc/
H A Dgsc-m2m.c382 struct v4l2_exportbuffer *eb) gsc_m2m_expbuf()
385 return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb); gsc_m2m_expbuf()
381 gsc_m2m_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *eb) gsc_m2m_expbuf() argument
/linux-4.1.27/drivers/isdn/i4l/
H A Disdn_tty.c3622 char eb[2]; isdn_tty_edit_at() local
3632 eb[0] = c; isdn_tty_edit_at()
3633 eb[1] = 0; isdn_tty_edit_at()
3634 isdn_tty_at_cout(eb, info); isdn_tty_edit_at()
3653 eb[0] = c; isdn_tty_edit_at()
3654 eb[1] = 0; isdn_tty_edit_at()
3655 isdn_tty_at_cout(eb, info); isdn_tty_edit_at()
/linux-4.1.27/tools/lib/traceevent/
H A Devent-parse.c5276 struct event_format * const * eb = b; events_id_cmp() local
5278 if ((*ea)->id < (*eb)->id) events_id_cmp()
5281 if ((*ea)->id > (*eb)->id) events_id_cmp()
5290 struct event_format * const * eb = b; events_name_cmp() local
5293 res = strcmp((*ea)->name, (*eb)->name); events_name_cmp()
5297 res = strcmp((*ea)->system, (*eb)->system); events_name_cmp()
5307 struct event_format * const * eb = b; events_system_cmp() local
5310 res = strcmp((*ea)->system, (*eb)->system); events_system_cmp()
5314 res = strcmp((*ea)->name, (*eb)->name); events_system_cmp()
H A Dparse-filter.c107 const struct filter_type *eb = b; filter_cmp() local
109 if (ea->event_id < eb->event_id) filter_cmp()
112 if (ea->event_id > eb->event_id) filter_cmp()
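
The traceevent comparators above are classic qsort()-style functions over pointers-to-pointers: events_id_cmp() orders by id, events_name_cmp() by name with system as a tie-breaker, and parse-filter.c's filter_cmp() does the same over event_id. A standalone, slightly simplified equivalent using a small hypothetical event record:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event { int id; const char *system; const char *name; };

/* same shape as events_id_cmp(): a and b point at struct event * elements */
static int events_id_cmp(const void *a, const void *b)
{
    struct event * const *ea = a;
    struct event * const *eb = b;

    if ((*ea)->id < (*eb)->id)
        return -1;
    if ((*ea)->id > (*eb)->id)
        return 1;
    return 0;
}

/* name first, then system as a tie-breaker, like events_name_cmp() */
static int events_name_cmp(const void *a, const void *b)
{
    struct event * const *ea = a;
    struct event * const *eb = b;
    int res = strcmp((*ea)->name, (*eb)->name);

    if (res)
        return res;
    return strcmp((*ea)->system, (*eb)->system);
}

int main(void)
{
    struct event e1 = { 3, "sched", "sched_switch" };
    struct event e2 = { 1, "irq",   "irq_handler_entry" };
    struct event *events[] = { &e1, &e2 };

    qsort(events, 2, sizeof(events[0]), events_id_cmp);
    printf("lowest id: %s\n", events[0]->name);

    qsort(events, 2, sizeof(events[0]), events_name_cmp);
    printf("first by name: %s\n", events[0]->name);
    return 0;
}
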
/linux-4.1.27/drivers/media/platform/s5p-tv/
H A Dmixer_video.c722 struct v4l2_exportbuffer *eb) mxr_expbuf()
727 return vb2_expbuf(&layer->vb_queue, eb); mxr_expbuf()
721 mxr_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) mxr_expbuf() argument
/linux-4.1.27/net/netfilter/
H A Dx_tables.c39 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
66 [NFPROTO_BRIDGE] = "eb",
/linux-4.1.27/drivers/media/usb/em28xx/
H A Dem28xx.h483 u8 id[4]; /* 1a eb 67 95 */
/linux-4.1.27/drivers/net/fddi/skfp/h/
H A Dsmt.h363 u_int eb_error_ct ; /* # of eb overflows */
/linux-4.1.27/arch/ia64/include/asm/
H A Dpal.h575 eb : 1, /* External bus error */ member in struct:pal_bus_check_info_s
743 #define pmci_bus_external_error pme_bus.eb
/linux-4.1.27/arch/x86/kvm/
H A Dvmx.c1567 u32 eb; update_exception_bitmap() local
1569 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | update_exception_bitmap()
1574 eb |= 1u << BP_VECTOR; update_exception_bitmap()
1576 eb = ~0; update_exception_bitmap()
1578 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ update_exception_bitmap()
1580 eb &= ~(1u << NM_VECTOR); update_exception_bitmap()
1588 eb |= get_vmcs12(vcpu)->exception_bitmap; update_exception_bitmap()
1590 vmcs_write32(EXCEPTION_BITMAP, eb); update_exception_bitmap()
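
update_exception_bitmap() builds a 32-bit mask of exception vectors the hypervisor wants to intercept: set bits with 1u << VECTOR, drop one with &= ~(1u << VECTOR), or trap everything with ~0 before writing the result into the VMCS. A small sketch of the same bit bookkeeping, with the standard x86 vector numbers written out as constants and the conditions reduced to illustrative flags:

#include <stdio.h>
#include <stdint.h>

/* standard x86 exception vector numbers */
#define DB_VECTOR  1
#define BP_VECTOR  3
#define UD_VECTOR  6
#define PF_VECTOR 14
#define AC_VECTOR 17
#define MC_VECTOR 18

int main(void)
{
    /* start roughly like update_exception_bitmap(): trap #PF, #UD, #MC and a few others */
    uint32_t eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) |
                  (1u << MC_VECTOR) | (1u << DB_VECTOR) |
                  (1u << AC_VECTOR);
    int guest_debugging = 1;
    int ept_enabled = 1;

    if (guest_debugging)
        eb |= 1u << BP_VECTOR;        /* also intercept breakpoints */
    if (ept_enabled)
        eb &= ~(1u << PF_VECTOR);     /* page faults handled without an exit */

    printf("exception bitmap = 0x%08x\n", eb);
    return 0;
}
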
/linux-4.1.27/drivers/media/usb/gspca/
H A Dspca508.c394 /* READ { 0x0000, 0x860e } -> 0000: eb */
H A Dzc3xx.c3981 {0xa0, 0xeb, ZC3XX_R020_HSYNC_3}, /* 00,20,eb,cc */
4886 {0xaa, 0x0f, 0x00eb}, /* 00,0f,eb,aa */
/linux-4.1.27/drivers/mtd/onenand/
H A Donenand_base.c2305 /* loop over 64 eb batches */ onenand_multiblock_erase()
2345 /* last block of 64-eb series */ onenand_multiblock_erase()
/linux-4.1.27/drivers/mtd/nand/
H A Dnandsim.c161 " separated by commas e.g. 113:2 means eb 113"
