Lines Matching refs:eb in fs/btrfs/locking.c (each hit shows its source line number and the enclosing function)
27 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
34 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) in btrfs_set_lock_blocking_rw() argument
42 if (eb->lock_nested && current->pid == eb->lock_owner) in btrfs_set_lock_blocking_rw()
45 if (atomic_read(&eb->blocking_writers) == 0) { in btrfs_set_lock_blocking_rw()
46 WARN_ON(atomic_read(&eb->spinning_writers) != 1); in btrfs_set_lock_blocking_rw()
47 atomic_dec(&eb->spinning_writers); in btrfs_set_lock_blocking_rw()
48 btrfs_assert_tree_locked(eb); in btrfs_set_lock_blocking_rw()
49 atomic_inc(&eb->blocking_writers); in btrfs_set_lock_blocking_rw()
50 write_unlock(&eb->lock); in btrfs_set_lock_blocking_rw()
53 btrfs_assert_tree_read_locked(eb); in btrfs_set_lock_blocking_rw()
54 atomic_inc(&eb->blocking_readers); in btrfs_set_lock_blocking_rw()
55 WARN_ON(atomic_read(&eb->spinning_readers) == 0); in btrfs_set_lock_blocking_rw()
56 atomic_dec(&eb->spinning_readers); in btrfs_set_lock_blocking_rw()
57 read_unlock(&eb->lock); in btrfs_set_lock_blocking_rw()
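
The hits above are only the eb-referencing lines of btrfs_set_lock_blocking_rw(), which converts a spinning lock the caller already holds into a blocking one. A minimal sketch of how they plausibly fit together follows; the branch on rw, the early return for a nested owner, and the BTRFS_WRITE_LOCK / BTRFS_READ_LOCK constants are not in the listing and are assumed from the locking.h of this kernel era.

/* Sketch only: control flow around the matched lines is reconstructed. */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /* A nested read lock held by the write-lock owner stays as-is. */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        /* Convert the spinning write lock into a blocking one. */
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                /* Convert a spinning read lock into a blocking one. */
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}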
66 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) in btrfs_clear_lock_blocking_rw() argument
74 if (eb->lock_nested && current->pid == eb->lock_owner) in btrfs_clear_lock_blocking_rw()
78 BUG_ON(atomic_read(&eb->blocking_writers) != 1); in btrfs_clear_lock_blocking_rw()
79 write_lock(&eb->lock); in btrfs_clear_lock_blocking_rw()
80 WARN_ON(atomic_read(&eb->spinning_writers)); in btrfs_clear_lock_blocking_rw()
81 atomic_inc(&eb->spinning_writers); in btrfs_clear_lock_blocking_rw()
82 if (atomic_dec_and_test(&eb->blocking_writers) && in btrfs_clear_lock_blocking_rw()
83 waitqueue_active(&eb->write_lock_wq)) in btrfs_clear_lock_blocking_rw()
84 wake_up(&eb->write_lock_wq); in btrfs_clear_lock_blocking_rw()
86 BUG_ON(atomic_read(&eb->blocking_readers) == 0); in btrfs_clear_lock_blocking_rw()
87 read_lock(&eb->lock); in btrfs_clear_lock_blocking_rw()
88 atomic_inc(&eb->spinning_readers); in btrfs_clear_lock_blocking_rw()
89 if (atomic_dec_and_test(&eb->blocking_readers) && in btrfs_clear_lock_blocking_rw()
90 waitqueue_active(&eb->read_lock_wq)) in btrfs_clear_lock_blocking_rw()
91 wake_up(&eb->read_lock_wq); in btrfs_clear_lock_blocking_rw()
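
btrfs_clear_lock_blocking_rw() does the reverse, turning a blocking hold back into a spinning one. In the sketch below the rw branch and the BTRFS_WRITE_LOCK_BLOCKING / BTRFS_READ_LOCK_BLOCKING constants are assumed, and any memory barrier the real function places between the counter update and waitqueue_active() is omitted.

/* Sketch only: branch structure assumed, barriers omitted. */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                /* Retake the rwlock and move the writer back to spinning. */
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                /* Same idea for a blocking read lock. */
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
}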
100 void btrfs_tree_read_lock(struct extent_buffer *eb) in btrfs_tree_read_lock() argument
103 BUG_ON(!atomic_read(&eb->blocking_writers) && in btrfs_tree_read_lock()
104 current->pid == eb->lock_owner); in btrfs_tree_read_lock()
106 read_lock(&eb->lock); in btrfs_tree_read_lock()
107 if (atomic_read(&eb->blocking_writers) && in btrfs_tree_read_lock()
108 current->pid == eb->lock_owner) { in btrfs_tree_read_lock()
115 BUG_ON(eb->lock_nested); in btrfs_tree_read_lock()
116 eb->lock_nested = 1; in btrfs_tree_read_lock()
117 read_unlock(&eb->lock); in btrfs_tree_read_lock()
120 if (atomic_read(&eb->blocking_writers)) { in btrfs_tree_read_lock()
121 read_unlock(&eb->lock); in btrfs_tree_read_lock()
122 wait_event(eb->write_lock_wq, in btrfs_tree_read_lock()
123 atomic_read(&eb->blocking_writers) == 0); in btrfs_tree_read_lock()
126 atomic_inc(&eb->read_locks); in btrfs_tree_read_lock()
127 atomic_inc(&eb->spinning_readers); in btrfs_tree_read_lock()
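
A sketch of btrfs_tree_read_lock() reconstructed around the hits above: it spins on the rwlock, records a nested lock if the caller already owns the (blocking) write lock, and otherwise waits for blocking writers to drain. The again label, the goto, and the return in the nested path are assumptions, not part of the listing.

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This thread already holds the (blocking) write lock, so
                 * record a nested read lock instead of deadlocking.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                /* A blocking writer is active: drop the rwlock, wait, retry. */
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}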
135 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) in btrfs_tree_read_lock_atomic() argument
137 if (atomic_read(&eb->blocking_writers)) in btrfs_tree_read_lock_atomic()
140 read_lock(&eb->lock); in btrfs_tree_read_lock_atomic()
141 if (atomic_read(&eb->blocking_writers)) { in btrfs_tree_read_lock_atomic()
142 read_unlock(&eb->lock); in btrfs_tree_read_lock_atomic()
145 atomic_inc(&eb->read_locks); in btrfs_tree_read_lock_atomic()
146 atomic_inc(&eb->spinning_readers); in btrfs_tree_read_lock_atomic()
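
btrfs_tree_read_lock_atomic() takes the read lock only when no writer has gone blocking, and never sleeps. Sketch below; the 0/1 return values are assumed (0 = not taken, 1 = taken).

int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                /* A writer went blocking after the first check: bail out. */
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}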
154 int btrfs_try_tree_read_lock(struct extent_buffer *eb) in btrfs_try_tree_read_lock() argument
156 if (atomic_read(&eb->blocking_writers)) in btrfs_try_tree_read_lock()
159 if (!read_trylock(&eb->lock)) in btrfs_try_tree_read_lock()
162 if (atomic_read(&eb->blocking_writers)) { in btrfs_try_tree_read_lock()
163 read_unlock(&eb->lock); in btrfs_try_tree_read_lock()
166 atomic_inc(&eb->read_locks); in btrfs_try_tree_read_lock()
167 atomic_inc(&eb->spinning_readers); in btrfs_try_tree_read_lock()
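
btrfs_try_tree_read_lock() is the fully non-blocking variant: unlike the atomic version it also refuses to spin on eb->lock. Sketch; return values assumed as above.

int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        /* Do not even spin on the rwlock; give up if it is contended. */
        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}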
175 int btrfs_try_tree_write_lock(struct extent_buffer *eb) in btrfs_try_tree_write_lock() argument
177 if (atomic_read(&eb->blocking_writers) || in btrfs_try_tree_write_lock()
178 atomic_read(&eb->blocking_readers)) in btrfs_try_tree_write_lock()
181 write_lock(&eb->lock); in btrfs_try_tree_write_lock()
182 if (atomic_read(&eb->blocking_writers) || in btrfs_try_tree_write_lock()
183 atomic_read(&eb->blocking_readers)) { in btrfs_try_tree_write_lock()
184 write_unlock(&eb->lock); in btrfs_try_tree_write_lock()
187 atomic_inc(&eb->write_locks); in btrfs_try_tree_write_lock()
188 atomic_inc(&eb->spinning_writers); in btrfs_try_tree_write_lock()
189 eb->lock_owner = current->pid; in btrfs_try_tree_write_lock()
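
btrfs_try_tree_write_lock() mirrors that for writers, refusing if any blocking reader or writer exists. Sketch; return values assumed.

int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                /* Someone went blocking while we acquired the rwlock. */
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}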
196 void btrfs_tree_read_unlock(struct extent_buffer *eb) in btrfs_tree_read_unlock() argument
204 if (eb->lock_nested && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock()
205 eb->lock_nested = 0; in btrfs_tree_read_unlock()
208 btrfs_assert_tree_read_locked(eb); in btrfs_tree_read_unlock()
209 WARN_ON(atomic_read(&eb->spinning_readers) == 0); in btrfs_tree_read_unlock()
210 atomic_dec(&eb->spinning_readers); in btrfs_tree_read_unlock()
211 atomic_dec(&eb->read_locks); in btrfs_tree_read_unlock()
212 read_unlock(&eb->lock); in btrfs_tree_read_unlock()
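
btrfs_tree_read_unlock() drops a spinning read lock, or merely clears lock_nested when the caller is the write-lock owner. Sketch reconstructed from the hits; the early return is assumed.

void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * A nested read lock held by the write-lock owner never took the
         * rwlock, so only the flag needs clearing.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}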
218 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) in btrfs_tree_read_unlock_blocking() argument
226 if (eb->lock_nested && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock_blocking()
227 eb->lock_nested = 0; in btrfs_tree_read_unlock_blocking()
230 btrfs_assert_tree_read_locked(eb); in btrfs_tree_read_unlock_blocking()
231 WARN_ON(atomic_read(&eb->blocking_readers) == 0); in btrfs_tree_read_unlock_blocking()
232 if (atomic_dec_and_test(&eb->blocking_readers) && in btrfs_tree_read_unlock_blocking()
233 waitqueue_active(&eb->read_lock_wq)) in btrfs_tree_read_unlock_blocking()
234 wake_up(&eb->read_lock_wq); in btrfs_tree_read_unlock_blocking()
235 atomic_dec(&eb->read_locks); in btrfs_tree_read_unlock_blocking()
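
btrfs_tree_read_unlock_blocking() is the counterpart for a read lock that was converted to blocking; note that it never touches eb->lock, since a blocking reader does not hold the rwlock. Sketch; the early return is assumed and any barrier before waitqueue_active() is omitted.

void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* Dropping the last blocking reader wakes any waiting writer. */
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}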
242 void btrfs_tree_lock(struct extent_buffer *eb) in btrfs_tree_lock() argument
245 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); in btrfs_tree_lock()
246 wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0); in btrfs_tree_lock()
247 write_lock(&eb->lock); in btrfs_tree_lock()
248 if (atomic_read(&eb->blocking_readers)) { in btrfs_tree_lock()
249 write_unlock(&eb->lock); in btrfs_tree_lock()
250 wait_event(eb->read_lock_wq, in btrfs_tree_lock()
251 atomic_read(&eb->blocking_readers) == 0); in btrfs_tree_lock()
254 if (atomic_read(&eb->blocking_writers)) { in btrfs_tree_lock()
255 write_unlock(&eb->lock); in btrfs_tree_lock()
256 wait_event(eb->write_lock_wq, in btrfs_tree_lock()
257 atomic_read(&eb->blocking_writers) == 0); in btrfs_tree_lock()
260 WARN_ON(atomic_read(&eb->spinning_writers)); in btrfs_tree_lock()
261 atomic_inc(&eb->spinning_writers); in btrfs_tree_lock()
262 atomic_inc(&eb->write_locks); in btrfs_tree_lock()
263 eb->lock_owner = current->pid; in btrfs_tree_lock()
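
btrfs_tree_lock() takes the spinning write lock, first waiting for every blocking reader and writer to drain. The again label and gotos in the sketch below are assumptions; the matched lines themselves come from the listing.

void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
        /* Wait outside the rwlock so blocking holders can make progress. */
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}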
269 void btrfs_tree_unlock(struct extent_buffer *eb) in btrfs_tree_unlock() argument
271 int blockers = atomic_read(&eb->blocking_writers); in btrfs_tree_unlock()
275 btrfs_assert_tree_locked(eb); in btrfs_tree_unlock()
276 eb->lock_owner = 0; in btrfs_tree_unlock()
277 atomic_dec(&eb->write_locks); in btrfs_tree_unlock()
280 WARN_ON(atomic_read(&eb->spinning_writers)); in btrfs_tree_unlock()
281 atomic_dec(&eb->blocking_writers); in btrfs_tree_unlock()
283 if (waitqueue_active(&eb->write_lock_wq)) in btrfs_tree_unlock()
284 wake_up(&eb->write_lock_wq); in btrfs_tree_unlock()
286 WARN_ON(atomic_read(&eb->spinning_writers) != 1); in btrfs_tree_unlock()
287 atomic_dec(&eb->spinning_writers); in btrfs_tree_unlock()
288 write_unlock(&eb->lock); in btrfs_tree_unlock()
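
btrfs_tree_unlock() releases either form of write lock: a blocking hold just drops the counter and wakes waiters, a spinning hold releases the rwlock itself. In the sketch the if/else on blockers is assumed, and any barrier the real code issues before waitqueue_active() is omitted.

void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                /* Blocking write lock: the rwlock is not held any more. */
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                /* Spinning write lock: release the rwlock itself. */
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}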
292 void btrfs_assert_tree_locked(struct extent_buffer *eb) in btrfs_assert_tree_locked() argument
294 BUG_ON(!atomic_read(&eb->write_locks)); in btrfs_assert_tree_locked()
297 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) in btrfs_assert_tree_read_locked() argument
299 BUG_ON(!atomic_read(&eb->read_locks)); in btrfs_assert_tree_read_locked()
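
Taken together, these helpers implement the two-stage (spinning, then blocking) tree locking that the assert helpers at lines 292-299 police. The hypothetical caller below illustrates the usual pattern; do_something_that_might_sleep() is made up for the example, and the BTRFS_WRITE_LOCK / BTRFS_WRITE_LOCK_BLOCKING argument values are assumed from the locking.h of this era rather than taken from the listing.

static void example_modify_eb(struct extent_buffer *eb)
{
        /* Take the spinning write lock for a short critical section. */
        btrfs_tree_lock(eb);

        /*
         * About to do something that may sleep: convert to a blocking
         * lock so other tasks spinning on eb->lock are not stuck behind us.
         */
        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

        do_something_that_might_sleep(eb);      /* hypothetical helper */

        /* Return to the spinning lock before touching the buffer again. */
        btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);

        btrfs_tree_unlock(eb);
}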