This source file includes the following definitions.
- get_restripe_target
- btrfs_reduce_alloc_profile
- get_alloc_profile
- btrfs_get_alloc_profile
- btrfs_get_block_group
- btrfs_put_block_group
- btrfs_add_block_group_cache
- block_group_cache_tree_search
- btrfs_lookup_first_block_group
- btrfs_lookup_block_group
- btrfs_next_block_group
- btrfs_inc_nocow_writers
- btrfs_dec_nocow_writers
- btrfs_wait_nocow_writers
- btrfs_dec_block_group_reservations
- btrfs_wait_block_group_reservations
- btrfs_get_caching_control
- btrfs_put_caching_control
- btrfs_wait_block_group_cache_progress
- btrfs_wait_block_group_cache_done
- fragment_free_space
- add_new_free_space
- load_extent_tree_free
- caching_thread
- btrfs_cache_block_group
- clear_avail_alloc_bits
- clear_incompat_bg_bits
- btrfs_remove_block_group
- btrfs_start_trans_remove_block_group
- inc_block_group_ro
- btrfs_delete_unused_bgs
- btrfs_mark_bg_unused
- find_first_block_group
- set_avail_alloc_bits
- exclude_super_stripes
- link_block_group
- btrfs_create_block_group_cache
- check_chunk_block_group_mappings
- btrfs_read_block_groups
- btrfs_create_pending_block_groups
- btrfs_make_block_group
- update_block_group_flags
- btrfs_inc_block_group_ro
- btrfs_dec_block_group_ro
- write_one_cache_group
- cache_save_setup
- btrfs_setup_space_cache
- btrfs_start_dirty_block_groups
- btrfs_write_dirty_block_groups
- btrfs_update_block_group
- btrfs_add_reserved_bytes
- btrfs_free_reserved_bytes
- force_metadata_allocation
- should_alloc_chunk
- btrfs_force_chunk_alloc
- btrfs_chunk_alloc
- get_profile_num_devs
- check_system_chunk
- btrfs_put_block_group_cache
- btrfs_free_block_groups
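
Most lookup helpers defined below return a referenced struct btrfs_block_group_cache, and callers are expected to drop that reference with btrfs_put_block_group(). A minimal, hypothetical caller sketch (example_report_bg() is not part of this file):

static void example_report_bg(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	/* The lookup takes a reference via btrfs_get_block_group(). */
	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return;

	pr_info("block group start=%llu len=%llu\n",
		bg->key.objectid, bg->key.offset);

	/* Every successful lookup must be paired with a put. */
	btrfs_put_block_group(bg);
}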
1
2
3 #include "misc.h"
4 #include "ctree.h"
5 #include "block-group.h"
6 #include "space-info.h"
7 #include "disk-io.h"
8 #include "free-space-cache.h"
9 #include "free-space-tree.h"
11 #include "volumes.h"
12 #include "transaction.h"
13 #include "ref-verify.h"
14 #include "sysfs.h"
15 #include "tree-log.h"
16 #include "delalloc-space.h"
17
18 /*
19  * Return target flags in extended format or 0 if restripe for this chunk_type
20  * is not in progress
21  *
22  * Should be called with balance_lock held
23  */
24 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
25 {
26 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
27 u64 target = 0;
28
29 if (!bctl)
30 return 0;
31
32 if (flags & BTRFS_BLOCK_GROUP_DATA &&
33 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
34 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
35 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
36 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
37 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
38 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
39 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
40 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
41 }
42
43 return target;
44 }
45
46 /*
47  * @flags: available profiles in extended format (see ctree.h)
48  *
49  * Return reduced profile in chunk format.  If profile changing is in progress
50  * (either running or paused) picks the target profile (if it's already
51  * available), otherwise falls back to plain reducing.
52  */
53 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
54 {
55 u64 num_devices = fs_info->fs_devices->rw_devices;
56 u64 target;
57 u64 raid_type;
58 u64 allowed = 0;
59
60 /*
61  * See if restripe for this chunk_type is in progress, if so try to
62  * reduce to the target profile
63  */
64 spin_lock(&fs_info->balance_lock);
65 target = get_restripe_target(fs_info, flags);
66 if (target) {
67 /* Pick the target profile only if it's already available */
68 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
69 spin_unlock(&fs_info->balance_lock);
70 return extended_to_chunk(target);
71 }
72 }
73 spin_unlock(&fs_info->balance_lock);
74
75 /* First, mask out the RAID levels which aren't possible */
76 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
77 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
78 allowed |= btrfs_raid_array[raid_type].bg_flag;
79 }
80 allowed &= flags;
81
82 if (allowed & BTRFS_BLOCK_GROUP_RAID6)
83 allowed = BTRFS_BLOCK_GROUP_RAID6;
84 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
85 allowed = BTRFS_BLOCK_GROUP_RAID5;
86 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
87 allowed = BTRFS_BLOCK_GROUP_RAID10;
88 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
89 allowed = BTRFS_BLOCK_GROUP_RAID1;
90 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
91 allowed = BTRFS_BLOCK_GROUP_RAID0;
92
93 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
94
95 return extended_to_chunk(flags | allowed);
96 }
97
98 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
99 {
100 unsigned seq;
101 u64 flags;
102
103 do {
104 flags = orig_flags;
105 seq = read_seqbegin(&fs_info->profiles_lock);
106
107 if (flags & BTRFS_BLOCK_GROUP_DATA)
108 flags |= fs_info->avail_data_alloc_bits;
109 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
110 flags |= fs_info->avail_system_alloc_bits;
111 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
112 flags |= fs_info->avail_metadata_alloc_bits;
113 } while (read_seqretry(&fs_info->profiles_lock, seq));
114
115 return btrfs_reduce_alloc_profile(fs_info, flags);
116 }
117
118 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
119 {
120 return get_alloc_profile(fs_info, orig_flags);
121 }
122
123 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
124 {
125 atomic_inc(&cache->count);
126 }
127
128 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
129 {
130 if (atomic_dec_and_test(&cache->count)) {
131 WARN_ON(cache->pinned > 0);
132 WARN_ON(cache->reserved > 0);
133 
134 /*
135  * If not empty, someone is still holding the mutex of a full stripe lock,
136  * which can only be released by the caller, and that would cause a
137  * use-after-free when the caller tries to release the full stripe lock.
138  *
139  * No better way to resolve this, so just warn.
140  */
141 
142 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
143 kfree(cache->free_space_ctl);
144 kfree(cache);
145 }
146 }
147 
148 /*
149  * This adds the block group to the fs_info rb tree for the block group cache
150  */
151 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
152 struct btrfs_block_group_cache *block_group)
153 {
154 struct rb_node **p;
155 struct rb_node *parent = NULL;
156 struct btrfs_block_group_cache *cache;
157
158 spin_lock(&info->block_group_cache_lock);
159 p = &info->block_group_cache_tree.rb_node;
160
161 while (*p) {
162 parent = *p;
163 cache = rb_entry(parent, struct btrfs_block_group_cache,
164 cache_node);
165 if (block_group->key.objectid < cache->key.objectid) {
166 p = &(*p)->rb_left;
167 } else if (block_group->key.objectid > cache->key.objectid) {
168 p = &(*p)->rb_right;
169 } else {
170 spin_unlock(&info->block_group_cache_lock);
171 return -EEXIST;
172 }
173 }
174
175 rb_link_node(&block_group->cache_node, parent, p);
176 rb_insert_color(&block_group->cache_node,
177 &info->block_group_cache_tree);
178
179 if (info->first_logical_byte > block_group->key.objectid)
180 info->first_logical_byte = block_group->key.objectid;
181
182 spin_unlock(&info->block_group_cache_lock);
183
184 return 0;
185 }
186
187 /*
188  * This will return the block group at or after bytenr if contains is 0, else
189  * it will return the block group that contains the bytenr
190  */
191 static struct btrfs_block_group_cache *block_group_cache_tree_search(
192 struct btrfs_fs_info *info, u64 bytenr, int contains)
193 {
194 struct btrfs_block_group_cache *cache, *ret = NULL;
195 struct rb_node *n;
196 u64 end, start;
197
198 spin_lock(&info->block_group_cache_lock);
199 n = info->block_group_cache_tree.rb_node;
200
201 while (n) {
202 cache = rb_entry(n, struct btrfs_block_group_cache,
203 cache_node);
204 end = cache->key.objectid + cache->key.offset - 1;
205 start = cache->key.objectid;
206
207 if (bytenr < start) {
208 if (!contains && (!ret || start < ret->key.objectid))
209 ret = cache;
210 n = n->rb_left;
211 } else if (bytenr > start) {
212 if (contains && bytenr <= end) {
213 ret = cache;
214 break;
215 }
216 n = n->rb_right;
217 } else {
218 ret = cache;
219 break;
220 }
221 }
222 if (ret) {
223 btrfs_get_block_group(ret);
224 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
225 info->first_logical_byte = ret->key.objectid;
226 }
227 spin_unlock(&info->block_group_cache_lock);
228
229 return ret;
230 }
231
232 /*
233  * Return the block group that starts at or after bytenr
234  */
235 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
236 struct btrfs_fs_info *info, u64 bytenr)
237 {
238 return block_group_cache_tree_search(info, bytenr, 0);
239 }
240
241 /*
242  * Return the block group that contains the given bytenr
243  */
244 struct btrfs_block_group_cache *btrfs_lookup_block_group(
245 struct btrfs_fs_info *info, u64 bytenr)
246 {
247 return block_group_cache_tree_search(info, bytenr, 1);
248 }
249
250 struct btrfs_block_group_cache *btrfs_next_block_group(
251 struct btrfs_block_group_cache *cache)
252 {
253 struct btrfs_fs_info *fs_info = cache->fs_info;
254 struct rb_node *node;
255
256 spin_lock(&fs_info->block_group_cache_lock);
257
258 /* If our block group was removed, we need a full search. */
259 if (RB_EMPTY_NODE(&cache->cache_node)) {
260 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
261
262 spin_unlock(&fs_info->block_group_cache_lock);
263 btrfs_put_block_group(cache);
264 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
    return cache;
265 }
266 node = rb_next(&cache->cache_node);
267 btrfs_put_block_group(cache);
268 if (node) {
269 cache = rb_entry(node, struct btrfs_block_group_cache,
270 cache_node);
271 btrfs_get_block_group(cache);
272 } else
273 cache = NULL;
274 spin_unlock(&fs_info->block_group_cache_lock);
275 return cache;
276 }
277
278 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
279 {
280 struct btrfs_block_group_cache *bg;
281 bool ret = true;
282
283 bg = btrfs_lookup_block_group(fs_info, bytenr);
284 if (!bg)
285 return false;
286
287 spin_lock(&bg->lock);
288 if (bg->ro)
289 ret = false;
290 else
291 atomic_inc(&bg->nocow_writers);
292 spin_unlock(&bg->lock);
293
294 /* No put on the block group, that is done by btrfs_dec_nocow_writers */
295 if (!ret)
296 btrfs_put_block_group(bg);
297
298 return ret;
299 }
300
301 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
302 {
303 struct btrfs_block_group_cache *bg;
304
305 bg = btrfs_lookup_block_group(fs_info, bytenr);
306 ASSERT(bg);
307 if (atomic_dec_and_test(&bg->nocow_writers))
308 wake_up_var(&bg->nocow_writers);
309 /*
310  * Once for our lookup and once for the lookup done by a previous call
311  * to btrfs_inc_nocow_writers()
312  */
313 btrfs_put_block_group(bg);
314 btrfs_put_block_group(bg);
315 }
316
317 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
318 {
319 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
320 }
321
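btrfs_inc_nocow_writers() and btrfs_dec_nocow_writers() bracket NOCOW writes into a block group so it cannot be flipped read-only underneath them. A hedged sketch of the expected pairing in a caller (fs_info and bytenr are assumed to be in scope; the actual write-out logic is omitted):

	/* Hypothetical caller: only do the NOCOW write if the block group
	 * is not read-only; otherwise fall back to a COW write.
	 */
	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
		/* ... submit the NOCOW write for this range ... */
		btrfs_dec_nocow_writers(fs_info, bytenr);
	} else {
		/* ... fall back to the regular COW path ... */
	}
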
322 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
323 const u64 start)
324 {
325 struct btrfs_block_group_cache *bg;
326
327 bg = btrfs_lookup_block_group(fs_info, start);
328 ASSERT(bg);
329 if (atomic_dec_and_test(&bg->reservations))
330 wake_up_var(&bg->reservations);
331 btrfs_put_block_group(bg);
332 }
333
334 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
335 {
336 struct btrfs_space_info *space_info = bg->space_info;
337
338 ASSERT(bg->ro);
339
340 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
341 return;
342
343 /*
344  * Our block group is read only, but before we set it to read only some
345  * task might have allocated an extent from it that has not yet created
346  * its ordered extent (and added it to a root's list of ordered extents).
347  *
348  * Therefore wait for any task currently allocating extents, since the
349  * block group's reservations counter is incremented while a read lock
350  * on the group's semaphore is held and decremented after releasing
351  * the read access on that semaphore and creating the ordered extent.
352  */
353 down_write(&space_info->groups_sem);
354 up_write(&space_info->groups_sem);
355
356 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
357 }
358
359 struct btrfs_caching_control *btrfs_get_caching_control(
360 struct btrfs_block_group_cache *cache)
361 {
362 struct btrfs_caching_control *ctl;
363
364 spin_lock(&cache->lock);
365 if (!cache->caching_ctl) {
366 spin_unlock(&cache->lock);
367 return NULL;
368 }
369
370 ctl = cache->caching_ctl;
371 refcount_inc(&ctl->count);
372 spin_unlock(&cache->lock);
373 return ctl;
374 }
375
376 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
377 {
378 if (refcount_dec_and_test(&ctl->count))
379 kfree(ctl);
380 }
381
382 
383 /*
384  * Wait for a block group's caching to make enough progress.
385  *
386  * When we wait for progress in the block group caching, it's because our
387  * allocation attempt failed at least once.  So, we must sleep and let some
388  * progress happen before we try again.
389  *
390  * This function will sleep at least once waiting for new free space to show
391  * up, and then it will check the block group free space numbers for our
392  * minimum num_bytes.
393  */
394 
395 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
396 u64 num_bytes)
397 {
398 struct btrfs_caching_control *caching_ctl;
399
400 caching_ctl = btrfs_get_caching_control(cache);
401 if (!caching_ctl)
402 return;
403
404 wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
405 (cache->free_space_ctl->free_space >= num_bytes));
406
407 btrfs_put_caching_control(caching_ctl);
408 }
409
410 int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
411 {
412 struct btrfs_caching_control *caching_ctl;
413 int ret = 0;
414
415 caching_ctl = btrfs_get_caching_control(cache);
416 if (!caching_ctl)
417 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
418
419 wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
420 if (cache->cached == BTRFS_CACHE_ERROR)
421 ret = -EIO;
422 btrfs_put_caching_control(caching_ctl);
423 return ret;
424 }
425
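A caller that needs a block group's free space loaded before searching it typically starts caching and then waits for enough progress. A hypothetical sketch using the helpers above (cache, num_bytes and ret are assumed to be in scope in the caller):

	/* Hypothetical caller: make sure at least num_bytes worth of free
	 * space information is available before searching this block group.
	 */
	if (!btrfs_block_group_cache_done(cache)) {
		ret = btrfs_cache_block_group(cache, 0);
		if (ret < 0)
			return ret;
		btrfs_wait_block_group_cache_progress(cache, num_bytes);
	}
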
426 #ifdef CONFIG_BTRFS_DEBUG
427 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
428 {
429 struct btrfs_fs_info *fs_info = block_group->fs_info;
430 u64 start = block_group->key.objectid;
431 u64 len = block_group->key.offset;
432 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
433 fs_info->nodesize : fs_info->sectorsize;
434 u64 step = chunk << 1;
435
436 while (len > chunk) {
437 btrfs_remove_free_space(block_group, start, chunk);
438 start += step;
439 if (len < step)
440 len = 0;
441 else
442 len -= step;
443 }
444 }
445 #endif
446
447 /*
448  * Add the free space between start and end to the block group's free space
449  * cache, skipping any ranges that are still pinned (their space is released
450  * only when the running transaction commits).  Returns the number of bytes
451  * actually added.
452  */
453 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
454 u64 start, u64 end)
455 {
456 struct btrfs_fs_info *info = block_group->fs_info;
457 u64 extent_start, extent_end, size, total_added = 0;
458 int ret;
459
460 while (start < end) {
461 ret = find_first_extent_bit(info->pinned_extents, start,
462 &extent_start, &extent_end,
463 EXTENT_DIRTY | EXTENT_UPTODATE,
464 NULL);
465 if (ret)
466 break;
467
468 if (extent_start <= start) {
469 start = extent_end + 1;
470 } else if (extent_start > start && extent_start < end) {
471 size = extent_start - start;
472 total_added += size;
473 ret = btrfs_add_free_space(block_group, start,
474 size);
475 BUG_ON(ret);
476 start = extent_end + 1;
477 } else {
478 break;
479 }
480 }
481
482 if (start < end) {
483 size = end - start;
484 total_added += size;
485 ret = btrfs_add_free_space(block_group, start, size);
486 BUG_ON(ret);
487 }
488
489 return total_added;
490 }
491
492 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
493 {
494 struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
495 struct btrfs_fs_info *fs_info = block_group->fs_info;
496 struct btrfs_root *extent_root = fs_info->extent_root;
497 struct btrfs_path *path;
498 struct extent_buffer *leaf;
499 struct btrfs_key key;
500 u64 total_found = 0;
501 u64 last = 0;
502 u32 nritems;
503 int ret;
504 bool wakeup = true;
505
506 path = btrfs_alloc_path();
507 if (!path)
508 return -ENOMEM;
509
510 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
511
512 #ifdef CONFIG_BTRFS_DEBUG
513 /*
514  * If we're fragmenting we don't want to make anybody think we can
515  * allocate from this block group until we've had a chance to fragment
516  * the free space.
517  */
518 if (btrfs_should_fragment_free_space(block_group))
519 wakeup = false;
520 #endif
521
522 /*
523  * We don't want to deadlock with somebody trying to allocate a new
524  * extent for the extent root while also trying to search the extent
525  * root to add free space, so skip locking and search the commit root.
526  */
527 path->skip_locking = 1;
528 path->search_commit_root = 1;
529 path->reada = READA_FORWARD;
530
531 key.objectid = last;
532 key.offset = 0;
533 key.type = BTRFS_EXTENT_ITEM_KEY;
534
535 next:
536 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
537 if (ret < 0)
538 goto out;
539
540 leaf = path->nodes[0];
541 nritems = btrfs_header_nritems(leaf);
542
543 while (1) {
544 if (btrfs_fs_closing(fs_info) > 1) {
545 last = (u64)-1;
546 break;
547 }
548
549 if (path->slots[0] < nritems) {
550 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
551 } else {
552 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
553 if (ret)
554 break;
555
556 if (need_resched() ||
557 rwsem_is_contended(&fs_info->commit_root_sem)) {
558 if (wakeup)
559 caching_ctl->progress = last;
560 btrfs_release_path(path);
561 up_read(&fs_info->commit_root_sem);
562 mutex_unlock(&caching_ctl->mutex);
563 cond_resched();
564 mutex_lock(&caching_ctl->mutex);
565 down_read(&fs_info->commit_root_sem);
566 goto next;
567 }
568
569 ret = btrfs_next_leaf(extent_root, path);
570 if (ret < 0)
571 goto out;
572 if (ret)
573 break;
574 leaf = path->nodes[0];
575 nritems = btrfs_header_nritems(leaf);
576 continue;
577 }
578
579 if (key.objectid < last) {
580 key.objectid = last;
581 key.offset = 0;
582 key.type = BTRFS_EXTENT_ITEM_KEY;
583
584 if (wakeup)
585 caching_ctl->progress = last;
586 btrfs_release_path(path);
587 goto next;
588 }
589
590 if (key.objectid < block_group->key.objectid) {
591 path->slots[0]++;
592 continue;
593 }
594
595 if (key.objectid >= block_group->key.objectid +
596 block_group->key.offset)
597 break;
598
599 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
600 key.type == BTRFS_METADATA_ITEM_KEY) {
601 total_found += add_new_free_space(block_group, last,
602 key.objectid);
603 if (key.type == BTRFS_METADATA_ITEM_KEY)
604 last = key.objectid +
605 fs_info->nodesize;
606 else
607 last = key.objectid + key.offset;
608
609 if (total_found > CACHING_CTL_WAKE_UP) {
610 total_found = 0;
611 if (wakeup)
612 wake_up(&caching_ctl->wait);
613 }
614 }
615 path->slots[0]++;
616 }
617 ret = 0;
618
619 total_found += add_new_free_space(block_group, last,
620 block_group->key.objectid +
621 block_group->key.offset);
622 caching_ctl->progress = (u64)-1;
623
624 out:
625 btrfs_free_path(path);
626 return ret;
627 }
628
629 static noinline void caching_thread(struct btrfs_work *work)
630 {
631 struct btrfs_block_group_cache *block_group;
632 struct btrfs_fs_info *fs_info;
633 struct btrfs_caching_control *caching_ctl;
634 int ret;
635
636 caching_ctl = container_of(work, struct btrfs_caching_control, work);
637 block_group = caching_ctl->block_group;
638 fs_info = block_group->fs_info;
639
640 mutex_lock(&caching_ctl->mutex);
641 down_read(&fs_info->commit_root_sem);
642
643 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
644 ret = load_free_space_tree(caching_ctl);
645 else
646 ret = load_extent_tree_free(caching_ctl);
647
648 spin_lock(&block_group->lock);
649 block_group->caching_ctl = NULL;
650 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
651 spin_unlock(&block_group->lock);
652
653 #ifdef CONFIG_BTRFS_DEBUG
654 if (btrfs_should_fragment_free_space(block_group)) {
655 u64 bytes_used;
656
657 spin_lock(&block_group->space_info->lock);
658 spin_lock(&block_group->lock);
659 bytes_used = block_group->key.offset -
660 btrfs_block_group_used(&block_group->item);
661 block_group->space_info->bytes_used += bytes_used >> 1;
662 spin_unlock(&block_group->lock);
663 spin_unlock(&block_group->space_info->lock);
664 fragment_free_space(block_group);
665 }
666 #endif
667
668 caching_ctl->progress = (u64)-1;
669
670 up_read(&fs_info->commit_root_sem);
671 btrfs_free_excluded_extents(block_group);
672 mutex_unlock(&caching_ctl->mutex);
673
674 wake_up(&caching_ctl->wait);
675
676 btrfs_put_caching_control(caching_ctl);
677 btrfs_put_block_group(block_group);
678 }
679
680 int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
681 int load_cache_only)
682 {
683 DEFINE_WAIT(wait);
684 struct btrfs_fs_info *fs_info = cache->fs_info;
685 struct btrfs_caching_control *caching_ctl;
686 int ret = 0;
687
688 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
689 if (!caching_ctl)
690 return -ENOMEM;
691
692 INIT_LIST_HEAD(&caching_ctl->list);
693 mutex_init(&caching_ctl->mutex);
694 init_waitqueue_head(&caching_ctl->wait);
695 caching_ctl->block_group = cache;
696 caching_ctl->progress = cache->key.objectid;
697 refcount_set(&caching_ctl->count, 1);
698 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
699
700 spin_lock(&cache->lock);
701 /*
702  * This should be a rare occasion, but it can happen when one thread
703  * starts to load the space cache info and then some other thread
704  * starts a transaction commit which tries to do an allocation while
705  * the first thread is still loading the space cache info.  The
706  * previous loop should have kept us from choosing this block group,
707  * but if we've moved to the state where we will wait on caching block
708  * groups we need to first check if we're doing a fast load here, so
709  * we can wait for it to finish, otherwise we could end up allocating
710  * from a block group whose cache gets evicted for one reason or
711  * another.
712  */
713 while (cache->cached == BTRFS_CACHE_FAST) {
714 struct btrfs_caching_control *ctl;
715
716 ctl = cache->caching_ctl;
717 refcount_inc(&ctl->count);
718 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
719 spin_unlock(&cache->lock);
720
721 schedule();
722
723 finish_wait(&ctl->wait, &wait);
724 btrfs_put_caching_control(ctl);
725 spin_lock(&cache->lock);
726 }
727
728 if (cache->cached != BTRFS_CACHE_NO) {
729 spin_unlock(&cache->lock);
730 kfree(caching_ctl);
731 return 0;
732 }
733 WARN_ON(cache->caching_ctl);
734 cache->caching_ctl = caching_ctl;
735 cache->cached = BTRFS_CACHE_FAST;
736 spin_unlock(&cache->lock);
737
738 if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
739 mutex_lock(&caching_ctl->mutex);
740 ret = load_free_space_cache(cache);
741
742 spin_lock(&cache->lock);
743 if (ret == 1) {
744 cache->caching_ctl = NULL;
745 cache->cached = BTRFS_CACHE_FINISHED;
746 cache->last_byte_to_unpin = (u64)-1;
747 caching_ctl->progress = (u64)-1;
748 } else {
749 if (load_cache_only) {
750 cache->caching_ctl = NULL;
751 cache->cached = BTRFS_CACHE_NO;
752 } else {
753 cache->cached = BTRFS_CACHE_STARTED;
754 cache->has_caching_ctl = 1;
755 }
756 }
757 spin_unlock(&cache->lock);
758 #ifdef CONFIG_BTRFS_DEBUG
759 if (ret == 1 &&
760 btrfs_should_fragment_free_space(cache)) {
761 u64 bytes_used;
762
763 spin_lock(&cache->space_info->lock);
764 spin_lock(&cache->lock);
765 bytes_used = cache->key.offset -
766 btrfs_block_group_used(&cache->item);
767 cache->space_info->bytes_used += bytes_used >> 1;
768 spin_unlock(&cache->lock);
769 spin_unlock(&cache->space_info->lock);
770 fragment_free_space(cache);
771 }
772 #endif
773 mutex_unlock(&caching_ctl->mutex);
774
775 wake_up(&caching_ctl->wait);
776 if (ret == 1) {
777 btrfs_put_caching_control(caching_ctl);
778 btrfs_free_excluded_extents(cache);
779 return 0;
780 }
781 } else {
782 /*
783  * We're either using the free space tree or no caching at all.
784  * Set cached to the appropriate value and wake up any waiters.
785  */
786 spin_lock(&cache->lock);
787 if (load_cache_only) {
788 cache->caching_ctl = NULL;
789 cache->cached = BTRFS_CACHE_NO;
790 } else {
791 cache->cached = BTRFS_CACHE_STARTED;
792 cache->has_caching_ctl = 1;
793 }
794 spin_unlock(&cache->lock);
795 wake_up(&caching_ctl->wait);
796 }
797
798 if (load_cache_only) {
799 btrfs_put_caching_control(caching_ctl);
800 return 0;
801 }
802
803 down_write(&fs_info->commit_root_sem);
804 refcount_inc(&caching_ctl->count);
805 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
806 up_write(&fs_info->commit_root_sem);
807
808 btrfs_get_block_group(cache);
809
810 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
811
812 return ret;
813 }
814
815 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
816 {
817 u64 extra_flags = chunk_to_extended(flags) &
818 BTRFS_EXTENDED_PROFILE_MASK;
819
820 write_seqlock(&fs_info->profiles_lock);
821 if (flags & BTRFS_BLOCK_GROUP_DATA)
822 fs_info->avail_data_alloc_bits &= ~extra_flags;
823 if (flags & BTRFS_BLOCK_GROUP_METADATA)
824 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
825 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
826 fs_info->avail_system_alloc_bits &= ~extra_flags;
827 write_sequnlock(&fs_info->profiles_lock);
828 }
829
830 /*
831  * Clear incompat bits for the following feature(s):
832  *
833  * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
834  *            in the whole filesystem
835  */
836 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
837 {
838 if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
839 struct list_head *head = &fs_info->space_info;
840 struct btrfs_space_info *sinfo;
841
842 list_for_each_entry_rcu(sinfo, head, list) {
843 bool found = false;
844
845 down_read(&sinfo->groups_sem);
846 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
847 found = true;
848 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
849 found = true;
850 up_read(&sinfo->groups_sem);
851
852 if (found)
853 return;
854 }
855 btrfs_clear_fs_incompat(fs_info, RAID56);
856 }
857 }
858
859 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
860 u64 group_start, struct extent_map *em)
861 {
862 struct btrfs_fs_info *fs_info = trans->fs_info;
863 struct btrfs_root *root = fs_info->extent_root;
864 struct btrfs_path *path;
865 struct btrfs_block_group_cache *block_group;
866 struct btrfs_free_cluster *cluster;
867 struct btrfs_root *tree_root = fs_info->tree_root;
868 struct btrfs_key key;
869 struct inode *inode;
870 struct kobject *kobj = NULL;
871 int ret;
872 int index;
873 int factor;
874 struct btrfs_caching_control *caching_ctl = NULL;
875 bool remove_em;
876 bool remove_rsv = false;
877
878 block_group = btrfs_lookup_block_group(fs_info, group_start);
879 BUG_ON(!block_group);
880 BUG_ON(!block_group->ro);
881
882 trace_btrfs_remove_block_group(block_group);
883
884 /*
885  * Free the reserved super bytes from this block group before it's deleted.
886  */
887 btrfs_free_excluded_extents(block_group);
888 btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
889 block_group->key.offset);
890
891 memcpy(&key, &block_group->key, sizeof(key));
892 index = btrfs_bg_flags_to_raid_index(block_group->flags);
893 factor = btrfs_bg_type_to_factor(block_group->flags);
894
895
896 cluster = &fs_info->data_alloc_cluster;
897 spin_lock(&cluster->refill_lock);
898 btrfs_return_cluster_to_free_space(block_group, cluster);
899 spin_unlock(&cluster->refill_lock);
900
901 /*
902  * Make sure this block group isn't part of a metadata
903  * allocation cluster.
904  */
905 cluster = &fs_info->meta_alloc_cluster;
906 spin_lock(&cluster->refill_lock);
907 btrfs_return_cluster_to_free_space(block_group, cluster);
908 spin_unlock(&cluster->refill_lock);
909
910 path = btrfs_alloc_path();
911 if (!path) {
912 ret = -ENOMEM;
913 goto out_put_group;
914 }
915
916 /*
917  * Get the inode first so any iput calls done for the io_list
918  * aren't the final iput (no unlinks allowed now).
919  */
920 inode = lookup_free_space_inode(block_group, path);
921
922 mutex_lock(&trans->transaction->cache_write_mutex);
923 /*
924  * Make sure our free space cache IO is done before removing the
925  * free space inode.
926  */
927 spin_lock(&trans->transaction->dirty_bgs_lock);
928 if (!list_empty(&block_group->io_list)) {
929 list_del_init(&block_group->io_list);
930
931 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
932
933 spin_unlock(&trans->transaction->dirty_bgs_lock);
934 btrfs_wait_cache_io(trans, block_group, path);
935 btrfs_put_block_group(block_group);
936 spin_lock(&trans->transaction->dirty_bgs_lock);
937 }
938
939 if (!list_empty(&block_group->dirty_list)) {
940 list_del_init(&block_group->dirty_list);
941 remove_rsv = true;
942 btrfs_put_block_group(block_group);
943 }
944 spin_unlock(&trans->transaction->dirty_bgs_lock);
945 mutex_unlock(&trans->transaction->cache_write_mutex);
946
947 if (!IS_ERR(inode)) {
948 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
949 if (ret) {
950 btrfs_add_delayed_iput(inode);
951 goto out_put_group;
952 }
953 clear_nlink(inode);
954
955 spin_lock(&block_group->lock);
956 if (block_group->iref) {
957 block_group->iref = 0;
958 block_group->inode = NULL;
959 spin_unlock(&block_group->lock);
960 iput(inode);
961 } else {
962 spin_unlock(&block_group->lock);
963 }
964
965 btrfs_add_delayed_iput(inode);
966 }
967
968 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
969 key.offset = block_group->key.objectid;
970 key.type = 0;
971
972 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
973 if (ret < 0)
974 goto out_put_group;
975 if (ret > 0)
976 btrfs_release_path(path);
977 if (ret == 0) {
978 ret = btrfs_del_item(trans, tree_root, path);
979 if (ret)
980 goto out_put_group;
981 btrfs_release_path(path);
982 }
983
984 spin_lock(&fs_info->block_group_cache_lock);
985 rb_erase(&block_group->cache_node,
986 &fs_info->block_group_cache_tree);
987 RB_CLEAR_NODE(&block_group->cache_node);
988
989 if (fs_info->first_logical_byte == block_group->key.objectid)
990 fs_info->first_logical_byte = (u64)-1;
991 spin_unlock(&fs_info->block_group_cache_lock);
992
993 down_write(&block_group->space_info->groups_sem);
994 /*
995  * We must use list_del_init so people can check to see if they
996  * are still on the list after taking the semaphore.
997  */
998 list_del_init(&block_group->list);
999 if (list_empty(&block_group->space_info->block_groups[index])) {
1000 kobj = block_group->space_info->block_group_kobjs[index];
1001 block_group->space_info->block_group_kobjs[index] = NULL;
1002 clear_avail_alloc_bits(fs_info, block_group->flags);
1003 }
1004 up_write(&block_group->space_info->groups_sem);
1005 clear_incompat_bg_bits(fs_info, block_group->flags);
1006 if (kobj) {
1007 kobject_del(kobj);
1008 kobject_put(kobj);
1009 }
1010
1011 if (block_group->has_caching_ctl)
1012 caching_ctl = btrfs_get_caching_control(block_group);
1013 if (block_group->cached == BTRFS_CACHE_STARTED)
1014 btrfs_wait_block_group_cache_done(block_group);
1015 if (block_group->has_caching_ctl) {
1016 down_write(&fs_info->commit_root_sem);
1017 if (!caching_ctl) {
1018 struct btrfs_caching_control *ctl;
1019
1020 list_for_each_entry(ctl,
1021 &fs_info->caching_block_groups, list)
1022 if (ctl->block_group == block_group) {
1023 caching_ctl = ctl;
1024 refcount_inc(&caching_ctl->count);
1025 break;
1026 }
1027 }
1028 if (caching_ctl)
1029 list_del_init(&caching_ctl->list);
1030 up_write(&fs_info->commit_root_sem);
1031 if (caching_ctl) {
1032 /* Once for the caching bgs list and once for us. */
1033 btrfs_put_caching_control(caching_ctl);
1034 btrfs_put_caching_control(caching_ctl);
1035 }
1036 }
1037
1038 spin_lock(&trans->transaction->dirty_bgs_lock);
1039 WARN_ON(!list_empty(&block_group->dirty_list));
1040 WARN_ON(!list_empty(&block_group->io_list));
1041 spin_unlock(&trans->transaction->dirty_bgs_lock);
1042
1043 btrfs_remove_free_space_cache(block_group);
1044
1045 spin_lock(&block_group->space_info->lock);
1046 list_del_init(&block_group->ro_list);
1047
1048 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1049 WARN_ON(block_group->space_info->total_bytes
1050 < block_group->key.offset);
1051 WARN_ON(block_group->space_info->bytes_readonly
1052 < block_group->key.offset);
1053 WARN_ON(block_group->space_info->disk_total
1054 < block_group->key.offset * factor);
1055 }
1056 block_group->space_info->total_bytes -= block_group->key.offset;
1057 block_group->space_info->bytes_readonly -= block_group->key.offset;
1058 block_group->space_info->disk_total -= block_group->key.offset * factor;
1059
1060 spin_unlock(&block_group->space_info->lock);
1061
1062 memcpy(&key, &block_group->key, sizeof(key));
1063
1064 mutex_lock(&fs_info->chunk_mutex);
1065 spin_lock(&block_group->lock);
1066 block_group->removed = 1;
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 remove_em = (atomic_read(&block_group->trimming) == 0);
1091 spin_unlock(&block_group->lock);
1092
1093 mutex_unlock(&fs_info->chunk_mutex);
1094
1095 ret = remove_block_group_free_space(trans, block_group);
1096 if (ret)
1097 goto out_put_group;
1098
1099 /* Once for the block groups rbtree */
1100 btrfs_put_block_group(block_group);
1101
1102 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1103 if (ret > 0)
1104 ret = -EIO;
1105 if (ret < 0)
1106 goto out;
1107
1108 ret = btrfs_del_item(trans, root, path);
1109 if (ret)
1110 goto out;
1111
1112 if (remove_em) {
1113 struct extent_map_tree *em_tree;
1114
1115 em_tree = &fs_info->mapping_tree;
1116 write_lock(&em_tree->lock);
1117 remove_extent_mapping(em_tree, em);
1118 write_unlock(&em_tree->lock);
1119
1120 free_extent_map(em);
1121 }
1122
1123 out_put_group:
1124 /* Once for the lookup reference */
1125 btrfs_put_block_group(block_group);
1126 out:
1127 if (remove_rsv)
1128 btrfs_delayed_refs_rsv_release(fs_info, 1);
1129 btrfs_free_path(path);
1130 return ret;
1131 }
1132
1133 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1134 struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1135 {
1136 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1137 struct extent_map *em;
1138 struct map_lookup *map;
1139 unsigned int num_items;
1140
1141 read_lock(&em_tree->lock);
1142 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1143 read_unlock(&em_tree->lock);
1144 ASSERT(em && em->start == chunk_offset);
1145
1146 /*
1147  * We need to reserve 3 + N units from the metadata space info in order
1148  * to remove a block group (done at btrfs_remove_chunk() and at
1149  * btrfs_remove_block_group()), which are used for:
1150  *
1151  * 1 unit for adding the free space inode's orphan (located in the tree
1152  * of tree roots).
1153  * 1 unit for deleting the block group item (located in the extent
1154  * tree).
1155  * 1 unit for deleting the free space item (located in tree of tree
1156  * roots).
1157  * N units for deleting N device extent items corresponding to each
1158  * stripe (located in the device tree).
1159  *
1160  * In order to remove a block group we also need to reserve units in the
1161  * system space info in order to update the chunk tree (update one or
1162  * more device items and remove one chunk item), but this is done at
1163  * btrfs_remove_chunk() through a call to check_system_chunk().
1164  */
1165 map = em->map_lookup;
1166 num_items = 3 + map->num_stripes;
1167 free_extent_map(em);
1168
1169 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1170 num_items, 1);
1171 }
1172
1173 /*
1174  * Mark block group @cache read-only, so later writes won't happen to
1175  * block group @cache.
1176  *
1177  * If @force is not set, this function will only mark the block group
1178  * read-only if we have enough free space (1M) in other metadata/system
1179  * block groups.  If @force is set, the block group is marked read-only
1180  * without checking free space.
1181  *
1182  * NOTE: This function doesn't care if other block groups can contain all
1183  * the data in this block group.  That check should be done by the
1184  * relocation routine, not this function.
1185  */
1186 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
1187 {
1188 struct btrfs_space_info *sinfo = cache->space_info;
1189 u64 num_bytes;
1190 u64 sinfo_used;
1191 u64 min_allocable_bytes;
1192 int ret = -ENOSPC;
1193
1194 /*
1195  * Unless the caller forces it, keep 1M of slack when marking metadata or
1196  * system block groups read-only, so chunk allocation still has room in
1197  * some corner cases.
1198  */
1199 if ((sinfo->flags &
1200 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
1201 !force)
1202 min_allocable_bytes = SZ_1M;
1203 else
1204 min_allocable_bytes = 0;
1205
1206 spin_lock(&sinfo->lock);
1207 spin_lock(&cache->lock);
1208
1209 if (cache->ro) {
1210 cache->ro++;
1211 ret = 0;
1212 goto out;
1213 }
1214
1215 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
1216 cache->bytes_super - btrfs_block_group_used(&cache->item);
1217 sinfo_used = btrfs_space_info_used(sinfo, true);
1218
1219 /*
1220  * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1221  *
1222  * Here we make sure that if we mark this block group read-only, we still
1223  * have enough free space left as a buffer.
1224  */
1225 if (sinfo_used + num_bytes + min_allocable_bytes <=
1226 sinfo->total_bytes) {
1227 sinfo->bytes_readonly += num_bytes;
1228 cache->ro++;
1229 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1230 ret = 0;
1231 }
1232 out:
1233 spin_unlock(&cache->lock);
1234 spin_unlock(&sinfo->lock);
1235 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1236 btrfs_info(cache->fs_info,
1237 "unable to make block group %llu ro",
1238 cache->key.objectid);
1239 btrfs_info(cache->fs_info,
1240 "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
1241 sinfo_used, num_bytes, min_allocable_bytes);
1242 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1243 }
1244 return ret;
1245 }
1246
1247 /*
1248  * Process the unused_bgs list and remove any that don't have any allocated
1249  * space inside of them.
1250  */
1251 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1252 {
1253 struct btrfs_block_group_cache *block_group;
1254 struct btrfs_space_info *space_info;
1255 struct btrfs_trans_handle *trans;
1256 int ret = 0;
1257
1258 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1259 return;
1260
1261 spin_lock(&fs_info->unused_bgs_lock);
1262 while (!list_empty(&fs_info->unused_bgs)) {
1263 u64 start, end;
1264 int trimming;
1265
1266 block_group = list_first_entry(&fs_info->unused_bgs,
1267 struct btrfs_block_group_cache,
1268 bg_list);
1269 list_del_init(&block_group->bg_list);
1270
1271 space_info = block_group->space_info;
1272
1273 if (ret || btrfs_mixed_space_info(space_info)) {
1274 btrfs_put_block_group(block_group);
1275 continue;
1276 }
1277 spin_unlock(&fs_info->unused_bgs_lock);
1278
1279 mutex_lock(&fs_info->delete_unused_bgs_mutex);
1280
1281 /* Don't want to race with allocators so take the groups_sem */
1282 down_write(&space_info->groups_sem);
1283 spin_lock(&block_group->lock);
1284 if (block_group->reserved || block_group->pinned ||
1285 btrfs_block_group_used(&block_group->item) ||
1286 block_group->ro ||
1287 list_is_singular(&block_group->list)) {
1288 /*
1289  * We want to bail if we made new allocations or have
1290  * outstanding allocations in this block group.  We do
1291  * the ro check in case balance is currently acting on
1292  * this block group.
1293  */
1294 trace_btrfs_skip_unused_block_group(block_group);
1295 spin_unlock(&block_group->lock);
1296 up_write(&space_info->groups_sem);
1297 goto next;
1298 }
1299 spin_unlock(&block_group->lock);
1300
1301 /* We don't want to force the issue, only flip if it's ok. */
1302 ret = inc_block_group_ro(block_group, 0);
1303 up_write(&space_info->groups_sem);
1304 if (ret < 0) {
1305 ret = 0;
1306 goto next;
1307 }
1308
1309 /*
1310  * Want to do this before we do anything else so we can recover
1311  * properly if we fail to join the transaction.
1312  */
1313 trans = btrfs_start_trans_remove_block_group(fs_info,
1314 block_group->key.objectid);
1315 if (IS_ERR(trans)) {
1316 btrfs_dec_block_group_ro(block_group);
1317 ret = PTR_ERR(trans);
1318 goto next;
1319 }
1320
1321 /*
1322  * We could have pending pinned extents for this block group,
1323  * just delete them, we don't care about them anymore.
1324  */
1325 start = block_group->key.objectid;
1326 end = start + block_group->key.offset - 1;
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1339 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1340 EXTENT_DIRTY);
1341 if (ret) {
1342 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1343 btrfs_dec_block_group_ro(block_group);
1344 goto end_trans;
1345 }
1346 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1347 EXTENT_DIRTY);
1348 if (ret) {
1349 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1350 btrfs_dec_block_group_ro(block_group);
1351 goto end_trans;
1352 }
1353 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1354
1355 /* Reset pinned so btrfs_put_block_group() doesn't complain */
1356 spin_lock(&space_info->lock);
1357 spin_lock(&block_group->lock);
1358
1359 btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1360 -block_group->pinned);
1361 space_info->bytes_readonly += block_group->pinned;
1362 percpu_counter_add_batch(&space_info->total_bytes_pinned,
1363 -block_group->pinned,
1364 BTRFS_TOTAL_BYTES_PINNED_BATCH);
1365 block_group->pinned = 0;
1366
1367 spin_unlock(&block_group->lock);
1368 spin_unlock(&space_info->lock);
1369
1370 /* DISCARD can flip during remount */
1371 trimming = btrfs_test_opt(fs_info, DISCARD);
1372
1373 /* Implicit trim during transaction commit. */
1374 if (trimming)
1375 btrfs_get_block_group_trimming(block_group);
1376
1377 /*
1378  * btrfs_remove_chunk() will abort the transaction if things go
1379  * horribly wrong.
1380  */
1381 ret = btrfs_remove_chunk(trans, block_group->key.objectid);
1382
1383 if (ret) {
1384 if (trimming)
1385 btrfs_put_block_group_trimming(block_group);
1386 goto end_trans;
1387 }
1388
1389
1390
1391
1392
1393
1394 if (trimming) {
1395 spin_lock(&fs_info->unused_bgs_lock);
1396
1397
1398
1399
1400
1401 list_move(&block_group->bg_list,
1402 &trans->transaction->deleted_bgs);
1403 spin_unlock(&fs_info->unused_bgs_lock);
1404 btrfs_get_block_group(block_group);
1405 }
1406 end_trans:
1407 btrfs_end_transaction(trans);
1408 next:
1409 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1410 btrfs_put_block_group(block_group);
1411 spin_lock(&fs_info->unused_bgs_lock);
1412 }
1413 spin_unlock(&fs_info->unused_bgs_lock);
1414 }
1415
1416 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
1417 {
1418 struct btrfs_fs_info *fs_info = bg->fs_info;
1419
1420 spin_lock(&fs_info->unused_bgs_lock);
1421 if (list_empty(&bg->bg_list)) {
1422 btrfs_get_block_group(bg);
1423 trace_btrfs_add_unused_block_group(bg);
1424 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1425 }
1426 spin_unlock(&fs_info->unused_bgs_lock);
1427 }
1428
1429 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1430 struct btrfs_path *path,
1431 struct btrfs_key *key)
1432 {
1433 struct btrfs_root *root = fs_info->extent_root;
1434 int ret = 0;
1435 struct btrfs_key found_key;
1436 struct extent_buffer *leaf;
1437 struct btrfs_block_group_item bg;
1438 u64 flags;
1439 int slot;
1440
1441 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1442 if (ret < 0)
1443 goto out;
1444
1445 while (1) {
1446 slot = path->slots[0];
1447 leaf = path->nodes[0];
1448 if (slot >= btrfs_header_nritems(leaf)) {
1449 ret = btrfs_next_leaf(root, path);
1450 if (ret == 0)
1451 continue;
1452 if (ret < 0)
1453 goto out;
1454 break;
1455 }
1456 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1457
1458 if (found_key.objectid >= key->objectid &&
1459 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1460 struct extent_map_tree *em_tree;
1461 struct extent_map *em;
1462
1463 em_tree = &root->fs_info->mapping_tree;
1464 read_lock(&em_tree->lock);
1465 em = lookup_extent_mapping(em_tree, found_key.objectid,
1466 found_key.offset);
1467 read_unlock(&em_tree->lock);
1468 if (!em) {
1469 btrfs_err(fs_info,
1470 "logical %llu len %llu found bg but no related chunk",
1471 found_key.objectid, found_key.offset);
1472 ret = -ENOENT;
1473 } else if (em->start != found_key.objectid ||
1474 em->len != found_key.offset) {
1475 btrfs_err(fs_info,
1476 "block group %llu len %llu mismatch with chunk %llu len %llu",
1477 found_key.objectid, found_key.offset,
1478 em->start, em->len);
1479 ret = -EUCLEAN;
1480 } else {
1481 read_extent_buffer(leaf, &bg,
1482 btrfs_item_ptr_offset(leaf, slot),
1483 sizeof(bg));
1484 flags = btrfs_block_group_flags(&bg) &
1485 BTRFS_BLOCK_GROUP_TYPE_MASK;
1486
1487 if (flags != (em->map_lookup->type &
1488 BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1489 btrfs_err(fs_info,
1490 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1491 found_key.objectid,
1492 found_key.offset, flags,
1493 (BTRFS_BLOCK_GROUP_TYPE_MASK &
1494 em->map_lookup->type));
1495 ret = -EUCLEAN;
1496 } else {
1497 ret = 0;
1498 }
1499 }
1500 free_extent_map(em);
1501 goto out;
1502 }
1503 path->slots[0]++;
1504 }
1505 out:
1506 return ret;
1507 }
1508
1509 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1510 {
1511 u64 extra_flags = chunk_to_extended(flags) &
1512 BTRFS_EXTENDED_PROFILE_MASK;
1513
1514 write_seqlock(&fs_info->profiles_lock);
1515 if (flags & BTRFS_BLOCK_GROUP_DATA)
1516 fs_info->avail_data_alloc_bits |= extra_flags;
1517 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1518 fs_info->avail_metadata_alloc_bits |= extra_flags;
1519 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1520 fs_info->avail_system_alloc_bits |= extra_flags;
1521 write_sequnlock(&fs_info->profiles_lock);
1522 }
1523
1524 static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
1525 {
1526 struct btrfs_fs_info *fs_info = cache->fs_info;
1527 u64 bytenr;
1528 u64 *logical;
1529 int stripe_len;
1530 int i, nr, ret;
1531
1532 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
1533 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
1534 cache->bytes_super += stripe_len;
1535 ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
1536 stripe_len);
1537 if (ret)
1538 return ret;
1539 }
1540
1541 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1542 bytenr = btrfs_sb_offset(i);
1543 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
1544 bytenr, &logical, &nr, &stripe_len);
1545 if (ret)
1546 return ret;
1547
1548 while (nr--) {
1549 u64 start, len;
1550
1551 if (logical[nr] > cache->key.objectid +
1552 cache->key.offset)
1553 continue;
1554
1555 if (logical[nr] + stripe_len <= cache->key.objectid)
1556 continue;
1557
1558 start = logical[nr];
1559 if (start < cache->key.objectid) {
1560 start = cache->key.objectid;
1561 len = (logical[nr] + stripe_len) - start;
1562 } else {
1563 len = min_t(u64, stripe_len,
1564 cache->key.objectid +
1565 cache->key.offset - start);
1566 }
1567
1568 cache->bytes_super += len;
1569 ret = btrfs_add_excluded_extent(fs_info, start, len);
1570 if (ret) {
1571 kfree(logical);
1572 return ret;
1573 }
1574 }
1575
1576 kfree(logical);
1577 }
1578 return 0;
1579 }
1580
1581 static void link_block_group(struct btrfs_block_group_cache *cache)
1582 {
1583 struct btrfs_space_info *space_info = cache->space_info;
1584 int index = btrfs_bg_flags_to_raid_index(cache->flags);
1585 bool first = false;
1586
1587 down_write(&space_info->groups_sem);
1588 if (list_empty(&space_info->block_groups[index]))
1589 first = true;
1590 list_add_tail(&cache->list, &space_info->block_groups[index]);
1591 up_write(&space_info->groups_sem);
1592
1593 if (first)
1594 btrfs_sysfs_add_block_group_type(cache);
1595 }
1596
1597 static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
1598 struct btrfs_fs_info *fs_info, u64 start, u64 size)
1599 {
1600 struct btrfs_block_group_cache *cache;
1601
1602 cache = kzalloc(sizeof(*cache), GFP_NOFS);
1603 if (!cache)
1604 return NULL;
1605
1606 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1607 GFP_NOFS);
1608 if (!cache->free_space_ctl) {
1609 kfree(cache);
1610 return NULL;
1611 }
1612
1613 cache->key.objectid = start;
1614 cache->key.offset = size;
1615 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1616
1617 cache->fs_info = fs_info;
1618 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1619 set_free_space_tree_thresholds(cache);
1620
1621 atomic_set(&cache->count, 1);
1622 spin_lock_init(&cache->lock);
1623 init_rwsem(&cache->data_rwsem);
1624 INIT_LIST_HEAD(&cache->list);
1625 INIT_LIST_HEAD(&cache->cluster_list);
1626 INIT_LIST_HEAD(&cache->bg_list);
1627 INIT_LIST_HEAD(&cache->ro_list);
1628 INIT_LIST_HEAD(&cache->dirty_list);
1629 INIT_LIST_HEAD(&cache->io_list);
1630 btrfs_init_free_space_ctl(cache);
1631 atomic_set(&cache->trimming, 0);
1632 mutex_init(&cache->free_space_lock);
1633 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1634
1635 return cache;
1636 }
1637
1638
1639
1640
1641
1642 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1643 {
1644 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1645 struct extent_map *em;
1646 struct btrfs_block_group_cache *bg;
1647 u64 start = 0;
1648 int ret = 0;
1649
1650 while (1) {
1651 read_lock(&map_tree->lock);
1652
1653
1654
1655
1656
1657 em = lookup_extent_mapping(map_tree, start, 1);
1658 read_unlock(&map_tree->lock);
1659 if (!em)
1660 break;
1661
1662 bg = btrfs_lookup_block_group(fs_info, em->start);
1663 if (!bg) {
1664 btrfs_err(fs_info,
1665 "chunk start=%llu len=%llu doesn't have corresponding block group",
1666 em->start, em->len);
1667 ret = -EUCLEAN;
1668 free_extent_map(em);
1669 break;
1670 }
1671 if (bg->key.objectid != em->start ||
1672 bg->key.offset != em->len ||
1673 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1674 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1675 btrfs_err(fs_info,
1676 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1677 em->start, em->len,
1678 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
1679 bg->key.objectid, bg->key.offset,
1680 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1681 ret = -EUCLEAN;
1682 free_extent_map(em);
1683 btrfs_put_block_group(bg);
1684 break;
1685 }
1686 start = em->start + em->len;
1687 free_extent_map(em);
1688 btrfs_put_block_group(bg);
1689 }
1690 return ret;
1691 }
1692
1693 int btrfs_read_block_groups(struct btrfs_fs_info *info)
1694 {
1695 struct btrfs_path *path;
1696 int ret;
1697 struct btrfs_block_group_cache *cache;
1698 struct btrfs_space_info *space_info;
1699 struct btrfs_key key;
1700 struct btrfs_key found_key;
1701 struct extent_buffer *leaf;
1702 int need_clear = 0;
1703 u64 cache_gen;
1704 u64 feature;
1705 int mixed;
1706
1707 feature = btrfs_super_incompat_flags(info->super_copy);
1708 mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
1709
1710 key.objectid = 0;
1711 key.offset = 0;
1712 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1713 path = btrfs_alloc_path();
1714 if (!path)
1715 return -ENOMEM;
1716 path->reada = READA_FORWARD;
1717
1718 cache_gen = btrfs_super_cache_generation(info->super_copy);
1719 if (btrfs_test_opt(info, SPACE_CACHE) &&
1720 btrfs_super_generation(info->super_copy) != cache_gen)
1721 need_clear = 1;
1722 if (btrfs_test_opt(info, CLEAR_CACHE))
1723 need_clear = 1;
1724
1725 while (1) {
1726 ret = find_first_block_group(info, path, &key);
1727 if (ret > 0)
1728 break;
1729 if (ret != 0)
1730 goto error;
1731
1732 leaf = path->nodes[0];
1733 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1734
1735 cache = btrfs_create_block_group_cache(info, found_key.objectid,
1736 found_key.offset);
1737 if (!cache) {
1738 ret = -ENOMEM;
1739 goto error;
1740 }
1741
1742 if (need_clear) {
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753 if (btrfs_test_opt(info, SPACE_CACHE))
1754 cache->disk_cache_state = BTRFS_DC_CLEAR;
1755 }
1756
1757 read_extent_buffer(leaf, &cache->item,
1758 btrfs_item_ptr_offset(leaf, path->slots[0]),
1759 sizeof(cache->item));
1760 cache->flags = btrfs_block_group_flags(&cache->item);
1761 if (!mixed &&
1762 ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1763 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1764 btrfs_err(info,
1765 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1766 cache->key.objectid);
1767 btrfs_put_block_group(cache);
1768 ret = -EINVAL;
1769 goto error;
1770 }
1771
1772 key.objectid = found_key.objectid + found_key.offset;
1773 btrfs_release_path(path);
1774
1775 /*
1776  * We need to exclude the super stripes now so that the space
1777  * info has super bytes accounted for, otherwise we'll think
1778  * we have more space than we actually do.
1779  */
1780 ret = exclude_super_stripes(cache);
1781 if (ret) {
1782
1783
1784
1785
1786 btrfs_free_excluded_extents(cache);
1787 btrfs_put_block_group(cache);
1788 goto error;
1789 }
1790
1791 /*
1792  * Check for two cases, either we are full, and therefore
1793  * don't need to bother with the caching work since we won't
1794  * find any space, or we are empty, and we can just add all
1795  * the space in and be done with it.  This saves us _a_lot_ of
1796  * time, particularly in the full case.
1797  */
1798 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1799 cache->last_byte_to_unpin = (u64)-1;
1800 cache->cached = BTRFS_CACHE_FINISHED;
1801 btrfs_free_excluded_extents(cache);
1802 } else if (btrfs_block_group_used(&cache->item) == 0) {
1803 cache->last_byte_to_unpin = (u64)-1;
1804 cache->cached = BTRFS_CACHE_FINISHED;
1805 add_new_free_space(cache, found_key.objectid,
1806 found_key.objectid +
1807 found_key.offset);
1808 btrfs_free_excluded_extents(cache);
1809 }
1810
1811 ret = btrfs_add_block_group_cache(info, cache);
1812 if (ret) {
1813 btrfs_remove_free_space_cache(cache);
1814 btrfs_put_block_group(cache);
1815 goto error;
1816 }
1817
1818 trace_btrfs_add_block_group(info, cache, 0);
1819 btrfs_update_space_info(info, cache->flags, found_key.offset,
1820 btrfs_block_group_used(&cache->item),
1821 cache->bytes_super, &space_info);
1822
1823 cache->space_info = space_info;
1824
1825 link_block_group(cache);
1826
1827 set_avail_alloc_bits(info, cache->flags);
1828 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
1829 inc_block_group_ro(cache, 1);
1830 } else if (btrfs_block_group_used(&cache->item) == 0) {
1831 ASSERT(list_empty(&cache->bg_list));
1832 btrfs_mark_bg_unused(cache);
1833 }
1834 }
1835
1836 rcu_read_lock();
1837 list_for_each_entry_rcu(space_info, &info->space_info, list) {
1838 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1839 (BTRFS_BLOCK_GROUP_RAID10 |
1840 BTRFS_BLOCK_GROUP_RAID1_MASK |
1841 BTRFS_BLOCK_GROUP_RAID56_MASK |
1842 BTRFS_BLOCK_GROUP_DUP)))
1843 continue;
1844
1845 /*
1846  * Avoid allocating from un-mirrored block groups if there are mirrored ones.
1847  */
1848 list_for_each_entry(cache,
1849 &space_info->block_groups[BTRFS_RAID_RAID0],
1850 list)
1851 inc_block_group_ro(cache, 1);
1852 list_for_each_entry(cache,
1853 &space_info->block_groups[BTRFS_RAID_SINGLE],
1854 list)
1855 inc_block_group_ro(cache, 1);
1856 }
1857 rcu_read_unlock();
1858
1859 btrfs_init_global_block_rsv(info);
1860 ret = check_chunk_block_group_mappings(info);
1861 error:
1862 btrfs_free_path(path);
1863 return ret;
1864 }
1865
1866 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
1867 {
1868 struct btrfs_fs_info *fs_info = trans->fs_info;
1869 struct btrfs_block_group_cache *block_group;
1870 struct btrfs_root *extent_root = fs_info->extent_root;
1871 struct btrfs_block_group_item item;
1872 struct btrfs_key key;
1873 int ret = 0;
1874
1875 if (!trans->can_flush_pending_bgs)
1876 return;
1877
1878 while (!list_empty(&trans->new_bgs)) {
1879 block_group = list_first_entry(&trans->new_bgs,
1880 struct btrfs_block_group_cache,
1881 bg_list);
1882 if (ret)
1883 goto next;
1884
1885 spin_lock(&block_group->lock);
1886 memcpy(&item, &block_group->item, sizeof(item));
1887 memcpy(&key, &block_group->key, sizeof(key));
1888 spin_unlock(&block_group->lock);
1889
1890 ret = btrfs_insert_item(trans, extent_root, &key, &item,
1891 sizeof(item));
1892 if (ret)
1893 btrfs_abort_transaction(trans, ret);
1894 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
1895 if (ret)
1896 btrfs_abort_transaction(trans, ret);
1897 add_block_group_free_space(trans, block_group);
1898
1899 next:
1900 btrfs_delayed_refs_rsv_release(fs_info, 1);
1901 list_del_init(&block_group->bg_list);
1902 }
1903 btrfs_trans_release_chunk_metadata(trans);
1904 }
1905
1906 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
1907 u64 type, u64 chunk_offset, u64 size)
1908 {
1909 struct btrfs_fs_info *fs_info = trans->fs_info;
1910 struct btrfs_block_group_cache *cache;
1911 int ret;
1912
1913 btrfs_set_log_full_commit(trans);
1914
1915 cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
1916 if (!cache)
1917 return -ENOMEM;
1918
1919 btrfs_set_block_group_used(&cache->item, bytes_used);
1920 btrfs_set_block_group_chunk_objectid(&cache->item,
1921 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1922 btrfs_set_block_group_flags(&cache->item, type);
1923
1924 cache->flags = type;
1925 cache->last_byte_to_unpin = (u64)-1;
1926 cache->cached = BTRFS_CACHE_FINISHED;
1927 cache->needs_free_space = 1;
1928 ret = exclude_super_stripes(cache);
1929 if (ret) {
1930
1931 btrfs_free_excluded_extents(cache);
1932 btrfs_put_block_group(cache);
1933 return ret;
1934 }
1935
1936 add_new_free_space(cache, chunk_offset, chunk_offset + size);
1937
1938 btrfs_free_excluded_extents(cache);
1939
1940 #ifdef CONFIG_BTRFS_DEBUG
1941 if (btrfs_should_fragment_free_space(cache)) {
1942 u64 new_bytes_used = size - bytes_used;
1943
1944 bytes_used += new_bytes_used >> 1;
1945 fragment_free_space(cache);
1946 }
1947 #endif
1948
1949
1950
1951
1952
1953 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
1954 ASSERT(cache->space_info);
1955
1956 ret = btrfs_add_block_group_cache(fs_info, cache);
1957 if (ret) {
1958 btrfs_remove_free_space_cache(cache);
1959 btrfs_put_block_group(cache);
1960 return ret;
1961 }
1962
1963
1964
1965
1966
1967 trace_btrfs_add_block_group(fs_info, cache, 1);
1968 btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
1969 cache->bytes_super, &cache->space_info);
1970 btrfs_update_global_block_rsv(fs_info);
1971
1972 link_block_group(cache);
1973
1974 list_add_tail(&cache->bg_list, &trans->new_bgs);
1975 trans->delayed_ref_updates++;
1976 btrfs_update_delayed_refs_rsv(trans);
1977
1978 set_avail_alloc_bits(fs_info, type);
1979 return 0;
1980 }
1981
1982 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
1983 {
1984 u64 num_devices;
1985 u64 stripped;
1986
1987
1988
1989
1990
1991 stripped = get_restripe_target(fs_info, flags);
1992 if (stripped)
1993 return extended_to_chunk(stripped);
1994
1995 num_devices = fs_info->fs_devices->rw_devices;
1996
1997 stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
1998 BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
1999
2000 if (num_devices == 1) {
2001 stripped |= BTRFS_BLOCK_GROUP_DUP;
2002 stripped = flags & ~stripped;
2003
2004
2005 if (flags & BTRFS_BLOCK_GROUP_RAID0)
2006 return stripped;
2007
2008
2009 if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2010 BTRFS_BLOCK_GROUP_RAID10))
2011 return stripped | BTRFS_BLOCK_GROUP_DUP;
2012 } else {
2013
2014 if (flags & stripped)
2015 return flags;
2016
2017 stripped |= BTRFS_BLOCK_GROUP_DUP;
2018 stripped = flags & ~stripped;
2019
2020
2021 if (flags & BTRFS_BLOCK_GROUP_DUP)
2022 return stripped | BTRFS_BLOCK_GROUP_RAID1;
2023
2024
2025 }
2026
2027 return flags;
2028 }
2029
2030 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
2031
2032 {
2033 struct btrfs_fs_info *fs_info = cache->fs_info;
2034 struct btrfs_trans_handle *trans;
2035 u64 alloc_flags;
2036 int ret;
2037
2038 again:
2039 trans = btrfs_join_transaction(fs_info->extent_root);
2040 if (IS_ERR(trans))
2041 return PTR_ERR(trans);
2042
2043
2044
2045
2046
2047
2048 mutex_lock(&fs_info->ro_block_group_mutex);
2049 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2050 u64 transid = trans->transid;
2051
2052 mutex_unlock(&fs_info->ro_block_group_mutex);
2053 btrfs_end_transaction(trans);
2054
2055 ret = btrfs_wait_for_commit(fs_info, transid);
2056 if (ret)
2057 return ret;
2058 goto again;
2059 }
2060
2061
2062
2063
2064
2065 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2066 if (alloc_flags != cache->flags) {
2067 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2068
2069
2070
2071
2072
2073 if (ret == -ENOSPC)
2074 ret = 0;
2075 if (ret < 0)
2076 goto out;
2077 }
2078
2079 ret = inc_block_group_ro(cache, 0);
2080 if (!ret)
2081 goto out;
2082 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2083 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2084 if (ret < 0)
2085 goto out;
2086 ret = inc_block_group_ro(cache, 0);
2087 out:
2088 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2089 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2090 mutex_lock(&fs_info->chunk_mutex);
2091 check_system_chunk(trans, alloc_flags);
2092 mutex_unlock(&fs_info->chunk_mutex);
2093 }
2094 mutex_unlock(&fs_info->ro_block_group_mutex);
2095
2096 btrfs_end_transaction(trans);
2097 return ret;
2098 }
2099
2100 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
2101 {
2102 struct btrfs_space_info *sinfo = cache->space_info;
2103 u64 num_bytes;
2104
2105 BUG_ON(!cache->ro);
2106
2107 spin_lock(&sinfo->lock);
2108 spin_lock(&cache->lock);
2109 if (!--cache->ro) {
2110 num_bytes = cache->key.offset - cache->reserved -
2111 cache->pinned - cache->bytes_super -
2112 btrfs_block_group_used(&cache->item);
2113 sinfo->bytes_readonly -= num_bytes;
2114 list_del_init(&cache->ro_list);
2115 }
2116 spin_unlock(&cache->lock);
2117 spin_unlock(&sinfo->lock);
2118 }
2119
2120 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2121 struct btrfs_path *path,
2122 struct btrfs_block_group_cache *cache)
2123 {
2124 struct btrfs_fs_info *fs_info = trans->fs_info;
2125 int ret;
2126 struct btrfs_root *extent_root = fs_info->extent_root;
2127 unsigned long bi;
2128 struct extent_buffer *leaf;
2129
2130 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2131 if (ret) {
2132 if (ret > 0)
2133 ret = -ENOENT;
2134 goto fail;
2135 }
2136
2137 leaf = path->nodes[0];
2138 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2139 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2140 btrfs_mark_buffer_dirty(leaf);
2141 fail:
2142 btrfs_release_path(path);
2143 return ret;
2144
2145 }
2146
2147 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2148 struct btrfs_trans_handle *trans,
2149 struct btrfs_path *path)
2150 {
2151 struct btrfs_fs_info *fs_info = block_group->fs_info;
2152 struct btrfs_root *root = fs_info->tree_root;
2153 struct inode *inode = NULL;
2154 struct extent_changeset *data_reserved = NULL;
2155 u64 alloc_hint = 0;
2156 int dcs = BTRFS_DC_ERROR;
2157 u64 num_pages = 0;
2158 int retries = 0;
2159 int ret = 0;
2160
2161 /*
2162  * If this block group is smaller than 100 megs don't bother caching the
2163  * block group.
2164  */
2165 if (block_group->key.offset < (100 * SZ_1M)) {
2166 spin_lock(&block_group->lock);
2167 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2168 spin_unlock(&block_group->lock);
2169 return 0;
2170 }
2171
2172 if (trans->aborted)
2173 return 0;
2174 again:
2175 inode = lookup_free_space_inode(block_group, path);
2176 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2177 ret = PTR_ERR(inode);
2178 btrfs_release_path(path);
2179 goto out;
2180 }
2181
2182 if (IS_ERR(inode)) {
2183 BUG_ON(retries);
2184 retries++;
2185
2186 if (block_group->ro)
2187 goto out_free;
2188
2189 ret = create_free_space_inode(trans, block_group, path);
2190 if (ret)
2191 goto out_free;
2192 goto again;
2193 }
2194
2195 /*
2196  * We want to set the generation to 0, that way if anything goes wrong
2197  * from here on out we know not to trust this cache when we load up next
2198  * time.
2199  */
2200 BTRFS_I(inode)->generation = 0;
2201 ret = btrfs_update_inode(trans, root, inode);
2202 if (ret) {
2203 /*
2204  * So theoretically we could recover from this, simply set the
2205  * super cache generation to 0 so we know to invalidate the
2206  * cache, but then we'd have to keep track of the block groups
2207  * that fail this way so we know we _have_ to reset this cache
2208  * before the next commit or risk reading stale cache.  So to
2209  * limit our chances of screwing things up, it is simpler (and
2210  * safer) to just abort the transaction if updating the free
2211  * space inode fails here.
2212  */
2213 btrfs_abort_transaction(trans, ret);
2214 goto out_put;
2215 }
2216 WARN_ON(ret);
2217
2218 /* We've already setup this transaction, go ahead and exit */
2219 if (block_group->cache_generation == trans->transid &&
2220 i_size_read(inode)) {
2221 dcs = BTRFS_DC_SETUP;
2222 goto out_put;
2223 }
2224
2225 if (i_size_read(inode) > 0) {
2226 ret = btrfs_check_trunc_cache_free_space(fs_info,
2227 &fs_info->global_block_rsv);
2228 if (ret)
2229 goto out_put;
2230
2231 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2232 if (ret)
2233 goto out_put;
2234 }
2235
2236 spin_lock(&block_group->lock);
2237 if (block_group->cached != BTRFS_CACHE_FINISHED ||
2238 !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2239 /*
2240  * Don't bother trying to write stuff out _if_
2241  * a) we're not cached,
2242  * b) we're with nospace_cache mount option,
2243  * c) we're with v2 space_cache (FREE_SPACE_TREE).
2244  */
2245 dcs = BTRFS_DC_WRITTEN;
2246 spin_unlock(&block_group->lock);
2247 goto out_put;
2248 }
2249 spin_unlock(&block_group->lock);
2250
2251 /*
2252  * We hit an ENOSPC when setting up the cache in this transaction, just
2253  * skip doing the setup, we've already cleared the cache so we're safe.
2254  */
2255 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2256 ret = -ENOSPC;
2257 goto out_put;
2258 }
2259
2260 /*
2261  * Try to preallocate enough space based on how big the block group is.
2262  * Keep in mind this has to include any pinned space which could end up
2263  * taking up quite a bit since it's not folded into the other space
2264  * cache.
2265  */
2266 num_pages = div_u64(block_group->key.offset, SZ_256M);
2267 if (!num_pages)
2268 num_pages = 1;
2269
2270 num_pages *= 16;
2271 num_pages *= PAGE_SIZE;
2272
2273 ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2274 if (ret)
2275 goto out_put;
2276
2277 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2278 num_pages, num_pages,
2279 &alloc_hint);
2280 /*
2281  * Our cache requires contiguous chunks so that we don't modify a bunch
2282  * of metadata or split extents when writing the cache out, which means
2283  * we can't rely on the preallocation succeeding.  If it fails with
2284  * ENOSPC, remember that in the transaction so we don't retry the setup
2285  * for the remaining block groups; for any other error the cache simply
2286  * stays in the BTRFS_DC_ERROR state and won't be used on the next mount.
2287  */
2288 if (!ret)
2289 dcs = BTRFS_DC_SETUP;
2290 else if (ret == -ENOSPC)
2291 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2292
2293 out_put:
2294 iput(inode);
2295 out_free:
2296 btrfs_release_path(path);
2297 out:
2298 spin_lock(&block_group->lock);
2299 if (!ret && dcs == BTRFS_DC_SETUP)
2300 block_group->cache_generation = trans->transid;
2301 block_group->disk_cache_state = dcs;
2302 spin_unlock(&block_group->lock);
2303
2304 extent_changeset_free(data_reserved);
2305 return ret;
2306 }
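/*
 * A worked example of the sizing heuristic above, assuming a 4 KiB PAGE_SIZE:
 * for a 1 GiB block group, div_u64(SZ_1G, SZ_256M) = 4, so num_pages becomes
 * 4 * 16 = 64 pages and the preallocation is 64 * 4 KiB = 256 KiB.  Block
 * groups smaller than 256 MiB (but above the 100 MiB cutoff at the top of
 * this function) still get the minimum of 16 pages, i.e. 64 KiB.
 */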
2307
2308 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2309 {
2310 struct btrfs_fs_info *fs_info = trans->fs_info;
2311 struct btrfs_block_group_cache *cache, *tmp;
2312 struct btrfs_transaction *cur_trans = trans->transaction;
2313 struct btrfs_path *path;
2314
2315 if (list_empty(&cur_trans->dirty_bgs) ||
2316 !btrfs_test_opt(fs_info, SPACE_CACHE))
2317 return 0;
2318
2319 path = btrfs_alloc_path();
2320 if (!path)
2321 return -ENOMEM;
2322
2323 /* Could add new block groups, use _safe just in case */
2324 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2325 dirty_list) {
2326 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2327 cache_save_setup(cache, trans, path);
2328 }
2329
2330 btrfs_free_path(path);
2331 return 0;
2332 }
2333
2334 /*
2335  * Transaction commit does final block group cache writeback during a
2336  * critical section where nothing is allowed to change the FS.  This is
2337  * required in order for the cache to actually match the block group,
2338  * but can introduce a lot of latency into the commit.
2339  *
2340  * So, btrfs_start_dirty_block_groups is here to kick off block group
2341  * cache IO.  There's a chance we'll have to redo some of it if the
2342  * block group changes again during the commit, but it greatly reduces
2343  * the commit latency by getting rid of the easy block groups while
2344  * we're still allowing others to join the commit.
2345  */
2346 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2347 {
2348 struct btrfs_fs_info *fs_info = trans->fs_info;
2349 struct btrfs_block_group_cache *cache;
2350 struct btrfs_transaction *cur_trans = trans->transaction;
2351 int ret = 0;
2352 int should_put;
2353 struct btrfs_path *path = NULL;
2354 LIST_HEAD(dirty);
2355 struct list_head *io = &cur_trans->io_bgs;
2356 int num_started = 0;
2357 int loops = 0;
2358
2359 spin_lock(&cur_trans->dirty_bgs_lock);
2360 if (list_empty(&cur_trans->dirty_bgs)) {
2361 spin_unlock(&cur_trans->dirty_bgs_lock);
2362 return 0;
2363 }
2364 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2365 spin_unlock(&cur_trans->dirty_bgs_lock);
2366
2367 again:
2368 /* Make sure all the block groups on our dirty list actually exist */
2369 btrfs_create_pending_block_groups(trans);
2370
2371 if (!path) {
2372 path = btrfs_alloc_path();
2373 if (!path)
2374 return -ENOMEM;
2375 }
2376
2377 /*
2378  * cache_write_mutex is here only to save us from balance or automatic
2379  * removal of empty block groups deleting this block group while we are
2380  * writing out the cache
2381  */
2382 mutex_lock(&trans->transaction->cache_write_mutex);
2383 while (!list_empty(&dirty)) {
2384 bool drop_reserve = true;
2385
2386 cache = list_first_entry(&dirty,
2387 struct btrfs_block_group_cache,
2388 dirty_list);
2389
2390 /*
2391  * This can happen if something re-dirties a block group that
2392  * is already under IO.  Just wait for it to finish and then do it again.
2393  */
2394 if (!list_empty(&cache->io_list)) {
2395 list_del_init(&cache->io_list);
2396 btrfs_wait_cache_io(trans, cache, path);
2397 btrfs_put_block_group(cache);
2398 }
2399
2400 /*
2401  * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2402  * it should update the cache_state.  Don't delete from the list
2403  * until after we have waited.
2404  *
2405  * Also, since we're not running in the commit critical section
2406  * here, we need the dirty_bgs_lock to protect against concurrent
2407  * modifications of the list from update_block_group.
2408  */
2409 spin_lock(&cur_trans->dirty_bgs_lock);
2410 list_del_init(&cache->dirty_list);
2411 spin_unlock(&cur_trans->dirty_bgs_lock);
2412
2413 should_put = 1;
2414
2415 cache_save_setup(cache, trans, path);
2416
2417 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2418 cache->io_ctl.inode = NULL;
2419 ret = btrfs_write_out_cache(trans, cache, path);
2420 if (ret == 0 && cache->io_ctl.inode) {
2421 num_started++;
2422 should_put = 0;
2423 /*
2424  * The cache_write_mutex is protecting the io_list here;
2425  * also refer to the definition of
2426  * btrfs_transaction::io_bgs for more details on how the
2427  * list is drained and the extra reference is dropped.
2428  */
2429 list_add_tail(&cache->io_list, io);
2430 } else {
2431 /*
2432  * If we failed to write the cache, the
2433  * generation will be bad and life goes on
2434  */
2435 ret = 0;
2436 }
2437 }
2438 if (!ret) {
2439 ret = write_one_cache_group(trans, path, cache);
2440 /*
2441  * Our block group might still be attached to the list
2442  * of new block groups in the transaction handle of some
2443  * other task (struct btrfs_trans_handle->new_bgs). This
2444  * means its block group item isn't yet in the extent
2445  * tree. If this happens ignore the error, as we will
2446  * try again later in the critical section of the
2447  * transaction commit.
2448  */
2449 if (ret == -ENOENT) {
2450 ret = 0;
2451 spin_lock(&cur_trans->dirty_bgs_lock);
2452 if (list_empty(&cache->dirty_list)) {
2453 list_add_tail(&cache->dirty_list,
2454 &cur_trans->dirty_bgs);
2455 btrfs_get_block_group(cache);
2456 drop_reserve = false;
2457 }
2458 spin_unlock(&cur_trans->dirty_bgs_lock);
2459 } else if (ret) {
2460 btrfs_abort_transaction(trans, ret);
2461 }
2462 }
2463
2464 /* If it's not on the io list, we need to put the block group */
2465 if (should_put)
2466 btrfs_put_block_group(cache);
2467 if (drop_reserve)
2468 btrfs_delayed_refs_rsv_release(fs_info, 1);
2469
2470 if (ret)
2471 break;
2472
2473 /*
2474  * Avoid blocking other tasks for too long. It might even save
2475  * us from writing caches for block groups that are going to be
2476  * removed.
2477  */
2478 mutex_unlock(&trans->transaction->cache_write_mutex);
2479 mutex_lock(&trans->transaction->cache_write_mutex);
2480 }
2481 mutex_unlock(&trans->transaction->cache_write_mutex);
2482
2483 /*
2484  * Go through delayed refs for all the stuff we've just kicked off
2485  * and then loop back (just once)
2486  */
2487 ret = btrfs_run_delayed_refs(trans, 0);
2488 if (!ret && loops == 0) {
2489 loops++;
2490 spin_lock(&cur_trans->dirty_bgs_lock);
2491 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2492 /*
2493  * dirty_bgs_lock protects us from concurrent block group
2494  * deletes too (not just cache_write_mutex).
2495  */
2496 if (!list_empty(&dirty)) {
2497 spin_unlock(&cur_trans->dirty_bgs_lock);
2498 goto again;
2499 }
2500 spin_unlock(&cur_trans->dirty_bgs_lock);
2501 } else if (ret < 0) {
2502 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2503 }
2504
2505 btrfs_free_path(path);
2506 return ret;
2507 }
2508
2509 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2510 {
2511 struct btrfs_fs_info *fs_info = trans->fs_info;
2512 struct btrfs_block_group_cache *cache;
2513 struct btrfs_transaction *cur_trans = trans->transaction;
2514 int ret = 0;
2515 int should_put;
2516 struct btrfs_path *path;
2517 struct list_head *io = &cur_trans->io_bgs;
2518 int num_started = 0;
2519
2520 path = btrfs_alloc_path();
2521 if (!path)
2522 return -ENOMEM;
2523
2524 /*
2525  * Even though we are in the critical section of the transaction commit,
2526  * we can still have concurrent tasks adding elements to this
2527  * transaction's list of dirty block groups. These tasks correspond to
2528  * endio free space workers started when writeback finishes for a
2529  * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2530  * allocate new block groups as a result of COWing nodes of the root
2531  * tree when updating the free space inode. The writeback for the space
2532  * caches is triggered by an earlier call to
2533  * btrfs_start_dirty_block_groups() and iterations of the following
2534  * loop.
2535  * Also we want to do the cache_save_setup first and then run the
2536  * delayed refs to make sure we have the best chance at doing this all
2537  * in one shot.
2538  */
2539 spin_lock(&cur_trans->dirty_bgs_lock);
2540 while (!list_empty(&cur_trans->dirty_bgs)) {
2541 cache = list_first_entry(&cur_trans->dirty_bgs,
2542 struct btrfs_block_group_cache,
2543 dirty_list);
2544
2545 /*
2546  * This can happen if cache_save_setup re-dirties a block group
2547  * that is already under IO.  Just wait for it to finish and
2548  * then do it all again
2549  */
2550 if (!list_empty(&cache->io_list)) {
2551 spin_unlock(&cur_trans->dirty_bgs_lock);
2552 list_del_init(&cache->io_list);
2553 btrfs_wait_cache_io(trans, cache, path);
2554 btrfs_put_block_group(cache);
2555 spin_lock(&cur_trans->dirty_bgs_lock);
2556 }
2557
2558 /*
2559  * Don't remove from the dirty list until after we've waited on
2560  * any pending IO
2561  */
2562 list_del_init(&cache->dirty_list);
2563 spin_unlock(&cur_trans->dirty_bgs_lock);
2564 should_put = 1;
2565
2566 cache_save_setup(cache, trans, path);
2567
2568 if (!ret)
2569 ret = btrfs_run_delayed_refs(trans,
2570 (unsigned long) -1);
2571
2572 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2573 cache->io_ctl.inode = NULL;
2574 ret = btrfs_write_out_cache(trans, cache, path);
2575 if (ret == 0 && cache->io_ctl.inode) {
2576 num_started++;
2577 should_put = 0;
2578 list_add_tail(&cache->io_list, io);
2579 } else {
2580 /*
2581  * If we failed to write the cache, the
2582  * generation will be bad and life goes on
2583  */
2584 ret = 0;
2585 }
2586 }
2587 if (!ret) {
2588 ret = write_one_cache_group(trans, path, cache);
2589 /*
2590  * One of the free space endio workers might have
2591  * created a new block group while updating a free space
2592  * cache's inode (at inode.c:btrfs_finish_ordered_io())
2593  * and hasn't yet attempted to add its block group item
2594  * to the extent tree (at btrfs_create_pending_block_groups()),
2595  * so we get -ENOENT from write_one_cache_group().
2596  *
2597  * Wait for all the current writers of this transaction to
2598  * finish (only ourselves will be left once num_writers drops
2599  * to 1) and then retry writing the block group item, which by
2600  * then must exist in the extent tree.
2601  */
2602 if (ret == -ENOENT) {
2603 wait_event(cur_trans->writer_wait,
2604 atomic_read(&cur_trans->num_writers) == 1);
2605 ret = write_one_cache_group(trans, path, cache);
2606 }
2607 if (ret)
2608 btrfs_abort_transaction(trans, ret);
2609 }
2610
2611 /* If it's not on the io list, we need to put the block group */
2612 if (should_put)
2613 btrfs_put_block_group(cache);
2614 btrfs_delayed_refs_rsv_release(fs_info, 1);
2615 spin_lock(&cur_trans->dirty_bgs_lock);
2616 }
2617 spin_unlock(&cur_trans->dirty_bgs_lock);
2618
2619 /*
2620  * Refer to the definition of io_bgs member for details why it's safe
2621  * to use it without any locking
2622  */
2623 while (!list_empty(io)) {
2624 cache = list_first_entry(io, struct btrfs_block_group_cache,
2625 io_list);
2626 list_del_init(&cache->io_list);
2627 btrfs_wait_cache_io(trans, cache, path);
2628 btrfs_put_block_group(cache);
2629 }
2630
2631 btrfs_free_path(path);
2632 return ret;
2633 }
2634
2635 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2636 u64 bytenr, u64 num_bytes, int alloc)
2637 {
2638 struct btrfs_fs_info *info = trans->fs_info;
2639 struct btrfs_block_group_cache *cache = NULL;
2640 u64 total = num_bytes;
2641 u64 old_val;
2642 u64 byte_in_group;
2643 int factor;
2644 int ret = 0;
2645
2646 /* Block accounting for the super block */
2647 spin_lock(&info->delalloc_root_lock);
2648 old_val = btrfs_super_bytes_used(info->super_copy);
2649 if (alloc)
2650 old_val += num_bytes;
2651 else
2652 old_val -= num_bytes;
2653 btrfs_set_super_bytes_used(info->super_copy, old_val);
2654 spin_unlock(&info->delalloc_root_lock);
2655
2656 while (total) {
2657 cache = btrfs_lookup_block_group(info, bytenr);
2658 if (!cache) {
2659 ret = -ENOENT;
2660 break;
2661 }
2662 factor = btrfs_bg_type_to_factor(cache->flags);
2663 /*
2664  * If this block group has free space cache written out, we
2665  * need to make sure to load it if we are removing space.
2666  *
2667  * This is because we need the unpinning stage to actually add
2668  * the space back to the block group, otherwise we will leak space.
2669  */
2670 if (!alloc && !btrfs_block_group_cache_done(cache))
2671 btrfs_cache_block_group(cache, 1);
2672
2673 byte_in_group = bytenr - cache->key.objectid;
2674 WARN_ON(byte_in_group > cache->key.offset);
2675
2676 spin_lock(&cache->space_info->lock);
2677 spin_lock(&cache->lock);
2678
2679 if (btrfs_test_opt(info, SPACE_CACHE) &&
2680 cache->disk_cache_state < BTRFS_DC_CLEAR)
2681 cache->disk_cache_state = BTRFS_DC_CLEAR;
2682
2683 old_val = btrfs_block_group_used(&cache->item);
2684 num_bytes = min(total, cache->key.offset - byte_in_group);
2685 if (alloc) {
2686 old_val += num_bytes;
2687 btrfs_set_block_group_used(&cache->item, old_val);
2688 cache->reserved -= num_bytes;
2689 cache->space_info->bytes_reserved -= num_bytes;
2690 cache->space_info->bytes_used += num_bytes;
2691 cache->space_info->disk_used += num_bytes * factor;
2692 spin_unlock(&cache->lock);
2693 spin_unlock(&cache->space_info->lock);
2694 } else {
2695 old_val -= num_bytes;
2696 btrfs_set_block_group_used(&cache->item, old_val);
2697 cache->pinned += num_bytes;
2698 btrfs_space_info_update_bytes_pinned(info,
2699 cache->space_info, num_bytes);
2700 cache->space_info->bytes_used -= num_bytes;
2701 cache->space_info->disk_used -= num_bytes * factor;
2702 spin_unlock(&cache->lock);
2703 spin_unlock(&cache->space_info->lock);
2704
2705 percpu_counter_add_batch(
2706 &cache->space_info->total_bytes_pinned,
2707 num_bytes,
2708 BTRFS_TOTAL_BYTES_PINNED_BATCH);
2709 set_extent_dirty(info->pinned_extents,
2710 bytenr, bytenr + num_bytes - 1,
2711 GFP_NOFS | __GFP_NOFAIL);
2712 }
2713
2714 spin_lock(&trans->transaction->dirty_bgs_lock);
2715 if (list_empty(&cache->dirty_list)) {
2716 list_add_tail(&cache->dirty_list,
2717 &trans->transaction->dirty_bgs);
2718 trans->delayed_ref_updates++;
2719 btrfs_get_block_group(cache);
2720 }
2721 spin_unlock(&trans->transaction->dirty_bgs_lock);
2722
2723 /*
2724  * No longer have used bytes in this block group, queue it for
2725  * deletion so that the unused block group cleaner can remove it
2726  * and free up the space as soon as possible.  The removal itself
2727  * happens later, from btrfs_delete_unused_bgs().
2728  */
2729 if (!alloc && old_val == 0)
2730 btrfs_mark_bg_unused(cache);
2731
2732 btrfs_put_block_group(cache);
2733 total -= num_bytes;
2734 bytenr += num_bytes;
2735 }
2736
2737 /* Modified block groups are accounted for in the delayed refs rsv. */
2738 btrfs_update_delayed_refs_rsv(trans);
2739 return ret;
2740 }
2741
2742 /**
2743  * btrfs_add_reserved_bytes - update the block_group and space info counters
2744  * @cache:      The cache we are manipulating
2745  * @ram_bytes:  The number of bytes of file content; the same as @num_bytes
2746  *              except for the compressed write path
2747  * @num_bytes:  The number of bytes in question
2748  * @delalloc:   The blocks are allocated for the delalloc write
2749  *
2750  * This is called by the allocator when it reserves space. If this is a
2751  * reservation and the block group has become read only we cannot make the
2752  * reservation and return -EAGAIN, otherwise this function always succeeds.
2753  */
2754 int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
2755 u64 ram_bytes, u64 num_bytes, int delalloc)
2756 {
2757 struct btrfs_space_info *space_info = cache->space_info;
2758 int ret = 0;
2759
2760 spin_lock(&space_info->lock);
2761 spin_lock(&cache->lock);
2762 if (cache->ro) {
2763 ret = -EAGAIN;
2764 } else {
2765 cache->reserved += num_bytes;
2766 space_info->bytes_reserved += num_bytes;
2767 trace_btrfs_space_reservation(cache->fs_info, "space_info",
2768 space_info->flags, num_bytes, 1);
2769 btrfs_space_info_update_bytes_may_use(cache->fs_info,
2770 space_info, -ram_bytes);
2771 if (delalloc)
2772 cache->delalloc_bytes += num_bytes;
2773 }
2774 spin_unlock(&cache->lock);
2775 spin_unlock(&space_info->lock);
2776 return ret;
2777 }
2778
2779 /**
2780  * btrfs_free_reserved_bytes - update the block_group and space info counters
2781  * @cache:      The cache we are manipulating
2782  * @num_bytes:  The number of bytes in question
2783  * @delalloc:   The blocks are allocated for the delalloc write
2784  *
2785  * This is called by somebody who is freeing space that was never actually
2786  * used on disk.  For example if you reserve space for a new leaf in
2787  * transaction A and before transaction A commits you free that leaf, you
2788  * call this to clear the reservation.
2789  */
2790 void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
2791 u64 num_bytes, int delalloc)
2792 {
2793 struct btrfs_space_info *space_info = cache->space_info;
2794
2795 spin_lock(&space_info->lock);
2796 spin_lock(&cache->lock);
2797 if (cache->ro)
2798 space_info->bytes_readonly += num_bytes;
2799 cache->reserved -= num_bytes;
2800 space_info->bytes_reserved -= num_bytes;
2801 space_info->max_extent_size = 0;
2802
2803 if (delalloc)
2804 cache->delalloc_bytes -= num_bytes;
2805 spin_unlock(&cache->lock);
2806 spin_unlock(&space_info->lock);
2807 }
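/*
 * A minimal sketch of how the reservation helpers above pair up in a caller,
 * with the caller's policy decisions elided:
 *
 *	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, 0);
 *	if (ret == -EAGAIN)
 *		... the block group went read-only, try another one ...
 *	...
 *	if (the reservation ends up not being used on disk)
 *		btrfs_free_reserved_bytes(cache, num_bytes, 0);
 *
 * Reservations that do turn into allocated extents are instead settled by
 * btrfs_update_block_group(), which moves the bytes from reserved to used.
 */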
2808
2809 static void force_metadata_allocation(struct btrfs_fs_info *info)
2810 {
2811 struct list_head *head = &info->space_info;
2812 struct btrfs_space_info *found;
2813
2814 rcu_read_lock();
2815 list_for_each_entry_rcu(found, head, list) {
2816 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2817 found->force_alloc = CHUNK_ALLOC_FORCE;
2818 }
2819 rcu_read_unlock();
2820 }
2821
2822 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2823 struct btrfs_space_info *sinfo, int force)
2824 {
2825 u64 bytes_used = btrfs_space_info_used(sinfo, false);
2826 u64 thresh;
2827
2828 if (force == CHUNK_ALLOC_FORCE)
2829 return 1;
2830
2831 /*
2832  * In limited mode, we want to have some free space up to
2833  * about 1% of the FS size.
2834  */
2835 if (force == CHUNK_ALLOC_LIMITED) {
2836 thresh = btrfs_super_total_bytes(fs_info->super_copy);
2837 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
2838
2839 if (sinfo->total_bytes - bytes_used < thresh)
2840 return 1;
2841 }
2842
2843 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
2844 return 0;
2845 return 1;
2846 }
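/*
 * Worked numbers for the heuristic above: with CHUNK_ALLOC_LIMITED on a 1 TiB
 * filesystem, thresh = max(64 MiB, 1% of 1 TiB), roughly 10 GiB, so a new
 * chunk is requested as soon as less than ~10 GiB of the space already
 * allocated to this space_info remains unused.  In the CHUNK_ALLOC_NO_FORCE
 * case the final check only triggers an allocation once bytes_used (plus a
 * 2 MiB slack) crosses 80% of sinfo->total_bytes, per div_factor(..., 8).
 */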
2847
2848 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
2849 {
2850 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
2851
2852 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2853 }
2854
2855 /*
2856  * If force is CHUNK_ALLOC_FORCE:
2857  *    - return 1 if it successfully allocates a chunk,
2858  *    - return errors including -ENOSPC otherwise.
2859  * If force is NOT CHUNK_ALLOC_FORCE:
2860  *    - return 0 if it doesn't need to allocate a new chunk,
2861  *    - return 1 if it successfully allocates a chunk,
2862  *    - return errors including -ENOSPC otherwise.
2863  */
2864 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
2865 enum btrfs_chunk_alloc_enum force)
2866 {
2867 struct btrfs_fs_info *fs_info = trans->fs_info;
2868 struct btrfs_space_info *space_info;
2869 bool wait_for_alloc = false;
2870 bool should_alloc = false;
2871 int ret = 0;
2872
2873 /* Don't re-enter if we're already allocating a chunk */
2874 if (trans->allocating_chunk)
2875 return -ENOSPC;
2876
2877 space_info = btrfs_find_space_info(fs_info, flags);
2878 ASSERT(space_info);
2879
2880 do {
2881 spin_lock(&space_info->lock);
2882 if (force < space_info->force_alloc)
2883 force = space_info->force_alloc;
2884 should_alloc = should_alloc_chunk(fs_info, space_info, force);
2885 if (space_info->full) {
2886 /* No more device space left to allocate a new chunk from */
2887 if (should_alloc)
2888 ret = -ENOSPC;
2889 else
2890 ret = 0;
2891 spin_unlock(&space_info->lock);
2892 return ret;
2893 } else if (!should_alloc) {
2894 spin_unlock(&space_info->lock);
2895 return 0;
2896 } else if (space_info->chunk_alloc) {
2897 /*
2898  * Someone is already allocating, so we need to block
2899  * until this someone is finished and then loop to
2900  * recheck if we should continue with our allocation
2901  * attempt.
2902  */
2903 wait_for_alloc = true;
2904 spin_unlock(&space_info->lock);
2905 mutex_lock(&fs_info->chunk_mutex);
2906 mutex_unlock(&fs_info->chunk_mutex);
2907 } else {
2908 /* Proceed with allocation */
2909 space_info->chunk_alloc = 1;
2910 wait_for_alloc = false;
2911 spin_unlock(&space_info->lock);
2912 }
2913
2914 cond_resched();
2915 } while (wait_for_alloc);
2916
2917 mutex_lock(&fs_info->chunk_mutex);
2918 trans->allocating_chunk = true;
2919
2920 /*
2921  * If we have mixed data/metadata chunks we want to make sure we keep
2922  * allocating mixed chunks instead of individual chunks.
2923  */
2924 if (btrfs_mixed_space_info(space_info))
2925 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
2926
2927 /*
2928  * If we're doing a data chunk, go ahead and make sure that
2929  * we keep a reasonable number of metadata chunks allocated in the
2930  * FS as well.
2931  */
2932 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
2933 fs_info->data_chunk_allocations++;
2934 if (!(fs_info->data_chunk_allocations %
2935 fs_info->metadata_ratio))
2936 force_metadata_allocation(fs_info);
2937 }
2938
2939 /*
2940  * Check if we have enough space in the SYSTEM chunk because we may
2941  * need to update devices.
2942  */
2943 check_system_chunk(trans, flags);
2944
2945 ret = btrfs_alloc_chunk(trans, flags);
2946 trans->allocating_chunk = false;
2947
2948 spin_lock(&space_info->lock);
2949 if (ret < 0) {
2950 if (ret == -ENOSPC)
2951 space_info->full = 1;
2952 else
2953 goto out;
2954 } else {
2955 ret = 1;
2956 space_info->max_extent_size = 0;
2957 }
2958
2959 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
2960 out:
2961 space_info->chunk_alloc = 0;
2962 spin_unlock(&space_info->lock);
2963 mutex_unlock(&fs_info->chunk_mutex);
2964
2965 /*
2966  * When we allocate a new chunk we reserve space in the chunk block
2967  * reserve to make sure we can COW nodes/leafs in the chunk tree or
2968  * add new nodes/leafs to it if we end up needing to do it when
2969  * inserting the chunk item and updating device items as part of the
2970  * second phase of chunk allocation, performed by
2971  * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
2972  * large number of new block groups to create in our transaction
2973  * handle's new_bgs list to avoid exhausting the chunk block reserve
2974  * in a pathological case where we accumulated a large number of
2975  * them.  Therefore flush the pending block group creations once the
2976  * reserved chunk bytes grow beyond 2M.
2977  */
2978 if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
2979 btrfs_create_pending_block_groups(trans);
2980
2981 return ret;
2982 }
2983
2984 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
2985 {
2986 u64 num_dev;
2987
2988 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
2989 if (!num_dev)
2990 num_dev = fs_info->fs_devices->rw_devices;
2991
2992 return num_dev;
2993 }
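/*
 * Note on the helper above: devs_max is 0 for profiles that may span any
 * number of devices (e.g. RAID0, RAID10, RAID5/6), in which case the current
 * number of writeable devices is used instead; profiles with a fixed device
 * count, such as DUP (1) or RAID1 (2), return that fixed value.
 */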
2994
2995 /*
2996  * Reserve space in the system space_info so that we can update device
2997  * items and insert a chunk item when allocating or removing a chunk,
2998  * allocating a new SYSTEM chunk first if there is not enough space.
2999  */
3000 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3001 {
3002 struct btrfs_fs_info *fs_info = trans->fs_info;
3003 struct btrfs_space_info *info;
3004 u64 left;
3005 u64 thresh;
3006 int ret = 0;
3007 u64 num_devs;
3008
3009 /*
3010  * Needed because we can end up allocating a system chunk and for an
3011  * atomic and race free space reservation in the chunk block reserve.
3012  */
3013 lockdep_assert_held(&fs_info->chunk_mutex);
3014
3015 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3016 spin_lock(&info->lock);
3017 left = info->total_bytes - btrfs_space_info_used(info, true);
3018 spin_unlock(&info->lock);
3019
3020 num_devs = get_profile_num_devs(fs_info, type);
3021
3022 /* num_devs device items to update and 1 chunk item to add or remove */
3023 thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3024 btrfs_calc_insert_metadata_size(fs_info, 1);
3025
3026 if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3027 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3028 left, thresh, type);
3029 btrfs_dump_space_info(fs_info, info, 0, 0);
3030 }
3031
3032 if (left < thresh) {
3033 u64 flags = btrfs_system_alloc_profile(fs_info);
3034
3035 /*
3036  * Ignore failure to create the system chunk here. We might end
3037  * up not needing it, as we might not need to COW all nodes/leafs
3038  * from the paths we visit in the chunk tree (they were already
3039  * COWed or created in the current transaction for example).
3040  */
3041 ret = btrfs_alloc_chunk(trans, flags);
3042 }
3043
3044 if (!ret) {
3045 ret = btrfs_block_rsv_add(fs_info->chunk_root,
3046 &fs_info->chunk_block_rsv,
3047 thresh, BTRFS_RESERVE_NO_FLUSH);
3048 if (!ret)
3049 trans->chunk_bytes_reserved += thresh;
3050 }
3051 }
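/*
 * A rough worked example of the reservation above, assuming the common 16 KiB
 * nodesize and that the btrfs_calc_metadata_size() /
 * btrfs_calc_insert_metadata_size() helpers reserve a full tree path
 * (BTRFS_MAX_LEVEL nodes) per item, doubled for insertions: with num_devs = 2
 * that is about 16K * 8 * 2 = 256 KiB for the device item updates plus
 * another 256 KiB for inserting one chunk item, so roughly half a MiB taken
 * from the SYSTEM space_info (or reserved in the chunk block reserve).
 */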
3052
3053 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3054 {
3055 struct btrfs_block_group_cache *block_group;
3056 u64 last = 0;
3057
3058 while (1) {
3059 struct inode *inode;
3060
3061 block_group = btrfs_lookup_first_block_group(info, last);
3062 while (block_group) {
3063 btrfs_wait_block_group_cache_done(block_group);
3064 spin_lock(&block_group->lock);
3065 if (block_group->iref)
3066 break;
3067 spin_unlock(&block_group->lock);
3068 block_group = btrfs_next_block_group(block_group);
3069 }
3070 if (!block_group) {
3071 if (last == 0)
3072 break;
3073 last = 0;
3074 continue;
3075 }
3076
3077 inode = block_group->inode;
3078 block_group->iref = 0;
3079 block_group->inode = NULL;
3080 spin_unlock(&block_group->lock);
3081 ASSERT(block_group->io_ctl.inode == NULL);
3082 iput(inode);
3083 last = block_group->key.objectid + block_group->key.offset;
3084 btrfs_put_block_group(block_group);
3085 }
3086 }
3087
3088 /*
3089  * Must be called only after stopping all workers, since we could have block
3090  * group caching kthreads running, and therefore they could race with us if
3091  * we freed the block groups before stopping them.
3092  */
3093 int btrfs_free_block_groups(struct btrfs_fs_info *info)
3094 {
3095 struct btrfs_block_group_cache *block_group;
3096 struct btrfs_space_info *space_info;
3097 struct btrfs_caching_control *caching_ctl;
3098 struct rb_node *n;
3099
3100 down_write(&info->commit_root_sem);
3101 while (!list_empty(&info->caching_block_groups)) {
3102 caching_ctl = list_entry(info->caching_block_groups.next,
3103 struct btrfs_caching_control, list);
3104 list_del(&caching_ctl->list);
3105 btrfs_put_caching_control(caching_ctl);
3106 }
3107 up_write(&info->commit_root_sem);
3108
3109 spin_lock(&info->unused_bgs_lock);
3110 while (!list_empty(&info->unused_bgs)) {
3111 block_group = list_first_entry(&info->unused_bgs,
3112 struct btrfs_block_group_cache,
3113 bg_list);
3114 list_del_init(&block_group->bg_list);
3115 btrfs_put_block_group(block_group);
3116 }
3117 spin_unlock(&info->unused_bgs_lock);
3118
3119 spin_lock(&info->block_group_cache_lock);
3120 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3121 block_group = rb_entry(n, struct btrfs_block_group_cache,
3122 cache_node);
3123 rb_erase(&block_group->cache_node,
3124 &info->block_group_cache_tree);
3125 RB_CLEAR_NODE(&block_group->cache_node);
3126 spin_unlock(&info->block_group_cache_lock);
3127
3128 down_write(&block_group->space_info->groups_sem);
3129 list_del(&block_group->list);
3130 up_write(&block_group->space_info->groups_sem);
3131
3132 /*
3133  * We haven't cached this block group, which means we could
3134  * possibly have excluded extents on this block group.
3135  */
3136 if (block_group->cached == BTRFS_CACHE_NO ||
3137 block_group->cached == BTRFS_CACHE_ERROR)
3138 btrfs_free_excluded_extents(block_group);
3139
3140 btrfs_remove_free_space_cache(block_group);
3141 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3142 ASSERT(list_empty(&block_group->dirty_list));
3143 ASSERT(list_empty(&block_group->io_list));
3144 ASSERT(list_empty(&block_group->bg_list));
3145 ASSERT(atomic_read(&block_group->count) == 1);
3146 btrfs_put_block_group(block_group);
3147
3148 spin_lock(&info->block_group_cache_lock);
3149 }
3150 spin_unlock(&info->block_group_cache_lock);
3151
3152 /*
3153  * Now that all the block groups are freed, go through and free all
3154  * the space_info structs.  This is only called during the final
3155  * stages of unmount, and so we know nobody is using them.  Wait
3156  * for an RCU grace period before touching the space_info list.
3157  */
3158 synchronize_rcu();
3159
3160 btrfs_release_global_block_rsv(info);
3161
3162 while (!list_empty(&info->space_info)) {
3163 space_info = list_entry(info->space_info.next,
3164 struct btrfs_space_info,
3165 list);
3166
3167 /*
3168  * Do not hide this behind enospc_debug, this is actually
3169  * important and indicates a real bug if this happens.
3170  */
3171 if (WARN_ON(space_info->bytes_pinned > 0 ||
3172 space_info->bytes_reserved > 0 ||
3173 space_info->bytes_may_use > 0))
3174 btrfs_dump_space_info(info, space_info, 0, 0);
3175 list_del(&space_info->list);
3176 btrfs_sysfs_remove_space_info(space_info);
3177 }
3178 return 0;
3179 }