1/*
2 * Copyright (C) 2007 Oracle.  All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/pagemap.h>
20#include <linux/writeback.h>
21#include <linux/blkdev.h>
22#include <linux/sort.h>
23#include <linux/rcupdate.h>
24#include <linux/kthread.h>
25#include <linux/slab.h>
26#include <linux/ratelimit.h>
27#include <linux/percpu_counter.h>
28#include "hash.h"
29#include "tree-log.h"
30#include "disk-io.h"
31#include "print-tree.h"
32#include "volumes.h"
33#include "raid56.h"
34#include "locking.h"
35#include "free-space-cache.h"
36#include "math.h"
37#include "sysfs.h"
38#include "qgroup.h"
39
40#undef SCRAMBLE_DELAYED_REFS
41
42/*
43 * control flags for do_chunk_alloc's force field
44 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45 * if we really need one.
46 *
47 * CHUNK_ALLOC_LIMITED means to only try and allocate one
48 * if we have very few chunks already allocated.  This is
49 * used as part of the clustering code to help make sure
50 * we have a good pool of storage to cluster in, without
51 * filling the FS with empty chunks
52 *
53 * CHUNK_ALLOC_FORCE means it must try to allocate one
54 *
55 */
56enum {
57	CHUNK_ALLOC_NO_FORCE = 0,
58	CHUNK_ALLOC_LIMITED = 1,
59	CHUNK_ALLOC_FORCE = 2,
60};
61
62/*
63 * Control how reservations are dealt with.
64 *
65 * RESERVE_FREE - freeing a reservation.
66 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67 *   ENOSPC accounting
68 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69 *   bytes_may_use as the ENOSPC accounting is done elsewhere
70 */
71enum {
72	RESERVE_FREE = 0,
73	RESERVE_ALLOC = 1,
74	RESERVE_ALLOC_NO_ACCOUNT = 2,
75};
76
77static int update_block_group(struct btrfs_trans_handle *trans,
78			      struct btrfs_root *root, u64 bytenr,
79			      u64 num_bytes, int alloc);
80static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81				struct btrfs_root *root,
82				u64 bytenr, u64 num_bytes, u64 parent,
83				u64 root_objectid, u64 owner_objectid,
84				u64 owner_offset, int refs_to_drop,
85				struct btrfs_delayed_extent_op *extra_op,
86				int no_quota);
87static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
88				    struct extent_buffer *leaf,
89				    struct btrfs_extent_item *ei);
90static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
91				      struct btrfs_root *root,
92				      u64 parent, u64 root_objectid,
93				      u64 flags, u64 owner, u64 offset,
94				      struct btrfs_key *ins, int ref_mod);
95static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
96				     struct btrfs_root *root,
97				     u64 parent, u64 root_objectid,
98				     u64 flags, struct btrfs_disk_key *key,
99				     int level, struct btrfs_key *ins,
100				     int no_quota);
101static int do_chunk_alloc(struct btrfs_trans_handle *trans,
102			  struct btrfs_root *extent_root, u64 flags,
103			  int force);
104static int find_next_key(struct btrfs_path *path, int level,
105			 struct btrfs_key *key);
106static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
107			    int dump_block_groups);
108static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
109				       u64 num_bytes, int reserve,
110				       int delalloc);
111static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
112			       u64 num_bytes);
113int btrfs_pin_extent(struct btrfs_root *root,
114		     u64 bytenr, u64 num_bytes, int reserved);
115
116static noinline int
117block_group_cache_done(struct btrfs_block_group_cache *cache)
118{
119	smp_mb();
120	return cache->cached == BTRFS_CACHE_FINISHED ||
121		cache->cached == BTRFS_CACHE_ERROR;
122}
123
124static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
125{
126	return (cache->flags & bits) == bits;
127}
128
129static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
130{
131	atomic_inc(&cache->count);
132}
133
134void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
135{
136	if (atomic_dec_and_test(&cache->count)) {
137		WARN_ON(cache->pinned > 0);
138		WARN_ON(cache->reserved > 0);
139		kfree(cache->free_space_ctl);
140		kfree(cache);
141	}
142}
143
144/*
145 * this adds the block group to the fs_info rb tree for the block group
146 * cache
147 */
148static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
149				struct btrfs_block_group_cache *block_group)
150{
151	struct rb_node **p;
152	struct rb_node *parent = NULL;
153	struct btrfs_block_group_cache *cache;
154
155	spin_lock(&info->block_group_cache_lock);
156	p = &info->block_group_cache_tree.rb_node;
157
158	while (*p) {
159		parent = *p;
160		cache = rb_entry(parent, struct btrfs_block_group_cache,
161				 cache_node);
162		if (block_group->key.objectid < cache->key.objectid) {
163			p = &(*p)->rb_left;
164		} else if (block_group->key.objectid > cache->key.objectid) {
165			p = &(*p)->rb_right;
166		} else {
167			spin_unlock(&info->block_group_cache_lock);
168			return -EEXIST;
169		}
170	}
171
172	rb_link_node(&block_group->cache_node, parent, p);
173	rb_insert_color(&block_group->cache_node,
174			&info->block_group_cache_tree);
175
176	if (info->first_logical_byte > block_group->key.objectid)
177		info->first_logical_byte = block_group->key.objectid;
178
179	spin_unlock(&info->block_group_cache_lock);
180
181	return 0;
182}
183
184/*
185 * This will return the block group at or after bytenr if contains is 0, else
186 * it will return the block group that contains the bytenr
187 */
188static struct btrfs_block_group_cache *
189block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
190			      int contains)
191{
192	struct btrfs_block_group_cache *cache, *ret = NULL;
193	struct rb_node *n;
194	u64 end, start;
195
196	spin_lock(&info->block_group_cache_lock);
197	n = info->block_group_cache_tree.rb_node;
198
199	while (n) {
200		cache = rb_entry(n, struct btrfs_block_group_cache,
201				 cache_node);
202		end = cache->key.objectid + cache->key.offset - 1;
203		start = cache->key.objectid;
204
205		if (bytenr < start) {
206			if (!contains && (!ret || start < ret->key.objectid))
207				ret = cache;
208			n = n->rb_left;
209		} else if (bytenr > start) {
210			if (contains && bytenr <= end) {
211				ret = cache;
212				break;
213			}
214			n = n->rb_right;
215		} else {
216			ret = cache;
217			break;
218		}
219	}
220	if (ret) {
221		btrfs_get_block_group(ret);
222		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
223			info->first_logical_byte = ret->key.objectid;
224	}
225	spin_unlock(&info->block_group_cache_lock);
226
227	return ret;
228}
229
230static int add_excluded_extent(struct btrfs_root *root,
231			       u64 start, u64 num_bytes)
232{
233	u64 end = start + num_bytes - 1;
234	set_extent_bits(&root->fs_info->freed_extents[0],
235			start, end, EXTENT_UPTODATE, GFP_NOFS);
236	set_extent_bits(&root->fs_info->freed_extents[1],
237			start, end, EXTENT_UPTODATE, GFP_NOFS);
238	return 0;
239}
240
241static void free_excluded_extents(struct btrfs_root *root,
242				  struct btrfs_block_group_cache *cache)
243{
244	u64 start, end;
245
246	start = cache->key.objectid;
247	end = start + cache->key.offset - 1;
248
249	clear_extent_bits(&root->fs_info->freed_extents[0],
250			  start, end, EXTENT_UPTODATE, GFP_NOFS);
251	clear_extent_bits(&root->fs_info->freed_extents[1],
252			  start, end, EXTENT_UPTODATE, GFP_NOFS);
253}
254
255static int exclude_super_stripes(struct btrfs_root *root,
256				 struct btrfs_block_group_cache *cache)
257{
258	u64 bytenr;
259	u64 *logical;
260	int stripe_len;
261	int i, nr, ret;
262
263	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
264		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
265		cache->bytes_super += stripe_len;
266		ret = add_excluded_extent(root, cache->key.objectid,
267					  stripe_len);
268		if (ret)
269			return ret;
270	}
271
272	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
273		bytenr = btrfs_sb_offset(i);
274		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
275				       cache->key.objectid, bytenr,
276				       0, &logical, &nr, &stripe_len);
277		if (ret)
278			return ret;
279
280		while (nr--) {
281			u64 start, len;
282
283			if (logical[nr] > cache->key.objectid +
284			    cache->key.offset)
285				continue;
286
287			if (logical[nr] + stripe_len <= cache->key.objectid)
288				continue;
289
290			start = logical[nr];
291			if (start < cache->key.objectid) {
292				start = cache->key.objectid;
293				len = (logical[nr] + stripe_len) - start;
294			} else {
295				len = min_t(u64, stripe_len,
296					    cache->key.objectid +
297					    cache->key.offset - start);
298			}
299
300			cache->bytes_super += len;
301			ret = add_excluded_extent(root, start, len);
302			if (ret) {
303				kfree(logical);
304				return ret;
305			}
306		}
307
308		kfree(logical);
309	}
310	return 0;
311}
312
313static struct btrfs_caching_control *
314get_caching_control(struct btrfs_block_group_cache *cache)
315{
316	struct btrfs_caching_control *ctl;
317
318	spin_lock(&cache->lock);
319	if (!cache->caching_ctl) {
320		spin_unlock(&cache->lock);
321		return NULL;
322	}
323
324	ctl = cache->caching_ctl;
325	atomic_inc(&ctl->count);
326	spin_unlock(&cache->lock);
327	return ctl;
328}
329
330static void put_caching_control(struct btrfs_caching_control *ctl)
331{
332	if (atomic_dec_and_test(&ctl->count))
333		kfree(ctl);
334}
335
336/*
337 * this is only called by cache_block_group, since we could have freed extents
338 * we need to check the pinned_extents for any extents that can't be used yet
339 * since their free space will be released as soon as the transaction commits.
340 */
341static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
342			      struct btrfs_fs_info *info, u64 start, u64 end)
343{
344	u64 extent_start, extent_end, size, total_added = 0;
345	int ret;
346
347	while (start < end) {
348		ret = find_first_extent_bit(info->pinned_extents, start,
349					    &extent_start, &extent_end,
350					    EXTENT_DIRTY | EXTENT_UPTODATE,
351					    NULL);
352		if (ret)
353			break;
354
355		if (extent_start <= start) {
356			start = extent_end + 1;
357		} else if (extent_start > start && extent_start < end) {
358			size = extent_start - start;
359			total_added += size;
360			ret = btrfs_add_free_space(block_group, start,
361						   size);
362			BUG_ON(ret); /* -ENOMEM or logic error */
363			start = extent_end + 1;
364		} else {
365			break;
366		}
367	}
368
369	if (start < end) {
370		size = end - start;
371		total_added += size;
372		ret = btrfs_add_free_space(block_group, start, size);
373		BUG_ON(ret); /* -ENOMEM or logic error */
374	}
375
376	return total_added;
377}
378
379static noinline void caching_thread(struct btrfs_work *work)
380{
381	struct btrfs_block_group_cache *block_group;
382	struct btrfs_fs_info *fs_info;
383	struct btrfs_caching_control *caching_ctl;
384	struct btrfs_root *extent_root;
385	struct btrfs_path *path;
386	struct extent_buffer *leaf;
387	struct btrfs_key key;
388	u64 total_found = 0;
389	u64 last = 0;
390	u32 nritems;
391	int ret = -ENOMEM;
392
393	caching_ctl = container_of(work, struct btrfs_caching_control, work);
394	block_group = caching_ctl->block_group;
395	fs_info = block_group->fs_info;
396	extent_root = fs_info->extent_root;
397
398	path = btrfs_alloc_path();
399	if (!path)
400		goto out;
401
402	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
403
404	/*
405	 * We don't want to deadlock with somebody trying to allocate a new
406	 * extent for the extent root while also trying to search the extent
407	 * root to add free space.  So we skip locking and search the commit
408	 * root, since its read-only
409	 */
410	path->skip_locking = 1;
411	path->search_commit_root = 1;
412	path->reada = 1;
413
414	key.objectid = last;
415	key.offset = 0;
416	key.type = BTRFS_EXTENT_ITEM_KEY;
417again:
418	mutex_lock(&caching_ctl->mutex);
419	/* need to make sure the commit_root doesn't disappear */
420	down_read(&fs_info->commit_root_sem);
421
422next:
423	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
424	if (ret < 0)
425		goto err;
426
427	leaf = path->nodes[0];
428	nritems = btrfs_header_nritems(leaf);
429
430	while (1) {
431		if (btrfs_fs_closing(fs_info) > 1) {
432			last = (u64)-1;
433			break;
434		}
435
436		if (path->slots[0] < nritems) {
437			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
438		} else {
439			ret = find_next_key(path, 0, &key);
440			if (ret)
441				break;
442
443			if (need_resched() ||
444			    rwsem_is_contended(&fs_info->commit_root_sem)) {
445				caching_ctl->progress = last;
446				btrfs_release_path(path);
447				up_read(&fs_info->commit_root_sem);
448				mutex_unlock(&caching_ctl->mutex);
449				cond_resched();
450				goto again;
451			}
452
453			ret = btrfs_next_leaf(extent_root, path);
454			if (ret < 0)
455				goto err;
456			if (ret)
457				break;
458			leaf = path->nodes[0];
459			nritems = btrfs_header_nritems(leaf);
460			continue;
461		}
462
463		if (key.objectid < last) {
464			key.objectid = last;
465			key.offset = 0;
466			key.type = BTRFS_EXTENT_ITEM_KEY;
467
468			caching_ctl->progress = last;
469			btrfs_release_path(path);
470			goto next;
471		}
472
473		if (key.objectid < block_group->key.objectid) {
474			path->slots[0]++;
475			continue;
476		}
477
478		if (key.objectid >= block_group->key.objectid +
479		    block_group->key.offset)
480			break;
481
482		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
483		    key.type == BTRFS_METADATA_ITEM_KEY) {
484			total_found += add_new_free_space(block_group,
485							  fs_info, last,
486							  key.objectid);
487			if (key.type == BTRFS_METADATA_ITEM_KEY)
488				last = key.objectid +
489					fs_info->tree_root->nodesize;
490			else
491				last = key.objectid + key.offset;
492
493			if (total_found > (1024 * 1024 * 2)) {
494				total_found = 0;
495				wake_up(&caching_ctl->wait);
496			}
497		}
498		path->slots[0]++;
499	}
500	ret = 0;
501
502	total_found += add_new_free_space(block_group, fs_info, last,
503					  block_group->key.objectid +
504					  block_group->key.offset);
505	caching_ctl->progress = (u64)-1;
506
507	spin_lock(&block_group->lock);
508	block_group->caching_ctl = NULL;
509	block_group->cached = BTRFS_CACHE_FINISHED;
510	spin_unlock(&block_group->lock);
511
512err:
513	btrfs_free_path(path);
514	up_read(&fs_info->commit_root_sem);
515
516	free_excluded_extents(extent_root, block_group);
517
518	mutex_unlock(&caching_ctl->mutex);
519out:
520	if (ret) {
521		spin_lock(&block_group->lock);
522		block_group->caching_ctl = NULL;
523		block_group->cached = BTRFS_CACHE_ERROR;
524		spin_unlock(&block_group->lock);
525	}
526	wake_up(&caching_ctl->wait);
527
528	put_caching_control(caching_ctl);
529	btrfs_put_block_group(block_group);
530}
531
532static int cache_block_group(struct btrfs_block_group_cache *cache,
533			     int load_cache_only)
534{
535	DEFINE_WAIT(wait);
536	struct btrfs_fs_info *fs_info = cache->fs_info;
537	struct btrfs_caching_control *caching_ctl;
538	int ret = 0;
539
540	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
541	if (!caching_ctl)
542		return -ENOMEM;
543
544	INIT_LIST_HEAD(&caching_ctl->list);
545	mutex_init(&caching_ctl->mutex);
546	init_waitqueue_head(&caching_ctl->wait);
547	caching_ctl->block_group = cache;
548	caching_ctl->progress = cache->key.objectid;
549	atomic_set(&caching_ctl->count, 1);
550	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
551			caching_thread, NULL, NULL);
552
553	spin_lock(&cache->lock);
554	/*
555	 * This should be a rare occasion, but this could happen I think in the
556	 * case where one thread starts to load the space cache info, and then
557	 * some other thread starts a transaction commit which tries to do an
558	 * allocation while the other thread is still loading the space cache
559	 * info.  The previous loop should have kept us from choosing this block
560	 * group, but if we've moved to the state where we will wait on caching
561	 * block groups we need to first check if we're doing a fast load here,
562	 * so we can wait for it to finish, otherwise we could end up allocating
563	 * from a block group who's cache gets evicted for one reason or
564	 * another.
565	 */
566	while (cache->cached == BTRFS_CACHE_FAST) {
567		struct btrfs_caching_control *ctl;
568
569		ctl = cache->caching_ctl;
570		atomic_inc(&ctl->count);
571		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
572		spin_unlock(&cache->lock);
573
574		schedule();
575
576		finish_wait(&ctl->wait, &wait);
577		put_caching_control(ctl);
578		spin_lock(&cache->lock);
579	}
580
581	if (cache->cached != BTRFS_CACHE_NO) {
582		spin_unlock(&cache->lock);
583		kfree(caching_ctl);
584		return 0;
585	}
586	WARN_ON(cache->caching_ctl);
587	cache->caching_ctl = caching_ctl;
588	cache->cached = BTRFS_CACHE_FAST;
589	spin_unlock(&cache->lock);
590
591	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
592		mutex_lock(&caching_ctl->mutex);
593		ret = load_free_space_cache(fs_info, cache);
594
595		spin_lock(&cache->lock);
596		if (ret == 1) {
597			cache->caching_ctl = NULL;
598			cache->cached = BTRFS_CACHE_FINISHED;
599			cache->last_byte_to_unpin = (u64)-1;
600			caching_ctl->progress = (u64)-1;
601		} else {
602			if (load_cache_only) {
603				cache->caching_ctl = NULL;
604				cache->cached = BTRFS_CACHE_NO;
605			} else {
606				cache->cached = BTRFS_CACHE_STARTED;
607				cache->has_caching_ctl = 1;
608			}
609		}
610		spin_unlock(&cache->lock);
611		mutex_unlock(&caching_ctl->mutex);
612
613		wake_up(&caching_ctl->wait);
614		if (ret == 1) {
615			put_caching_control(caching_ctl);
616			free_excluded_extents(fs_info->extent_root, cache);
617			return 0;
618		}
619	} else {
620		/*
621		 * We are not going to do the fast caching, set cached to the
622		 * appropriate value and wakeup any waiters.
623		 */
624		spin_lock(&cache->lock);
625		if (load_cache_only) {
626			cache->caching_ctl = NULL;
627			cache->cached = BTRFS_CACHE_NO;
628		} else {
629			cache->cached = BTRFS_CACHE_STARTED;
630			cache->has_caching_ctl = 1;
631		}
632		spin_unlock(&cache->lock);
633		wake_up(&caching_ctl->wait);
634	}
635
636	if (load_cache_only) {
637		put_caching_control(caching_ctl);
638		return 0;
639	}
640
641	down_write(&fs_info->commit_root_sem);
642	atomic_inc(&caching_ctl->count);
643	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
644	up_write(&fs_info->commit_root_sem);
645
646	btrfs_get_block_group(cache);
647
648	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
649
650	return ret;
651}
652
653/*
654 * return the block group that starts at or after bytenr
655 */
656static struct btrfs_block_group_cache *
657btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
658{
659	struct btrfs_block_group_cache *cache;
660
661	cache = block_group_cache_tree_search(info, bytenr, 0);
662
663	return cache;
664}
665
666/*
667 * return the block group that contains the given bytenr
668 */
669struct btrfs_block_group_cache *btrfs_lookup_block_group(
670						 struct btrfs_fs_info *info,
671						 u64 bytenr)
672{
673	struct btrfs_block_group_cache *cache;
674
675	cache = block_group_cache_tree_search(info, bytenr, 1);
676
677	return cache;
678}
679
680static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
681						  u64 flags)
682{
683	struct list_head *head = &info->space_info;
684	struct btrfs_space_info *found;
685
686	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
687
688	rcu_read_lock();
689	list_for_each_entry_rcu(found, head, list) {
690		if (found->flags & flags) {
691			rcu_read_unlock();
692			return found;
693		}
694	}
695	rcu_read_unlock();
696	return NULL;
697}
698
699/*
700 * after adding space to the filesystem, we need to clear the full flags
701 * on all the space infos.
702 */
703void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
704{
705	struct list_head *head = &info->space_info;
706	struct btrfs_space_info *found;
707
708	rcu_read_lock();
709	list_for_each_entry_rcu(found, head, list)
710		found->full = 0;
711	rcu_read_unlock();
712}
713
714/* simple helper to search for an existing data extent at a given offset */
715int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
716{
717	int ret;
718	struct btrfs_key key;
719	struct btrfs_path *path;
720
721	path = btrfs_alloc_path();
722	if (!path)
723		return -ENOMEM;
724
725	key.objectid = start;
726	key.offset = len;
727	key.type = BTRFS_EXTENT_ITEM_KEY;
728	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
729				0, 0);
730	btrfs_free_path(path);
731	return ret;
732}
733
734/*
735 * helper function to lookup reference count and flags of a tree block.
736 *
737 * the head node for delayed ref is used to store the sum of all the
738 * reference count modifications queued up in the rbtree. the head
739 * node may also store the extent flags to set. This way you can check
740 * to see what the reference count and extent flags would be if all of
741 * the delayed refs are not processed.
742 */
743int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
744			     struct btrfs_root *root, u64 bytenr,
745			     u64 offset, int metadata, u64 *refs, u64 *flags)
746{
747	struct btrfs_delayed_ref_head *head;
748	struct btrfs_delayed_ref_root *delayed_refs;
749	struct btrfs_path *path;
750	struct btrfs_extent_item *ei;
751	struct extent_buffer *leaf;
752	struct btrfs_key key;
753	u32 item_size;
754	u64 num_refs;
755	u64 extent_flags;
756	int ret;
757
758	/*
759	 * If we don't have skinny metadata, don't bother doing anything
760	 * different
761	 */
762	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
763		offset = root->nodesize;
764		metadata = 0;
765	}
766
767	path = btrfs_alloc_path();
768	if (!path)
769		return -ENOMEM;
770
771	if (!trans) {
772		path->skip_locking = 1;
773		path->search_commit_root = 1;
774	}
775
776search_again:
777	key.objectid = bytenr;
778	key.offset = offset;
779	if (metadata)
780		key.type = BTRFS_METADATA_ITEM_KEY;
781	else
782		key.type = BTRFS_EXTENT_ITEM_KEY;
783
784	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
785				&key, path, 0, 0);
786	if (ret < 0)
787		goto out_free;
788
789	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
790		if (path->slots[0]) {
791			path->slots[0]--;
792			btrfs_item_key_to_cpu(path->nodes[0], &key,
793					      path->slots[0]);
794			if (key.objectid == bytenr &&
795			    key.type == BTRFS_EXTENT_ITEM_KEY &&
796			    key.offset == root->nodesize)
797				ret = 0;
798		}
799	}
800
801	if (ret == 0) {
802		leaf = path->nodes[0];
803		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
804		if (item_size >= sizeof(*ei)) {
805			ei = btrfs_item_ptr(leaf, path->slots[0],
806					    struct btrfs_extent_item);
807			num_refs = btrfs_extent_refs(leaf, ei);
808			extent_flags = btrfs_extent_flags(leaf, ei);
809		} else {
810#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
811			struct btrfs_extent_item_v0 *ei0;
812			BUG_ON(item_size != sizeof(*ei0));
813			ei0 = btrfs_item_ptr(leaf, path->slots[0],
814					     struct btrfs_extent_item_v0);
815			num_refs = btrfs_extent_refs_v0(leaf, ei0);
816			/* FIXME: this isn't correct for data */
817			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
818#else
819			BUG();
820#endif
821		}
822		BUG_ON(num_refs == 0);
823	} else {
824		num_refs = 0;
825		extent_flags = 0;
826		ret = 0;
827	}
828
829	if (!trans)
830		goto out;
831
832	delayed_refs = &trans->transaction->delayed_refs;
833	spin_lock(&delayed_refs->lock);
834	head = btrfs_find_delayed_ref_head(trans, bytenr);
835	if (head) {
836		if (!mutex_trylock(&head->mutex)) {
837			atomic_inc(&head->node.refs);
838			spin_unlock(&delayed_refs->lock);
839
840			btrfs_release_path(path);
841
842			/*
843			 * Mutex was contended, block until it's released and try
844			 * again
845			 */
846			mutex_lock(&head->mutex);
847			mutex_unlock(&head->mutex);
848			btrfs_put_delayed_ref(&head->node);
849			goto search_again;
850		}
851		spin_lock(&head->lock);
852		if (head->extent_op && head->extent_op->update_flags)
853			extent_flags |= head->extent_op->flags_to_set;
854		else
855			BUG_ON(num_refs == 0);
856
857		num_refs += head->node.ref_mod;
858		spin_unlock(&head->lock);
859		mutex_unlock(&head->mutex);
860	}
861	spin_unlock(&delayed_refs->lock);
862out:
863	WARN_ON(num_refs == 0);
864	if (refs)
865		*refs = num_refs;
866	if (flags)
867		*flags = extent_flags;
868out_free:
869	btrfs_free_path(path);
870	return ret;
871}
872
873/*
874 * Back reference rules.  Back refs have three main goals:
875 *
876 * 1) differentiate between all holders of references to an extent so that
877 *    when a reference is dropped we can make sure it was a valid reference
878 *    before freeing the extent.
879 *
880 * 2) Provide enough information to quickly find the holders of an extent
881 *    if we notice a given block is corrupted or bad.
882 *
883 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
884 *    maintenance.  This is actually the same as #2, but with a slightly
885 *    different use case.
886 *
887 * There are two kinds of back refs. The implicit back refs is optimized
888 * for pointers in non-shared tree blocks. For a given pointer in a block,
889 * back refs of this kind provide information about the block's owner tree
890 * and the pointer's key. These information allow us to find the block by
891 * b-tree searching. The full back refs is for pointers in tree blocks not
892 * referenced by their owner trees. The location of tree block is recorded
893 * in the back refs. Actually the full back refs is generic, and can be
894 * used in all cases the implicit back refs is used. The major shortcoming
895 * of the full back refs is its overhead. Every time a tree block gets
896 * COWed, we have to update back refs entry for all pointers in it.
897 *
898 * For a newly allocated tree block, we use implicit back refs for
899 * pointers in it. This means most tree related operations only involve
900 * implicit back refs. For a tree block created in old transaction, the
901 * only way to drop a reference to it is COW it. So we can detect the
902 * event that tree block loses its owner tree's reference and do the
903 * back refs conversion.
904 *
905 * When a tree block is COW'd through a tree, there are four cases:
906 *
907 * The reference count of the block is one and the tree is the block's
908 * owner tree. Nothing to do in this case.
909 *
910 * The reference count of the block is one and the tree is not the
911 * block's owner tree. In this case, full back refs is used for pointers
912 * in the block. Remove these full back refs, add implicit back refs for
913 * every pointers in the new block.
914 *
915 * The reference count of the block is greater than one and the tree is
916 * the block's owner tree. In this case, implicit back refs is used for
917 * pointers in the block. Add full back refs for every pointers in the
918 * block, increase lower level extents' reference counts. The original
919 * implicit back refs are entailed to the new block.
920 *
921 * The reference count of the block is greater than one and the tree is
922 * not the block's owner tree. Add implicit back refs for every pointer in
923 * the new block, increase lower level extents' reference count.
924 *
925 * Back Reference Key composing:
926 *
927 * The key objectid corresponds to the first byte in the extent,
928 * The key type is used to differentiate between types of back refs.
929 * There are different meanings of the key offset for different types
930 * of back refs.
931 *
932 * File extents can be referenced by:
933 *
934 * - multiple snapshots, subvolumes, or different generations in one subvol
935 * - different files inside a single subvolume
936 * - different offsets inside a file (bookend extents in file.c)
937 *
938 * The extent ref structure for the implicit back refs has fields for:
939 *
940 * - Objectid of the subvolume root
941 * - objectid of the file holding the reference
942 * - original offset in the file
943 * - how many bookend extents
944 *
945 * The key offset for the implicit back refs is hash of the first
946 * three fields.
947 *
948 * The extent ref structure for the full back refs has field for:
949 *
950 * - number of pointers in the tree leaf
951 *
952 * The key offset for the implicit back refs is the first byte of
953 * the tree leaf
954 *
955 * When a file extent is allocated, The implicit back refs is used.
956 * the fields are filled in:
957 *
958 *     (root_key.objectid, inode objectid, offset in file, 1)
959 *
960 * When a file extent is removed file truncation, we find the
961 * corresponding implicit back refs and check the following fields:
962 *
963 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
964 *
965 * Btree extents can be referenced by:
966 *
967 * - Different subvolumes
968 *
969 * Both the implicit back refs and the full back refs for tree blocks
970 * only consist of key. The key offset for the implicit back refs is
971 * objectid of block's owner tree. The key offset for the full back refs
972 * is the first byte of parent block.
973 *
974 * When implicit back refs is used, information about the lowest key and
975 * level of the tree block are required. These information are stored in
976 * tree block info structure.
977 */
978
979#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
980static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
981				  struct btrfs_root *root,
982				  struct btrfs_path *path,
983				  u64 owner, u32 extra_size)
984{
985	struct btrfs_extent_item *item;
986	struct btrfs_extent_item_v0 *ei0;
987	struct btrfs_extent_ref_v0 *ref0;
988	struct btrfs_tree_block_info *bi;
989	struct extent_buffer *leaf;
990	struct btrfs_key key;
991	struct btrfs_key found_key;
992	u32 new_size = sizeof(*item);
993	u64 refs;
994	int ret;
995
996	leaf = path->nodes[0];
997	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
998
999	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1000	ei0 = btrfs_item_ptr(leaf, path->slots[0],
1001			     struct btrfs_extent_item_v0);
1002	refs = btrfs_extent_refs_v0(leaf, ei0);
1003
1004	if (owner == (u64)-1) {
1005		while (1) {
1006			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1007				ret = btrfs_next_leaf(root, path);
1008				if (ret < 0)
1009					return ret;
1010				BUG_ON(ret > 0); /* Corruption */
1011				leaf = path->nodes[0];
1012			}
1013			btrfs_item_key_to_cpu(leaf, &found_key,
1014					      path->slots[0]);
1015			BUG_ON(key.objectid != found_key.objectid);
1016			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1017				path->slots[0]++;
1018				continue;
1019			}
1020			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1021					      struct btrfs_extent_ref_v0);
1022			owner = btrfs_ref_objectid_v0(leaf, ref0);
1023			break;
1024		}
1025	}
1026	btrfs_release_path(path);
1027
1028	if (owner < BTRFS_FIRST_FREE_OBJECTID)
1029		new_size += sizeof(*bi);
1030
1031	new_size -= sizeof(*ei0);
1032	ret = btrfs_search_slot(trans, root, &key, path,
1033				new_size + extra_size, 1);
1034	if (ret < 0)
1035		return ret;
1036	BUG_ON(ret); /* Corruption */
1037
1038	btrfs_extend_item(root, path, new_size);
1039
1040	leaf = path->nodes[0];
1041	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1042	btrfs_set_extent_refs(leaf, item, refs);
1043	/* FIXME: get real generation */
1044	btrfs_set_extent_generation(leaf, item, 0);
1045	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1046		btrfs_set_extent_flags(leaf, item,
1047				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1048				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1049		bi = (struct btrfs_tree_block_info *)(item + 1);
1050		/* FIXME: get first key of the block */
1051		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1052		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1053	} else {
1054		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1055	}
1056	btrfs_mark_buffer_dirty(leaf);
1057	return 0;
1058}
1059#endif
1060
1061static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1062{
1063	u32 high_crc = ~(u32)0;
1064	u32 low_crc = ~(u32)0;
1065	__le64 lenum;
1066
1067	lenum = cpu_to_le64(root_objectid);
1068	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1069	lenum = cpu_to_le64(owner);
1070	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1071	lenum = cpu_to_le64(offset);
1072	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1073
1074	return ((u64)high_crc << 31) ^ (u64)low_crc;
1075}
1076
1077static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1078				     struct btrfs_extent_data_ref *ref)
1079{
1080	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1081				    btrfs_extent_data_ref_objectid(leaf, ref),
1082				    btrfs_extent_data_ref_offset(leaf, ref));
1083}
1084
1085static int match_extent_data_ref(struct extent_buffer *leaf,
1086				 struct btrfs_extent_data_ref *ref,
1087				 u64 root_objectid, u64 owner, u64 offset)
1088{
1089	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1090	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1091	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1092		return 0;
1093	return 1;
1094}
1095
1096static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1097					   struct btrfs_root *root,
1098					   struct btrfs_path *path,
1099					   u64 bytenr, u64 parent,
1100					   u64 root_objectid,
1101					   u64 owner, u64 offset)
1102{
1103	struct btrfs_key key;
1104	struct btrfs_extent_data_ref *ref;
1105	struct extent_buffer *leaf;
1106	u32 nritems;
1107	int ret;
1108	int recow;
1109	int err = -ENOENT;
1110
1111	key.objectid = bytenr;
1112	if (parent) {
1113		key.type = BTRFS_SHARED_DATA_REF_KEY;
1114		key.offset = parent;
1115	} else {
1116		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1117		key.offset = hash_extent_data_ref(root_objectid,
1118						  owner, offset);
1119	}
1120again:
1121	recow = 0;
1122	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1123	if (ret < 0) {
1124		err = ret;
1125		goto fail;
1126	}
1127
1128	if (parent) {
1129		if (!ret)
1130			return 0;
1131#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1132		key.type = BTRFS_EXTENT_REF_V0_KEY;
1133		btrfs_release_path(path);
1134		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1135		if (ret < 0) {
1136			err = ret;
1137			goto fail;
1138		}
1139		if (!ret)
1140			return 0;
1141#endif
1142		goto fail;
1143	}
1144
1145	leaf = path->nodes[0];
1146	nritems = btrfs_header_nritems(leaf);
1147	while (1) {
1148		if (path->slots[0] >= nritems) {
1149			ret = btrfs_next_leaf(root, path);
1150			if (ret < 0)
1151				err = ret;
1152			if (ret)
1153				goto fail;
1154
1155			leaf = path->nodes[0];
1156			nritems = btrfs_header_nritems(leaf);
1157			recow = 1;
1158		}
1159
1160		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1161		if (key.objectid != bytenr ||
1162		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1163			goto fail;
1164
1165		ref = btrfs_item_ptr(leaf, path->slots[0],
1166				     struct btrfs_extent_data_ref);
1167
1168		if (match_extent_data_ref(leaf, ref, root_objectid,
1169					  owner, offset)) {
1170			if (recow) {
1171				btrfs_release_path(path);
1172				goto again;
1173			}
1174			err = 0;
1175			break;
1176		}
1177		path->slots[0]++;
1178	}
1179fail:
1180	return err;
1181}
1182
1183static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1184					   struct btrfs_root *root,
1185					   struct btrfs_path *path,
1186					   u64 bytenr, u64 parent,
1187					   u64 root_objectid, u64 owner,
1188					   u64 offset, int refs_to_add)
1189{
1190	struct btrfs_key key;
1191	struct extent_buffer *leaf;
1192	u32 size;
1193	u32 num_refs;
1194	int ret;
1195
1196	key.objectid = bytenr;
1197	if (parent) {
1198		key.type = BTRFS_SHARED_DATA_REF_KEY;
1199		key.offset = parent;
1200		size = sizeof(struct btrfs_shared_data_ref);
1201	} else {
1202		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1203		key.offset = hash_extent_data_ref(root_objectid,
1204						  owner, offset);
1205		size = sizeof(struct btrfs_extent_data_ref);
1206	}
1207
1208	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1209	if (ret && ret != -EEXIST)
1210		goto fail;
1211
1212	leaf = path->nodes[0];
1213	if (parent) {
1214		struct btrfs_shared_data_ref *ref;
1215		ref = btrfs_item_ptr(leaf, path->slots[0],
1216				     struct btrfs_shared_data_ref);
1217		if (ret == 0) {
1218			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1219		} else {
1220			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1221			num_refs += refs_to_add;
1222			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1223		}
1224	} else {
1225		struct btrfs_extent_data_ref *ref;
1226		while (ret == -EEXIST) {
1227			ref = btrfs_item_ptr(leaf, path->slots[0],
1228					     struct btrfs_extent_data_ref);
1229			if (match_extent_data_ref(leaf, ref, root_objectid,
1230						  owner, offset))
1231				break;
1232			btrfs_release_path(path);
1233			key.offset++;
1234			ret = btrfs_insert_empty_item(trans, root, path, &key,
1235						      size);
1236			if (ret && ret != -EEXIST)
1237				goto fail;
1238
1239			leaf = path->nodes[0];
1240		}
1241		ref = btrfs_item_ptr(leaf, path->slots[0],
1242				     struct btrfs_extent_data_ref);
1243		if (ret == 0) {
1244			btrfs_set_extent_data_ref_root(leaf, ref,
1245						       root_objectid);
1246			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1247			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1248			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1249		} else {
1250			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1251			num_refs += refs_to_add;
1252			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1253		}
1254	}
1255	btrfs_mark_buffer_dirty(leaf);
1256	ret = 0;
1257fail:
1258	btrfs_release_path(path);
1259	return ret;
1260}
1261
1262static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1263					   struct btrfs_root *root,
1264					   struct btrfs_path *path,
1265					   int refs_to_drop, int *last_ref)
1266{
1267	struct btrfs_key key;
1268	struct btrfs_extent_data_ref *ref1 = NULL;
1269	struct btrfs_shared_data_ref *ref2 = NULL;
1270	struct extent_buffer *leaf;
1271	u32 num_refs = 0;
1272	int ret = 0;
1273
1274	leaf = path->nodes[0];
1275	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1276
1277	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1278		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1279				      struct btrfs_extent_data_ref);
1280		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1281	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1282		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1283				      struct btrfs_shared_data_ref);
1284		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1285#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1286	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1287		struct btrfs_extent_ref_v0 *ref0;
1288		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1289				      struct btrfs_extent_ref_v0);
1290		num_refs = btrfs_ref_count_v0(leaf, ref0);
1291#endif
1292	} else {
1293		BUG();
1294	}
1295
1296	BUG_ON(num_refs < refs_to_drop);
1297	num_refs -= refs_to_drop;
1298
1299	if (num_refs == 0) {
1300		ret = btrfs_del_item(trans, root, path);
1301		*last_ref = 1;
1302	} else {
1303		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1304			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1305		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1306			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1307#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1308		else {
1309			struct btrfs_extent_ref_v0 *ref0;
1310			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1311					struct btrfs_extent_ref_v0);
1312			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1313		}
1314#endif
1315		btrfs_mark_buffer_dirty(leaf);
1316	}
1317	return ret;
1318}
1319
1320static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1321					  struct btrfs_path *path,
1322					  struct btrfs_extent_inline_ref *iref)
1323{
1324	struct btrfs_key key;
1325	struct extent_buffer *leaf;
1326	struct btrfs_extent_data_ref *ref1;
1327	struct btrfs_shared_data_ref *ref2;
1328	u32 num_refs = 0;
1329
1330	leaf = path->nodes[0];
1331	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1332	if (iref) {
1333		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1334		    BTRFS_EXTENT_DATA_REF_KEY) {
1335			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1336			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1337		} else {
1338			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1339			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1340		}
1341	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1342		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1343				      struct btrfs_extent_data_ref);
1344		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1346		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1347				      struct btrfs_shared_data_ref);
1348		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1349#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1350	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1351		struct btrfs_extent_ref_v0 *ref0;
1352		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1353				      struct btrfs_extent_ref_v0);
1354		num_refs = btrfs_ref_count_v0(leaf, ref0);
1355#endif
1356	} else {
1357		WARN_ON(1);
1358	}
1359	return num_refs;
1360}
1361
1362static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1363					  struct btrfs_root *root,
1364					  struct btrfs_path *path,
1365					  u64 bytenr, u64 parent,
1366					  u64 root_objectid)
1367{
1368	struct btrfs_key key;
1369	int ret;
1370
1371	key.objectid = bytenr;
1372	if (parent) {
1373		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1374		key.offset = parent;
1375	} else {
1376		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1377		key.offset = root_objectid;
1378	}
1379
1380	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1381	if (ret > 0)
1382		ret = -ENOENT;
1383#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1384	if (ret == -ENOENT && parent) {
1385		btrfs_release_path(path);
1386		key.type = BTRFS_EXTENT_REF_V0_KEY;
1387		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388		if (ret > 0)
1389			ret = -ENOENT;
1390	}
1391#endif
1392	return ret;
1393}
1394
1395static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1396					  struct btrfs_root *root,
1397					  struct btrfs_path *path,
1398					  u64 bytenr, u64 parent,
1399					  u64 root_objectid)
1400{
1401	struct btrfs_key key;
1402	int ret;
1403
1404	key.objectid = bytenr;
1405	if (parent) {
1406		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1407		key.offset = parent;
1408	} else {
1409		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1410		key.offset = root_objectid;
1411	}
1412
1413	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1414	btrfs_release_path(path);
1415	return ret;
1416}
1417
1418static inline int extent_ref_type(u64 parent, u64 owner)
1419{
1420	int type;
1421	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1422		if (parent > 0)
1423			type = BTRFS_SHARED_BLOCK_REF_KEY;
1424		else
1425			type = BTRFS_TREE_BLOCK_REF_KEY;
1426	} else {
1427		if (parent > 0)
1428			type = BTRFS_SHARED_DATA_REF_KEY;
1429		else
1430			type = BTRFS_EXTENT_DATA_REF_KEY;
1431	}
1432	return type;
1433}
1434
1435static int find_next_key(struct btrfs_path *path, int level,
1436			 struct btrfs_key *key)
1437
1438{
1439	for (; level < BTRFS_MAX_LEVEL; level++) {
1440		if (!path->nodes[level])
1441			break;
1442		if (path->slots[level] + 1 >=
1443		    btrfs_header_nritems(path->nodes[level]))
1444			continue;
1445		if (level == 0)
1446			btrfs_item_key_to_cpu(path->nodes[level], key,
1447					      path->slots[level] + 1);
1448		else
1449			btrfs_node_key_to_cpu(path->nodes[level], key,
1450					      path->slots[level] + 1);
1451		return 0;
1452	}
1453	return 1;
1454}
1455
1456/*
1457 * look for inline back ref. if back ref is found, *ref_ret is set
1458 * to the address of inline back ref, and 0 is returned.
1459 *
1460 * if back ref isn't found, *ref_ret is set to the address where it
1461 * should be inserted, and -ENOENT is returned.
1462 *
1463 * if insert is true and there are too many inline back refs, the path
1464 * points to the extent item, and -EAGAIN is returned.
1465 *
1466 * NOTE: inline back refs are ordered in the same way that back ref
1467 *	 items in the tree are ordered.
1468 */
1469static noinline_for_stack
1470int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1471				 struct btrfs_root *root,
1472				 struct btrfs_path *path,
1473				 struct btrfs_extent_inline_ref **ref_ret,
1474				 u64 bytenr, u64 num_bytes,
1475				 u64 parent, u64 root_objectid,
1476				 u64 owner, u64 offset, int insert)
1477{
1478	struct btrfs_key key;
1479	struct extent_buffer *leaf;
1480	struct btrfs_extent_item *ei;
1481	struct btrfs_extent_inline_ref *iref;
1482	u64 flags;
1483	u64 item_size;
1484	unsigned long ptr;
1485	unsigned long end;
1486	int extra_size;
1487	int type;
1488	int want;
1489	int ret;
1490	int err = 0;
1491	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1492						 SKINNY_METADATA);
1493
1494	key.objectid = bytenr;
1495	key.type = BTRFS_EXTENT_ITEM_KEY;
1496	key.offset = num_bytes;
1497
1498	want = extent_ref_type(parent, owner);
1499	if (insert) {
1500		extra_size = btrfs_extent_inline_ref_size(want);
1501		path->keep_locks = 1;
1502	} else
1503		extra_size = -1;
1504
1505	/*
1506	 * Owner is our parent level, so we can just add one to get the level
1507	 * for the block we are interested in.
1508	 */
1509	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1510		key.type = BTRFS_METADATA_ITEM_KEY;
1511		key.offset = owner;
1512	}
1513
1514again:
1515	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1516	if (ret < 0) {
1517		err = ret;
1518		goto out;
1519	}
1520
1521	/*
1522	 * We may be a newly converted file system which still has the old fat
1523	 * extent entries for metadata, so try and see if we have one of those.
1524	 */
1525	if (ret > 0 && skinny_metadata) {
1526		skinny_metadata = false;
1527		if (path->slots[0]) {
1528			path->slots[0]--;
1529			btrfs_item_key_to_cpu(path->nodes[0], &key,
1530					      path->slots[0]);
1531			if (key.objectid == bytenr &&
1532			    key.type == BTRFS_EXTENT_ITEM_KEY &&
1533			    key.offset == num_bytes)
1534				ret = 0;
1535		}
1536		if (ret) {
1537			key.objectid = bytenr;
1538			key.type = BTRFS_EXTENT_ITEM_KEY;
1539			key.offset = num_bytes;
1540			btrfs_release_path(path);
1541			goto again;
1542		}
1543	}
1544
1545	if (ret && !insert) {
1546		err = -ENOENT;
1547		goto out;
1548	} else if (WARN_ON(ret)) {
1549		err = -EIO;
1550		goto out;
1551	}
1552
1553	leaf = path->nodes[0];
1554	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1555#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1556	if (item_size < sizeof(*ei)) {
1557		if (!insert) {
1558			err = -ENOENT;
1559			goto out;
1560		}
1561		ret = convert_extent_item_v0(trans, root, path, owner,
1562					     extra_size);
1563		if (ret < 0) {
1564			err = ret;
1565			goto out;
1566		}
1567		leaf = path->nodes[0];
1568		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1569	}
1570#endif
1571	BUG_ON(item_size < sizeof(*ei));
1572
1573	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1574	flags = btrfs_extent_flags(leaf, ei);
1575
1576	ptr = (unsigned long)(ei + 1);
1577	end = (unsigned long)ei + item_size;
1578
1579	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1580		ptr += sizeof(struct btrfs_tree_block_info);
1581		BUG_ON(ptr > end);
1582	}
1583
1584	err = -ENOENT;
1585	while (1) {
1586		if (ptr >= end) {
1587			WARN_ON(ptr > end);
1588			break;
1589		}
1590		iref = (struct btrfs_extent_inline_ref *)ptr;
1591		type = btrfs_extent_inline_ref_type(leaf, iref);
1592		if (want < type)
1593			break;
1594		if (want > type) {
1595			ptr += btrfs_extent_inline_ref_size(type);
1596			continue;
1597		}
1598
1599		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1600			struct btrfs_extent_data_ref *dref;
1601			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1602			if (match_extent_data_ref(leaf, dref, root_objectid,
1603						  owner, offset)) {
1604				err = 0;
1605				break;
1606			}
1607			if (hash_extent_data_ref_item(leaf, dref) <
1608			    hash_extent_data_ref(root_objectid, owner, offset))
1609				break;
1610		} else {
1611			u64 ref_offset;
1612			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1613			if (parent > 0) {
1614				if (parent == ref_offset) {
1615					err = 0;
1616					break;
1617				}
1618				if (ref_offset < parent)
1619					break;
1620			} else {
1621				if (root_objectid == ref_offset) {
1622					err = 0;
1623					break;
1624				}
1625				if (ref_offset < root_objectid)
1626					break;
1627			}
1628		}
1629		ptr += btrfs_extent_inline_ref_size(type);
1630	}
1631	if (err == -ENOENT && insert) {
1632		if (item_size + extra_size >=
1633		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1634			err = -EAGAIN;
1635			goto out;
1636		}
1637		/*
1638		 * To add new inline back ref, we have to make sure
1639		 * there is no corresponding back ref item.
1640		 * For simplicity, we just do not add new inline back
1641		 * ref if there is any kind of item for this block
1642		 */
1643		if (find_next_key(path, 0, &key) == 0 &&
1644		    key.objectid == bytenr &&
1645		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1646			err = -EAGAIN;
1647			goto out;
1648		}
1649	}
1650	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1651out:
1652	if (insert) {
1653		path->keep_locks = 0;
1654		btrfs_unlock_up_safe(path, 1);
1655	}
1656	return err;
1657}
1658
1659/*
1660 * helper to add new inline back ref
1661 */
1662static noinline_for_stack
1663void setup_inline_extent_backref(struct btrfs_root *root,
1664				 struct btrfs_path *path,
1665				 struct btrfs_extent_inline_ref *iref,
1666				 u64 parent, u64 root_objectid,
1667				 u64 owner, u64 offset, int refs_to_add,
1668				 struct btrfs_delayed_extent_op *extent_op)
1669{
1670	struct extent_buffer *leaf;
1671	struct btrfs_extent_item *ei;
1672	unsigned long ptr;
1673	unsigned long end;
1674	unsigned long item_offset;
1675	u64 refs;
1676	int size;
1677	int type;
1678
1679	leaf = path->nodes[0];
1680	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681	item_offset = (unsigned long)iref - (unsigned long)ei;
1682
1683	type = extent_ref_type(parent, owner);
1684	size = btrfs_extent_inline_ref_size(type);
1685
1686	btrfs_extend_item(root, path, size);
1687
1688	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1689	refs = btrfs_extent_refs(leaf, ei);
1690	refs += refs_to_add;
1691	btrfs_set_extent_refs(leaf, ei, refs);
1692	if (extent_op)
1693		__run_delayed_extent_op(extent_op, leaf, ei);
1694
1695	ptr = (unsigned long)ei + item_offset;
1696	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1697	if (ptr < end - size)
1698		memmove_extent_buffer(leaf, ptr + size, ptr,
1699				      end - size - ptr);
1700
1701	iref = (struct btrfs_extent_inline_ref *)ptr;
1702	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1703	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1704		struct btrfs_extent_data_ref *dref;
1705		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1706		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1707		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1708		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1709		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1710	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1711		struct btrfs_shared_data_ref *sref;
1712		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1713		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1714		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1716		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1717	} else {
1718		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1719	}
1720	btrfs_mark_buffer_dirty(leaf);
1721}
1722
1723static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1724				 struct btrfs_root *root,
1725				 struct btrfs_path *path,
1726				 struct btrfs_extent_inline_ref **ref_ret,
1727				 u64 bytenr, u64 num_bytes, u64 parent,
1728				 u64 root_objectid, u64 owner, u64 offset)
1729{
1730	int ret;
1731
1732	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1733					   bytenr, num_bytes, parent,
1734					   root_objectid, owner, offset, 0);
1735	if (ret != -ENOENT)
1736		return ret;
1737
1738	btrfs_release_path(path);
1739	*ref_ret = NULL;
1740
1741	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1742		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1743					    root_objectid);
1744	} else {
1745		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1746					     root_objectid, owner, offset);
1747	}
1748	return ret;
1749}
1750
1751/*
1752 * helper to update/remove inline back ref
1753 */
1754static noinline_for_stack
1755void update_inline_extent_backref(struct btrfs_root *root,
1756				  struct btrfs_path *path,
1757				  struct btrfs_extent_inline_ref *iref,
1758				  int refs_to_mod,
1759				  struct btrfs_delayed_extent_op *extent_op,
1760				  int *last_ref)
1761{
1762	struct extent_buffer *leaf;
1763	struct btrfs_extent_item *ei;
1764	struct btrfs_extent_data_ref *dref = NULL;
1765	struct btrfs_shared_data_ref *sref = NULL;
1766	unsigned long ptr;
1767	unsigned long end;
1768	u32 item_size;
1769	int size;
1770	int type;
1771	u64 refs;
1772
1773	leaf = path->nodes[0];
1774	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1775	refs = btrfs_extent_refs(leaf, ei);
1776	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1777	refs += refs_to_mod;
1778	btrfs_set_extent_refs(leaf, ei, refs);
1779	if (extent_op)
1780		__run_delayed_extent_op(extent_op, leaf, ei);
1781
1782	type = btrfs_extent_inline_ref_type(leaf, iref);
1783
1784	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1785		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1786		refs = btrfs_extent_data_ref_count(leaf, dref);
1787	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1788		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1789		refs = btrfs_shared_data_ref_count(leaf, sref);
1790	} else {
1791		refs = 1;
1792		BUG_ON(refs_to_mod != -1);
1793	}
1794
1795	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1796	refs += refs_to_mod;
1797
1798	if (refs > 0) {
1799		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1800			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1801		else
1802			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1803	} else {
1804		*last_ref = 1;
1805		size =  btrfs_extent_inline_ref_size(type);
1806		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1807		ptr = (unsigned long)iref;
1808		end = (unsigned long)ei + item_size;
1809		if (ptr + size < end)
1810			memmove_extent_buffer(leaf, ptr, ptr + size,
1811					      end - ptr - size);
1812		item_size -= size;
1813		btrfs_truncate_item(root, path, item_size, 1);
1814	}
1815	btrfs_mark_buffer_dirty(leaf);
1816}
1817
1818static noinline_for_stack
1819int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1820				 struct btrfs_root *root,
1821				 struct btrfs_path *path,
1822				 u64 bytenr, u64 num_bytes, u64 parent,
1823				 u64 root_objectid, u64 owner,
1824				 u64 offset, int refs_to_add,
1825				 struct btrfs_delayed_extent_op *extent_op)
1826{
1827	struct btrfs_extent_inline_ref *iref;
1828	int ret;
1829
1830	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1831					   bytenr, num_bytes, parent,
1832					   root_objectid, owner, offset, 1);
1833	if (ret == 0) {
1834		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1835		update_inline_extent_backref(root, path, iref,
1836					     refs_to_add, extent_op, NULL);
1837	} else if (ret == -ENOENT) {
1838		setup_inline_extent_backref(root, path, iref, parent,
1839					    root_objectid, owner, offset,
1840					    refs_to_add, extent_op);
1841		ret = 0;
1842	}
1843	return ret;
1844}
1845
1846static int insert_extent_backref(struct btrfs_trans_handle *trans,
1847				 struct btrfs_root *root,
1848				 struct btrfs_path *path,
1849				 u64 bytenr, u64 parent, u64 root_objectid,
1850				 u64 owner, u64 offset, int refs_to_add)
1851{
1852	int ret;
1853	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1854		BUG_ON(refs_to_add != 1);
1855		ret = insert_tree_block_ref(trans, root, path, bytenr,
1856					    parent, root_objectid);
1857	} else {
1858		ret = insert_extent_data_ref(trans, root, path, bytenr,
1859					     parent, root_objectid,
1860					     owner, offset, refs_to_add);
1861	}
1862	return ret;
1863}
1864
1865static int remove_extent_backref(struct btrfs_trans_handle *trans,
1866				 struct btrfs_root *root,
1867				 struct btrfs_path *path,
1868				 struct btrfs_extent_inline_ref *iref,
1869				 int refs_to_drop, int is_data, int *last_ref)
1870{
1871	int ret = 0;
1872
1873	BUG_ON(!is_data && refs_to_drop != 1);
1874	if (iref) {
1875		update_inline_extent_backref(root, path, iref,
1876					     -refs_to_drop, NULL, last_ref);
1877	} else if (is_data) {
1878		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1879					     last_ref);
1880	} else {
1881		*last_ref = 1;
1882		ret = btrfs_del_item(trans, root, path);
1883	}
1884	return ret;
1885}
1886
1887static int btrfs_issue_discard(struct block_device *bdev,
1888				u64 start, u64 len)
1889{
1890	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891}
1892
1893int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894			 u64 num_bytes, u64 *actual_bytes)
1895{
1896	int ret;
1897	u64 discarded_bytes = 0;
1898	struct btrfs_bio *bbio = NULL;
1899
1900
1901	/* Tell the block device(s) that the sectors can be discarded */
1902	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903			      bytenr, &num_bytes, &bbio, 0);
1904	/* Error condition is -ENOMEM */
1905	if (!ret) {
1906		struct btrfs_bio_stripe *stripe = bbio->stripes;
1907		int i;
1908
1909
1910		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911			if (!stripe->dev->can_discard)
1912				continue;
1913
1914			ret = btrfs_issue_discard(stripe->dev->bdev,
1915						  stripe->physical,
1916						  stripe->length);
1917			if (!ret)
1918				discarded_bytes += stripe->length;
1919			else if (ret != -EOPNOTSUPP)
1920				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1921
1922			/*
1923			 * Just in case we get back EOPNOTSUPP for some reason,
1924			 * just ignore the return value so we don't screw up
1925			 * people calling discard_extent.
1926			 */
1927			ret = 0;
1928		}
1929		btrfs_put_bbio(bbio);
1930	}
1931
1932	if (actual_bytes)
1933		*actual_bytes = discarded_bytes;
1934
1935
1936	if (ret == -EOPNOTSUPP)
1937		ret = 0;
1938	return ret;
1939}
1940
1941/* Can return -ENOMEM */
1942int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943			 struct btrfs_root *root,
1944			 u64 bytenr, u64 num_bytes, u64 parent,
1945			 u64 root_objectid, u64 owner, u64 offset,
1946			 int no_quota)
1947{
1948	int ret;
1949	struct btrfs_fs_info *fs_info = root->fs_info;
1950
1951	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1952	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1953
1954	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1955		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1956					num_bytes,
1957					parent, root_objectid, (int)owner,
1958					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1959	} else {
1960		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1961					num_bytes,
1962					parent, root_objectid, owner, offset,
1963					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1964	}
1965	return ret;
1966}
1967
1968static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1969				  struct btrfs_root *root,
1970				  u64 bytenr, u64 num_bytes,
1971				  u64 parent, u64 root_objectid,
1972				  u64 owner, u64 offset, int refs_to_add,
1973				  int no_quota,
1974				  struct btrfs_delayed_extent_op *extent_op)
1975{
1976	struct btrfs_fs_info *fs_info = root->fs_info;
1977	struct btrfs_path *path;
1978	struct extent_buffer *leaf;
1979	struct btrfs_extent_item *item;
1980	struct btrfs_key key;
1981	u64 refs;
1982	int ret;
1983	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1984
1985	path = btrfs_alloc_path();
1986	if (!path)
1987		return -ENOMEM;
1988
1989	if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1990		no_quota = 1;
1991
1992	path->reada = 1;
1993	path->leave_spinning = 1;
1994	/* this will setup the path even if it fails to insert the back ref */
1995	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1996					   bytenr, num_bytes, parent,
1997					   root_objectid, owner, offset,
1998					   refs_to_add, extent_op);
1999	if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2000		goto out;
2001	/*
2002	 * Ok we were able to insert an inline extent and it appears to be a new
2003	 * reference, deal with the qgroup accounting.
2004	 */
2005	if (!ret && !no_quota) {
2006		ASSERT(root->fs_info->quota_enabled);
2007		leaf = path->nodes[0];
2008		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2009		item = btrfs_item_ptr(leaf, path->slots[0],
2010				      struct btrfs_extent_item);
2011		if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2012			type = BTRFS_QGROUP_OPER_ADD_SHARED;
2013		btrfs_release_path(path);
2014
2015		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2016					      bytenr, num_bytes, type, 0);
2017		goto out;
2018	}
2019
2020	/*
2021	 * Ok we had -EAGAIN which means we didn't have space to insert and
2022	 * inline extent ref, so just update the reference count and add a
2023	 * normal backref.
2024	 */
2025	leaf = path->nodes[0];
2026	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2027	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2028	refs = btrfs_extent_refs(leaf, item);
2029	if (refs)
2030		type = BTRFS_QGROUP_OPER_ADD_SHARED;
2031	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2032	if (extent_op)
2033		__run_delayed_extent_op(extent_op, leaf, item);
2034
2035	btrfs_mark_buffer_dirty(leaf);
2036	btrfs_release_path(path);
2037
2038	if (!no_quota) {
2039		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2040					      bytenr, num_bytes, type, 0);
2041		if (ret)
2042			goto out;
2043	}
2044
2045	path->reada = 1;
2046	path->leave_spinning = 1;
2047	/* now insert the actual backref */
2048	ret = insert_extent_backref(trans, root->fs_info->extent_root,
2049				    path, bytenr, parent, root_objectid,
2050				    owner, offset, refs_to_add);
2051	if (ret)
2052		btrfs_abort_transaction(trans, root, ret);
2053out:
2054	btrfs_free_path(path);
2055	return ret;
2056}
2057
2058static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2059				struct btrfs_root *root,
2060				struct btrfs_delayed_ref_node *node,
2061				struct btrfs_delayed_extent_op *extent_op,
2062				int insert_reserved)
2063{
2064	int ret = 0;
2065	struct btrfs_delayed_data_ref *ref;
2066	struct btrfs_key ins;
2067	u64 parent = 0;
2068	u64 ref_root = 0;
2069	u64 flags = 0;
2070
2071	ins.objectid = node->bytenr;
2072	ins.offset = node->num_bytes;
2073	ins.type = BTRFS_EXTENT_ITEM_KEY;
2074
2075	ref = btrfs_delayed_node_to_data_ref(node);
2076	trace_run_delayed_data_ref(node, ref, node->action);
2077
2078	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2079		parent = ref->parent;
2080	ref_root = ref->root;
2081
2082	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2083		if (extent_op)
2084			flags |= extent_op->flags_to_set;
2085		ret = alloc_reserved_file_extent(trans, root,
2086						 parent, ref_root, flags,
2087						 ref->objectid, ref->offset,
2088						 &ins, node->ref_mod);
2089	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2090		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2091					     node->num_bytes, parent,
2092					     ref_root, ref->objectid,
2093					     ref->offset, node->ref_mod,
2094					     node->no_quota, extent_op);
2095	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2096		ret = __btrfs_free_extent(trans, root, node->bytenr,
2097					  node->num_bytes, parent,
2098					  ref_root, ref->objectid,
2099					  ref->offset, node->ref_mod,
2100					  extent_op, node->no_quota);
2101	} else {
2102		BUG();
2103	}
2104	return ret;
2105}
2106
2107static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2108				    struct extent_buffer *leaf,
2109				    struct btrfs_extent_item *ei)
2110{
2111	u64 flags = btrfs_extent_flags(leaf, ei);
2112	if (extent_op->update_flags) {
2113		flags |= extent_op->flags_to_set;
2114		btrfs_set_extent_flags(leaf, ei, flags);
2115	}
2116
2117	if (extent_op->update_key) {
2118		struct btrfs_tree_block_info *bi;
2119		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2120		bi = (struct btrfs_tree_block_info *)(ei + 1);
2121		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2122	}
2123}
2124
2125static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2126				 struct btrfs_root *root,
2127				 struct btrfs_delayed_ref_node *node,
2128				 struct btrfs_delayed_extent_op *extent_op)
2129{
2130	struct btrfs_key key;
2131	struct btrfs_path *path;
2132	struct btrfs_extent_item *ei;
2133	struct extent_buffer *leaf;
2134	u32 item_size;
2135	int ret;
2136	int err = 0;
2137	int metadata = !extent_op->is_data;
2138
2139	if (trans->aborted)
2140		return 0;
2141
2142	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2143		metadata = 0;
2144
2145	path = btrfs_alloc_path();
2146	if (!path)
2147		return -ENOMEM;
2148
2149	key.objectid = node->bytenr;
2150
2151	if (metadata) {
2152		key.type = BTRFS_METADATA_ITEM_KEY;
2153		key.offset = extent_op->level;
2154	} else {
2155		key.type = BTRFS_EXTENT_ITEM_KEY;
2156		key.offset = node->num_bytes;
2157	}
2158
2159again:
2160	path->reada = 1;
2161	path->leave_spinning = 1;
2162	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2163				path, 0, 1);
2164	if (ret < 0) {
2165		err = ret;
2166		goto out;
2167	}
2168	if (ret > 0) {
2169		if (metadata) {
2170			if (path->slots[0] > 0) {
2171				path->slots[0]--;
2172				btrfs_item_key_to_cpu(path->nodes[0], &key,
2173						      path->slots[0]);
2174				if (key.objectid == node->bytenr &&
2175				    key.type == BTRFS_EXTENT_ITEM_KEY &&
2176				    key.offset == node->num_bytes)
2177					ret = 0;
2178			}
2179			if (ret > 0) {
2180				btrfs_release_path(path);
2181				metadata = 0;
2182
2183				key.objectid = node->bytenr;
2184				key.offset = node->num_bytes;
2185				key.type = BTRFS_EXTENT_ITEM_KEY;
2186				goto again;
2187			}
2188		} else {
2189			err = -EIO;
2190			goto out;
2191		}
2192	}
2193
2194	leaf = path->nodes[0];
2195	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2196#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2197	if (item_size < sizeof(*ei)) {
2198		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2199					     path, (u64)-1, 0);
2200		if (ret < 0) {
2201			err = ret;
2202			goto out;
2203		}
2204		leaf = path->nodes[0];
2205		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2206	}
2207#endif
2208	BUG_ON(item_size < sizeof(*ei));
2209	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2210	__run_delayed_extent_op(extent_op, leaf, ei);
2211
2212	btrfs_mark_buffer_dirty(leaf);
2213out:
2214	btrfs_free_path(path);
2215	return err;
2216}
2217
2218static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2219				struct btrfs_root *root,
2220				struct btrfs_delayed_ref_node *node,
2221				struct btrfs_delayed_extent_op *extent_op,
2222				int insert_reserved)
2223{
2224	int ret = 0;
2225	struct btrfs_delayed_tree_ref *ref;
2226	struct btrfs_key ins;
2227	u64 parent = 0;
2228	u64 ref_root = 0;
2229	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2230						 SKINNY_METADATA);
2231
2232	ref = btrfs_delayed_node_to_tree_ref(node);
2233	trace_run_delayed_tree_ref(node, ref, node->action);
2234
2235	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2236		parent = ref->parent;
2237	ref_root = ref->root;
2238
2239	ins.objectid = node->bytenr;
2240	if (skinny_metadata) {
2241		ins.offset = ref->level;
2242		ins.type = BTRFS_METADATA_ITEM_KEY;
2243	} else {
2244		ins.offset = node->num_bytes;
2245		ins.type = BTRFS_EXTENT_ITEM_KEY;
2246	}
2247
2248	BUG_ON(node->ref_mod != 1);
2249	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2250		BUG_ON(!extent_op || !extent_op->update_flags);
2251		ret = alloc_reserved_tree_block(trans, root,
2252						parent, ref_root,
2253						extent_op->flags_to_set,
2254						&extent_op->key,
2255						ref->level, &ins,
2256						node->no_quota);
2257	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2258		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2259					     node->num_bytes, parent, ref_root,
2260					     ref->level, 0, 1, node->no_quota,
2261					     extent_op);
2262	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2263		ret = __btrfs_free_extent(trans, root, node->bytenr,
2264					  node->num_bytes, parent, ref_root,
2265					  ref->level, 0, 1, extent_op,
2266					  node->no_quota);
2267	} else {
2268		BUG();
2269	}
2270	return ret;
2271}
2272
2273/* helper function to actually process a single delayed ref entry */
2274static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2275			       struct btrfs_root *root,
2276			       struct btrfs_delayed_ref_node *node,
2277			       struct btrfs_delayed_extent_op *extent_op,
2278			       int insert_reserved)
2279{
2280	int ret = 0;
2281
2282	if (trans->aborted) {
2283		if (insert_reserved)
2284			btrfs_pin_extent(root, node->bytenr,
2285					 node->num_bytes, 1);
2286		return 0;
2287	}
2288
2289	if (btrfs_delayed_ref_is_head(node)) {
2290		struct btrfs_delayed_ref_head *head;
2291		/*
2292		 * we've hit the end of the chain and we were supposed
2293		 * to insert this extent into the tree.  But, it got
2294		 * deleted before we ever needed to insert it, so all
2295		 * we have to do is clean up the accounting
2296		 */
2297		BUG_ON(extent_op);
2298		head = btrfs_delayed_node_to_head(node);
2299		trace_run_delayed_ref_head(node, head, node->action);
2300
2301		if (insert_reserved) {
2302			btrfs_pin_extent(root, node->bytenr,
2303					 node->num_bytes, 1);
2304			if (head->is_data) {
2305				ret = btrfs_del_csums(trans, root,
2306						      node->bytenr,
2307						      node->num_bytes);
2308			}
2309		}
2310		return ret;
2311	}
2312
2313	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2314	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2315		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2316					   insert_reserved);
2317	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2318		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2319		ret = run_delayed_data_ref(trans, root, node, extent_op,
2320					   insert_reserved);
2321	else
2322		BUG();
2323	return ret;
2324}
2325
2326static noinline struct btrfs_delayed_ref_node *
2327select_delayed_ref(struct btrfs_delayed_ref_head *head)
2328{
2329	struct rb_node *node;
2330	struct btrfs_delayed_ref_node *ref, *last = NULL;;
2331
2332	/*
2333	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2334	 * this prevents ref count from going down to zero when
2335	 * there still are pending delayed ref.
2336	 */
2337	node = rb_first(&head->ref_root);
2338	while (node) {
2339		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2340				rb_node);
2341		if (ref->action == BTRFS_ADD_DELAYED_REF)
2342			return ref;
2343		else if (last == NULL)
2344			last = ref;
2345		node = rb_next(node);
2346	}
2347	return last;
2348}
2349
2350/*
2351 * Returns 0 on success or if called with an already aborted transaction.
2352 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2353 */
2354static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2355					     struct btrfs_root *root,
2356					     unsigned long nr)
2357{
2358	struct btrfs_delayed_ref_root *delayed_refs;
2359	struct btrfs_delayed_ref_node *ref;
2360	struct btrfs_delayed_ref_head *locked_ref = NULL;
2361	struct btrfs_delayed_extent_op *extent_op;
2362	struct btrfs_fs_info *fs_info = root->fs_info;
2363	ktime_t start = ktime_get();
2364	int ret;
2365	unsigned long count = 0;
2366	unsigned long actual_count = 0;
2367	int must_insert_reserved = 0;
2368
2369	delayed_refs = &trans->transaction->delayed_refs;
2370	while (1) {
2371		if (!locked_ref) {
2372			if (count >= nr)
2373				break;
2374
2375			spin_lock(&delayed_refs->lock);
2376			locked_ref = btrfs_select_ref_head(trans);
2377			if (!locked_ref) {
2378				spin_unlock(&delayed_refs->lock);
2379				break;
2380			}
2381
2382			/* grab the lock that says we are going to process
2383			 * all the refs for this head */
2384			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2385			spin_unlock(&delayed_refs->lock);
2386			/*
2387			 * we may have dropped the spin lock to get the head
2388			 * mutex lock, and that might have given someone else
2389			 * time to free the head.  If that's true, it has been
2390			 * removed from our list and we can move on.
2391			 */
2392			if (ret == -EAGAIN) {
2393				locked_ref = NULL;
2394				count++;
2395				continue;
2396			}
2397		}
2398
2399		/*
2400		 * We need to try and merge add/drops of the same ref since we
2401		 * can run into issues with relocate dropping the implicit ref
2402		 * and then it being added back again before the drop can
2403		 * finish.  If we merged anything we need to re-loop so we can
2404		 * get a good ref.
2405		 */
2406		spin_lock(&locked_ref->lock);
2407		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2408					 locked_ref);
2409
2410		/*
2411		 * locked_ref is the head node, so we have to go one
2412		 * node back for any delayed ref updates
2413		 */
2414		ref = select_delayed_ref(locked_ref);
2415
2416		if (ref && ref->seq &&
2417		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2418			spin_unlock(&locked_ref->lock);
2419			btrfs_delayed_ref_unlock(locked_ref);
2420			spin_lock(&delayed_refs->lock);
2421			locked_ref->processing = 0;
2422			delayed_refs->num_heads_ready++;
2423			spin_unlock(&delayed_refs->lock);
2424			locked_ref = NULL;
2425			cond_resched();
2426			count++;
2427			continue;
2428		}
2429
2430		/*
2431		 * record the must insert reserved flag before we
2432		 * drop the spin lock.
2433		 */
2434		must_insert_reserved = locked_ref->must_insert_reserved;
2435		locked_ref->must_insert_reserved = 0;
2436
2437		extent_op = locked_ref->extent_op;
2438		locked_ref->extent_op = NULL;
2439
2440		if (!ref) {
2441
2442
2443			/* All delayed refs have been processed, Go ahead
2444			 * and send the head node to run_one_delayed_ref,
2445			 * so that any accounting fixes can happen
2446			 */
2447			ref = &locked_ref->node;
2448
2449			if (extent_op && must_insert_reserved) {
2450				btrfs_free_delayed_extent_op(extent_op);
2451				extent_op = NULL;
2452			}
2453
2454			if (extent_op) {
2455				spin_unlock(&locked_ref->lock);
2456				ret = run_delayed_extent_op(trans, root,
2457							    ref, extent_op);
2458				btrfs_free_delayed_extent_op(extent_op);
2459
2460				if (ret) {
2461					/*
2462					 * Need to reset must_insert_reserved if
2463					 * there was an error so the abort stuff
2464					 * can cleanup the reserved space
2465					 * properly.
2466					 */
2467					if (must_insert_reserved)
2468						locked_ref->must_insert_reserved = 1;
2469					locked_ref->processing = 0;
2470					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2471					btrfs_delayed_ref_unlock(locked_ref);
2472					return ret;
2473				}
2474				continue;
2475			}
2476
2477			/*
2478			 * Need to drop our head ref lock and re-aqcuire the
2479			 * delayed ref lock and then re-check to make sure
2480			 * nobody got added.
2481			 */
2482			spin_unlock(&locked_ref->lock);
2483			spin_lock(&delayed_refs->lock);
2484			spin_lock(&locked_ref->lock);
2485			if (rb_first(&locked_ref->ref_root) ||
2486			    locked_ref->extent_op) {
2487				spin_unlock(&locked_ref->lock);
2488				spin_unlock(&delayed_refs->lock);
2489				continue;
2490			}
2491			ref->in_tree = 0;
2492			delayed_refs->num_heads--;
2493			rb_erase(&locked_ref->href_node,
2494				 &delayed_refs->href_root);
2495			spin_unlock(&delayed_refs->lock);
2496		} else {
2497			actual_count++;
2498			ref->in_tree = 0;
2499			rb_erase(&ref->rb_node, &locked_ref->ref_root);
2500		}
2501		atomic_dec(&delayed_refs->num_entries);
2502
2503		if (!btrfs_delayed_ref_is_head(ref)) {
2504			/*
2505			 * when we play the delayed ref, also correct the
2506			 * ref_mod on head
2507			 */
2508			switch (ref->action) {
2509			case BTRFS_ADD_DELAYED_REF:
2510			case BTRFS_ADD_DELAYED_EXTENT:
2511				locked_ref->node.ref_mod -= ref->ref_mod;
2512				break;
2513			case BTRFS_DROP_DELAYED_REF:
2514				locked_ref->node.ref_mod += ref->ref_mod;
2515				break;
2516			default:
2517				WARN_ON(1);
2518			}
2519		}
2520		spin_unlock(&locked_ref->lock);
2521
2522		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2523					  must_insert_reserved);
2524
2525		btrfs_free_delayed_extent_op(extent_op);
2526		if (ret) {
2527			locked_ref->processing = 0;
2528			btrfs_delayed_ref_unlock(locked_ref);
2529			btrfs_put_delayed_ref(ref);
2530			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2531			return ret;
2532		}
2533
2534		/*
2535		 * If this node is a head, that means all the refs in this head
2536		 * have been dealt with, and we will pick the next head to deal
2537		 * with, so we must unlock the head and drop it from the cluster
2538		 * list before we release it.
2539		 */
2540		if (btrfs_delayed_ref_is_head(ref)) {
2541			if (locked_ref->is_data &&
2542			    locked_ref->total_ref_mod < 0) {
2543				spin_lock(&delayed_refs->lock);
2544				delayed_refs->pending_csums -= ref->num_bytes;
2545				spin_unlock(&delayed_refs->lock);
2546			}
2547			btrfs_delayed_ref_unlock(locked_ref);
2548			locked_ref = NULL;
2549		}
2550		btrfs_put_delayed_ref(ref);
2551		count++;
2552		cond_resched();
2553	}
2554
2555	/*
2556	 * We don't want to include ref heads since we can have empty ref heads
2557	 * and those will drastically skew our runtime down since we just do
2558	 * accounting, no actual extent tree updates.
2559	 */
2560	if (actual_count > 0) {
2561		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2562		u64 avg;
2563
2564		/*
2565		 * We weigh the current average higher than our current runtime
2566		 * to avoid large swings in the average.
2567		 */
2568		spin_lock(&delayed_refs->lock);
2569		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2570		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
2571		spin_unlock(&delayed_refs->lock);
2572	}
2573	return 0;
2574}
2575
2576#ifdef SCRAMBLE_DELAYED_REFS
2577/*
2578 * Normally delayed refs get processed in ascending bytenr order. This
2579 * correlates in most cases to the order added. To expose dependencies on this
2580 * order, we start to process the tree in the middle instead of the beginning
2581 */
2582static u64 find_middle(struct rb_root *root)
2583{
2584	struct rb_node *n = root->rb_node;
2585	struct btrfs_delayed_ref_node *entry;
2586	int alt = 1;
2587	u64 middle;
2588	u64 first = 0, last = 0;
2589
2590	n = rb_first(root);
2591	if (n) {
2592		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2593		first = entry->bytenr;
2594	}
2595	n = rb_last(root);
2596	if (n) {
2597		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2598		last = entry->bytenr;
2599	}
2600	n = root->rb_node;
2601
2602	while (n) {
2603		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2604		WARN_ON(!entry->in_tree);
2605
2606		middle = entry->bytenr;
2607
2608		if (alt)
2609			n = n->rb_left;
2610		else
2611			n = n->rb_right;
2612
2613		alt = 1 - alt;
2614	}
2615	return middle;
2616}
2617#endif
2618
2619static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2620{
2621	u64 num_bytes;
2622
2623	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2624			     sizeof(struct btrfs_extent_inline_ref));
2625	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2626		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2627
2628	/*
2629	 * We don't ever fill up leaves all the way so multiply by 2 just to be
2630	 * closer to what we're really going to want to ouse.
2631	 */
2632	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2633}
2634
2635/*
2636 * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2637 * would require to store the csums for that many bytes.
2638 */
2639u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2640{
2641	u64 csum_size;
2642	u64 num_csums_per_leaf;
2643	u64 num_csums;
2644
2645	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2646	num_csums_per_leaf = div64_u64(csum_size,
2647			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
2648	num_csums = div64_u64(csum_bytes, root->sectorsize);
2649	num_csums += num_csums_per_leaf - 1;
2650	num_csums = div64_u64(num_csums, num_csums_per_leaf);
2651	return num_csums;
2652}
2653
2654int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2655				       struct btrfs_root *root)
2656{
2657	struct btrfs_block_rsv *global_rsv;
2658	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2659	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2660	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2661	u64 num_bytes, num_dirty_bgs_bytes;
2662	int ret = 0;
2663
2664	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2665	num_heads = heads_to_leaves(root, num_heads);
2666	if (num_heads > 1)
2667		num_bytes += (num_heads - 1) * root->nodesize;
2668	num_bytes <<= 1;
2669	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2670	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2671							     num_dirty_bgs);
2672	global_rsv = &root->fs_info->global_block_rsv;
2673
2674	/*
2675	 * If we can't allocate any more chunks lets make sure we have _lots_ of
2676	 * wiggle room since running delayed refs can create more delayed refs.
2677	 */
2678	if (global_rsv->space_info->full) {
2679		num_dirty_bgs_bytes <<= 1;
2680		num_bytes <<= 1;
2681	}
2682
2683	spin_lock(&global_rsv->lock);
2684	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2685		ret = 1;
2686	spin_unlock(&global_rsv->lock);
2687	return ret;
2688}
2689
2690int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2691				       struct btrfs_root *root)
2692{
2693	struct btrfs_fs_info *fs_info = root->fs_info;
2694	u64 num_entries =
2695		atomic_read(&trans->transaction->delayed_refs.num_entries);
2696	u64 avg_runtime;
2697	u64 val;
2698
2699	smp_mb();
2700	avg_runtime = fs_info->avg_delayed_ref_runtime;
2701	val = num_entries * avg_runtime;
2702	if (num_entries * avg_runtime >= NSEC_PER_SEC)
2703		return 1;
2704	if (val >= NSEC_PER_SEC / 2)
2705		return 2;
2706
2707	return btrfs_check_space_for_delayed_refs(trans, root);
2708}
2709
2710struct async_delayed_refs {
2711	struct btrfs_root *root;
2712	int count;
2713	int error;
2714	int sync;
2715	struct completion wait;
2716	struct btrfs_work work;
2717};
2718
2719static void delayed_ref_async_start(struct btrfs_work *work)
2720{
2721	struct async_delayed_refs *async;
2722	struct btrfs_trans_handle *trans;
2723	int ret;
2724
2725	async = container_of(work, struct async_delayed_refs, work);
2726
2727	trans = btrfs_join_transaction(async->root);
2728	if (IS_ERR(trans)) {
2729		async->error = PTR_ERR(trans);
2730		goto done;
2731	}
2732
2733	/*
2734	 * trans->sync means that when we call end_transaciton, we won't
2735	 * wait on delayed refs
2736	 */
2737	trans->sync = true;
2738	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2739	if (ret)
2740		async->error = ret;
2741
2742	ret = btrfs_end_transaction(trans, async->root);
2743	if (ret && !async->error)
2744		async->error = ret;
2745done:
2746	if (async->sync)
2747		complete(&async->wait);
2748	else
2749		kfree(async);
2750}
2751
2752int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2753				 unsigned long count, int wait)
2754{
2755	struct async_delayed_refs *async;
2756	int ret;
2757
2758	async = kmalloc(sizeof(*async), GFP_NOFS);
2759	if (!async)
2760		return -ENOMEM;
2761
2762	async->root = root->fs_info->tree_root;
2763	async->count = count;
2764	async->error = 0;
2765	if (wait)
2766		async->sync = 1;
2767	else
2768		async->sync = 0;
2769	init_completion(&async->wait);
2770
2771	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2772			delayed_ref_async_start, NULL, NULL);
2773
2774	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2775
2776	if (wait) {
2777		wait_for_completion(&async->wait);
2778		ret = async->error;
2779		kfree(async);
2780		return ret;
2781	}
2782	return 0;
2783}
2784
2785/*
2786 * this starts processing the delayed reference count updates and
2787 * extent insertions we have queued up so far.  count can be
2788 * 0, which means to process everything in the tree at the start
2789 * of the run (but not newly added entries), or it can be some target
2790 * number you'd like to process.
2791 *
2792 * Returns 0 on success or if called with an aborted transaction
2793 * Returns <0 on error and aborts the transaction
2794 */
2795int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2796			   struct btrfs_root *root, unsigned long count)
2797{
2798	struct rb_node *node;
2799	struct btrfs_delayed_ref_root *delayed_refs;
2800	struct btrfs_delayed_ref_head *head;
2801	int ret;
2802	int run_all = count == (unsigned long)-1;
2803
2804	/* We'll clean this up in btrfs_cleanup_transaction */
2805	if (trans->aborted)
2806		return 0;
2807
2808	if (root == root->fs_info->extent_root)
2809		root = root->fs_info->tree_root;
2810
2811	delayed_refs = &trans->transaction->delayed_refs;
2812	if (count == 0)
2813		count = atomic_read(&delayed_refs->num_entries) * 2;
2814
2815again:
2816#ifdef SCRAMBLE_DELAYED_REFS
2817	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2818#endif
2819	ret = __btrfs_run_delayed_refs(trans, root, count);
2820	if (ret < 0) {
2821		btrfs_abort_transaction(trans, root, ret);
2822		return ret;
2823	}
2824
2825	if (run_all) {
2826		if (!list_empty(&trans->new_bgs))
2827			btrfs_create_pending_block_groups(trans, root);
2828
2829		spin_lock(&delayed_refs->lock);
2830		node = rb_first(&delayed_refs->href_root);
2831		if (!node) {
2832			spin_unlock(&delayed_refs->lock);
2833			goto out;
2834		}
2835		count = (unsigned long)-1;
2836
2837		while (node) {
2838			head = rb_entry(node, struct btrfs_delayed_ref_head,
2839					href_node);
2840			if (btrfs_delayed_ref_is_head(&head->node)) {
2841				struct btrfs_delayed_ref_node *ref;
2842
2843				ref = &head->node;
2844				atomic_inc(&ref->refs);
2845
2846				spin_unlock(&delayed_refs->lock);
2847				/*
2848				 * Mutex was contended, block until it's
2849				 * released and try again
2850				 */
2851				mutex_lock(&head->mutex);
2852				mutex_unlock(&head->mutex);
2853
2854				btrfs_put_delayed_ref(ref);
2855				cond_resched();
2856				goto again;
2857			} else {
2858				WARN_ON(1);
2859			}
2860			node = rb_next(node);
2861		}
2862		spin_unlock(&delayed_refs->lock);
2863		cond_resched();
2864		goto again;
2865	}
2866out:
2867	ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2868	if (ret)
2869		return ret;
2870	assert_qgroups_uptodate(trans);
2871	return 0;
2872}
2873
2874int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2875				struct btrfs_root *root,
2876				u64 bytenr, u64 num_bytes, u64 flags,
2877				int level, int is_data)
2878{
2879	struct btrfs_delayed_extent_op *extent_op;
2880	int ret;
2881
2882	extent_op = btrfs_alloc_delayed_extent_op();
2883	if (!extent_op)
2884		return -ENOMEM;
2885
2886	extent_op->flags_to_set = flags;
2887	extent_op->update_flags = 1;
2888	extent_op->update_key = 0;
2889	extent_op->is_data = is_data ? 1 : 0;
2890	extent_op->level = level;
2891
2892	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2893					  num_bytes, extent_op);
2894	if (ret)
2895		btrfs_free_delayed_extent_op(extent_op);
2896	return ret;
2897}
2898
2899static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2900				      struct btrfs_root *root,
2901				      struct btrfs_path *path,
2902				      u64 objectid, u64 offset, u64 bytenr)
2903{
2904	struct btrfs_delayed_ref_head *head;
2905	struct btrfs_delayed_ref_node *ref;
2906	struct btrfs_delayed_data_ref *data_ref;
2907	struct btrfs_delayed_ref_root *delayed_refs;
2908	struct rb_node *node;
2909	int ret = 0;
2910
2911	delayed_refs = &trans->transaction->delayed_refs;
2912	spin_lock(&delayed_refs->lock);
2913	head = btrfs_find_delayed_ref_head(trans, bytenr);
2914	if (!head) {
2915		spin_unlock(&delayed_refs->lock);
2916		return 0;
2917	}
2918
2919	if (!mutex_trylock(&head->mutex)) {
2920		atomic_inc(&head->node.refs);
2921		spin_unlock(&delayed_refs->lock);
2922
2923		btrfs_release_path(path);
2924
2925		/*
2926		 * Mutex was contended, block until it's released and let
2927		 * caller try again
2928		 */
2929		mutex_lock(&head->mutex);
2930		mutex_unlock(&head->mutex);
2931		btrfs_put_delayed_ref(&head->node);
2932		return -EAGAIN;
2933	}
2934	spin_unlock(&delayed_refs->lock);
2935
2936	spin_lock(&head->lock);
2937	node = rb_first(&head->ref_root);
2938	while (node) {
2939		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2940		node = rb_next(node);
2941
2942		/* If it's a shared ref we know a cross reference exists */
2943		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2944			ret = 1;
2945			break;
2946		}
2947
2948		data_ref = btrfs_delayed_node_to_data_ref(ref);
2949
2950		/*
2951		 * If our ref doesn't match the one we're currently looking at
2952		 * then we have a cross reference.
2953		 */
2954		if (data_ref->root != root->root_key.objectid ||
2955		    data_ref->objectid != objectid ||
2956		    data_ref->offset != offset) {
2957			ret = 1;
2958			break;
2959		}
2960	}
2961	spin_unlock(&head->lock);
2962	mutex_unlock(&head->mutex);
2963	return ret;
2964}
2965
2966static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2967					struct btrfs_root *root,
2968					struct btrfs_path *path,
2969					u64 objectid, u64 offset, u64 bytenr)
2970{
2971	struct btrfs_root *extent_root = root->fs_info->extent_root;
2972	struct extent_buffer *leaf;
2973	struct btrfs_extent_data_ref *ref;
2974	struct btrfs_extent_inline_ref *iref;
2975	struct btrfs_extent_item *ei;
2976	struct btrfs_key key;
2977	u32 item_size;
2978	int ret;
2979
2980	key.objectid = bytenr;
2981	key.offset = (u64)-1;
2982	key.type = BTRFS_EXTENT_ITEM_KEY;
2983
2984	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2985	if (ret < 0)
2986		goto out;
2987	BUG_ON(ret == 0); /* Corruption */
2988
2989	ret = -ENOENT;
2990	if (path->slots[0] == 0)
2991		goto out;
2992
2993	path->slots[0]--;
2994	leaf = path->nodes[0];
2995	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2996
2997	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2998		goto out;
2999
3000	ret = 1;
3001	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3002#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3003	if (item_size < sizeof(*ei)) {
3004		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3005		goto out;
3006	}
3007#endif
3008	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3009
3010	if (item_size != sizeof(*ei) +
3011	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3012		goto out;
3013
3014	if (btrfs_extent_generation(leaf, ei) <=
3015	    btrfs_root_last_snapshot(&root->root_item))
3016		goto out;
3017
3018	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3019	if (btrfs_extent_inline_ref_type(leaf, iref) !=
3020	    BTRFS_EXTENT_DATA_REF_KEY)
3021		goto out;
3022
3023	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3024	if (btrfs_extent_refs(leaf, ei) !=
3025	    btrfs_extent_data_ref_count(leaf, ref) ||
3026	    btrfs_extent_data_ref_root(leaf, ref) !=
3027	    root->root_key.objectid ||
3028	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3029	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
3030		goto out;
3031
3032	ret = 0;
3033out:
3034	return ret;
3035}
3036
3037int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3038			  struct btrfs_root *root,
3039			  u64 objectid, u64 offset, u64 bytenr)
3040{
3041	struct btrfs_path *path;
3042	int ret;
3043	int ret2;
3044
3045	path = btrfs_alloc_path();
3046	if (!path)
3047		return -ENOENT;
3048
3049	do {
3050		ret = check_committed_ref(trans, root, path, objectid,
3051					  offset, bytenr);
3052		if (ret && ret != -ENOENT)
3053			goto out;
3054
3055		ret2 = check_delayed_ref(trans, root, path, objectid,
3056					 offset, bytenr);
3057	} while (ret2 == -EAGAIN);
3058
3059	if (ret2 && ret2 != -ENOENT) {
3060		ret = ret2;
3061		goto out;
3062	}
3063
3064	if (ret != -ENOENT || ret2 != -ENOENT)
3065		ret = 0;
3066out:
3067	btrfs_free_path(path);
3068	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3069		WARN_ON(ret > 0);
3070	return ret;
3071}
3072
3073static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3074			   struct btrfs_root *root,
3075			   struct extent_buffer *buf,
3076			   int full_backref, int inc)
3077{
3078	u64 bytenr;
3079	u64 num_bytes;
3080	u64 parent;
3081	u64 ref_root;
3082	u32 nritems;
3083	struct btrfs_key key;
3084	struct btrfs_file_extent_item *fi;
3085	int i;
3086	int level;
3087	int ret = 0;
3088	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3089			    u64, u64, u64, u64, u64, u64, int);
3090
3091
3092	if (btrfs_test_is_dummy_root(root))
3093		return 0;
3094
3095	ref_root = btrfs_header_owner(buf);
3096	nritems = btrfs_header_nritems(buf);
3097	level = btrfs_header_level(buf);
3098
3099	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3100		return 0;
3101
3102	if (inc)
3103		process_func = btrfs_inc_extent_ref;
3104	else
3105		process_func = btrfs_free_extent;
3106
3107	if (full_backref)
3108		parent = buf->start;
3109	else
3110		parent = 0;
3111
3112	for (i = 0; i < nritems; i++) {
3113		if (level == 0) {
3114			btrfs_item_key_to_cpu(buf, &key, i);
3115			if (key.type != BTRFS_EXTENT_DATA_KEY)
3116				continue;
3117			fi = btrfs_item_ptr(buf, i,
3118					    struct btrfs_file_extent_item);
3119			if (btrfs_file_extent_type(buf, fi) ==
3120			    BTRFS_FILE_EXTENT_INLINE)
3121				continue;
3122			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3123			if (bytenr == 0)
3124				continue;
3125
3126			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3127			key.offset -= btrfs_file_extent_offset(buf, fi);
3128			ret = process_func(trans, root, bytenr, num_bytes,
3129					   parent, ref_root, key.objectid,
3130					   key.offset, 1);
3131			if (ret)
3132				goto fail;
3133		} else {
3134			bytenr = btrfs_node_blockptr(buf, i);
3135			num_bytes = root->nodesize;
3136			ret = process_func(trans, root, bytenr, num_bytes,
3137					   parent, ref_root, level - 1, 0,
3138					   1);
3139			if (ret)
3140				goto fail;
3141		}
3142	}
3143	return 0;
3144fail:
3145	return ret;
3146}
3147
3148int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3149		  struct extent_buffer *buf, int full_backref)
3150{
3151	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3152}
3153
3154int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3155		  struct extent_buffer *buf, int full_backref)
3156{
3157	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3158}
3159
3160static int write_one_cache_group(struct btrfs_trans_handle *trans,
3161				 struct btrfs_root *root,
3162				 struct btrfs_path *path,
3163				 struct btrfs_block_group_cache *cache)
3164{
3165	int ret;
3166	struct btrfs_root *extent_root = root->fs_info->extent_root;
3167	unsigned long bi;
3168	struct extent_buffer *leaf;
3169
3170	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3171	if (ret) {
3172		if (ret > 0)
3173			ret = -ENOENT;
3174		goto fail;
3175	}
3176
3177	leaf = path->nodes[0];
3178	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3179	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3180	btrfs_mark_buffer_dirty(leaf);
3181fail:
3182	btrfs_release_path(path);
3183	return ret;
3184
3185}
3186
3187static struct btrfs_block_group_cache *
3188next_block_group(struct btrfs_root *root,
3189		 struct btrfs_block_group_cache *cache)
3190{
3191	struct rb_node *node;
3192
3193	spin_lock(&root->fs_info->block_group_cache_lock);
3194
3195	/* If our block group was removed, we need a full search. */
3196	if (RB_EMPTY_NODE(&cache->cache_node)) {
3197		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3198
3199		spin_unlock(&root->fs_info->block_group_cache_lock);
3200		btrfs_put_block_group(cache);
3201		cache = btrfs_lookup_first_block_group(root->fs_info,
3202						       next_bytenr);
3203		return cache;
3204	}
3205	node = rb_next(&cache->cache_node);
3206	btrfs_put_block_group(cache);
3207	if (node) {
3208		cache = rb_entry(node, struct btrfs_block_group_cache,
3209				 cache_node);
3210		btrfs_get_block_group(cache);
3211	} else
3212		cache = NULL;
3213	spin_unlock(&root->fs_info->block_group_cache_lock);
3214	return cache;
3215}
3216
3217static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3218			    struct btrfs_trans_handle *trans,
3219			    struct btrfs_path *path)
3220{
3221	struct btrfs_root *root = block_group->fs_info->tree_root;
3222	struct inode *inode = NULL;
3223	u64 alloc_hint = 0;
3224	int dcs = BTRFS_DC_ERROR;
3225	u64 num_pages = 0;
3226	int retries = 0;
3227	int ret = 0;
3228
3229	/*
3230	 * If this block group is smaller than 100 megs don't bother caching the
3231	 * block group.
3232	 */
3233	if (block_group->key.offset < (100 * 1024 * 1024)) {
3234		spin_lock(&block_group->lock);
3235		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3236		spin_unlock(&block_group->lock);
3237		return 0;
3238	}
3239
3240	if (trans->aborted)
3241		return 0;
3242again:
3243	inode = lookup_free_space_inode(root, block_group, path);
3244	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3245		ret = PTR_ERR(inode);
3246		btrfs_release_path(path);
3247		goto out;
3248	}
3249
3250	if (IS_ERR(inode)) {
3251		BUG_ON(retries);
3252		retries++;
3253
3254		if (block_group->ro)
3255			goto out_free;
3256
3257		ret = create_free_space_inode(root, trans, block_group, path);
3258		if (ret)
3259			goto out_free;
3260		goto again;
3261	}
3262
3263	/* We've already setup this transaction, go ahead and exit */
3264	if (block_group->cache_generation == trans->transid &&
3265	    i_size_read(inode)) {
3266		dcs = BTRFS_DC_SETUP;
3267		goto out_put;
3268	}
3269
3270	/*
3271	 * We want to set the generation to 0, that way if anything goes wrong
3272	 * from here on out we know not to trust this cache when we load up next
3273	 * time.
3274	 */
3275	BTRFS_I(inode)->generation = 0;
3276	ret = btrfs_update_inode(trans, root, inode);
3277	if (ret) {
3278		/*
3279		 * So theoretically we could recover from this, simply set the
3280		 * super cache generation to 0 so we know to invalidate the
3281		 * cache, but then we'd have to keep track of the block groups
3282		 * that fail this way so we know we _have_ to reset this cache
3283		 * before the next commit or risk reading stale cache.  So to
3284		 * limit our exposure to horrible edge cases lets just abort the
3285		 * transaction, this only happens in really bad situations
3286		 * anyway.
3287		 */
3288		btrfs_abort_transaction(trans, root, ret);
3289		goto out_put;
3290	}
3291	WARN_ON(ret);
3292
3293	if (i_size_read(inode) > 0) {
3294		ret = btrfs_check_trunc_cache_free_space(root,
3295					&root->fs_info->global_block_rsv);
3296		if (ret)
3297			goto out_put;
3298
3299		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3300		if (ret)
3301			goto out_put;
3302	}
3303
3304	spin_lock(&block_group->lock);
3305	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3306	    !btrfs_test_opt(root, SPACE_CACHE)) {
3307		/*
3308		 * don't bother trying to write stuff out _if_
3309		 * a) we're not cached,
3310		 * b) we're with nospace_cache mount option.
3311		 */
3312		dcs = BTRFS_DC_WRITTEN;
3313		spin_unlock(&block_group->lock);
3314		goto out_put;
3315	}
3316	spin_unlock(&block_group->lock);
3317
3318	/*
3319	 * Try to preallocate enough space based on how big the block group is.
3320	 * Keep in mind this has to include any pinned space which could end up
3321	 * taking up quite a bit since it's not folded into the other space
3322	 * cache.
3323	 */
3324	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3325	if (!num_pages)
3326		num_pages = 1;
3327
3328	num_pages *= 16;
3329	num_pages *= PAGE_CACHE_SIZE;
3330
3331	ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3332	if (ret)
3333		goto out_put;
3334
3335	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3336					      num_pages, num_pages,
3337					      &alloc_hint);
3338	if (!ret)
3339		dcs = BTRFS_DC_SETUP;
3340	btrfs_free_reserved_data_space(inode, num_pages);
3341
3342out_put:
3343	iput(inode);
3344out_free:
3345	btrfs_release_path(path);
3346out:
3347	spin_lock(&block_group->lock);
3348	if (!ret && dcs == BTRFS_DC_SETUP)
3349		block_group->cache_generation = trans->transid;
3350	block_group->disk_cache_state = dcs;
3351	spin_unlock(&block_group->lock);
3352
3353	return ret;
3354}
3355
3356int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3357			    struct btrfs_root *root)
3358{
3359	struct btrfs_block_group_cache *cache, *tmp;
3360	struct btrfs_transaction *cur_trans = trans->transaction;
3361	struct btrfs_path *path;
3362
3363	if (list_empty(&cur_trans->dirty_bgs) ||
3364	    !btrfs_test_opt(root, SPACE_CACHE))
3365		return 0;
3366
3367	path = btrfs_alloc_path();
3368	if (!path)
3369		return -ENOMEM;
3370
3371	/* Could add new block groups, use _safe just in case */
3372	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3373				 dirty_list) {
3374		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3375			cache_save_setup(cache, trans, path);
3376	}
3377
3378	btrfs_free_path(path);
3379	return 0;
3380}
3381
3382/*
3383 * transaction commit does final block group cache writeback during a
3384 * critical section where nothing is allowed to change the FS.  This is
3385 * required in order for the cache to actually match the block group,
3386 * but can introduce a lot of latency into the commit.
3387 *
3388 * So, btrfs_start_dirty_block_groups is here to kick off block group
3389 * cache IO.  There's a chance we'll have to redo some of it if the
3390 * block group changes again during the commit, but it greatly reduces
3391 * the commit latency by getting rid of the easy block groups while
3392 * we're still allowing others to join the commit.
3393 */
3394int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3395				   struct btrfs_root *root)
3396{
3397	struct btrfs_block_group_cache *cache;
3398	struct btrfs_transaction *cur_trans = trans->transaction;
3399	int ret = 0;
3400	int should_put;
3401	struct btrfs_path *path = NULL;
3402	LIST_HEAD(dirty);
3403	struct list_head *io = &cur_trans->io_bgs;
3404	int num_started = 0;
3405	int loops = 0;
3406
3407	spin_lock(&cur_trans->dirty_bgs_lock);
3408	if (list_empty(&cur_trans->dirty_bgs)) {
3409		spin_unlock(&cur_trans->dirty_bgs_lock);
3410		return 0;
3411	}
3412	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3413	spin_unlock(&cur_trans->dirty_bgs_lock);
3414
3415again:
3416	/*
3417	 * make sure all the block groups on our dirty list actually
3418	 * exist
3419	 */
3420	btrfs_create_pending_block_groups(trans, root);
3421
3422	if (!path) {
3423		path = btrfs_alloc_path();
3424		if (!path)
3425			return -ENOMEM;
3426	}
3427
3428	/*
3429	 * cache_write_mutex is here only to save us from balance or automatic
3430	 * removal of empty block groups deleting this block group while we are
3431	 * writing out the cache
3432	 */
3433	mutex_lock(&trans->transaction->cache_write_mutex);
3434	while (!list_empty(&dirty)) {
3435		cache = list_first_entry(&dirty,
3436					 struct btrfs_block_group_cache,
3437					 dirty_list);
3438		/*
3439		 * this can happen if something re-dirties a block
3440		 * group that is already under IO.  Just wait for it to
3441		 * finish and then do it all again
3442		 */
3443		if (!list_empty(&cache->io_list)) {
3444			list_del_init(&cache->io_list);
3445			btrfs_wait_cache_io(root, trans, cache,
3446					    &cache->io_ctl, path,
3447					    cache->key.objectid);
3448			btrfs_put_block_group(cache);
3449		}
3450
3451
3452		/*
3453		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3454		 * if it should update the cache_state.  Don't delete
3455		 * until after we wait.
3456		 *
3457		 * Since we're not running in the commit critical section
3458		 * we need the dirty_bgs_lock to protect from update_block_group
3459		 */
3460		spin_lock(&cur_trans->dirty_bgs_lock);
3461		list_del_init(&cache->dirty_list);
3462		spin_unlock(&cur_trans->dirty_bgs_lock);
3463
3464		should_put = 1;
3465
3466		cache_save_setup(cache, trans, path);
3467
3468		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3469			cache->io_ctl.inode = NULL;
3470			ret = btrfs_write_out_cache(root, trans, cache, path);
3471			if (ret == 0 && cache->io_ctl.inode) {
3472				num_started++;
3473				should_put = 0;
3474
3475				/*
3476				 * the cache_write_mutex is protecting
3477				 * the io_list
3478				 */
3479				list_add_tail(&cache->io_list, io);
3480			} else {
3481				/*
3482				 * if we failed to write the cache, the
3483				 * generation will be bad and life goes on
3484				 */
3485				ret = 0;
3486			}
3487		}
3488		if (!ret) {
3489			ret = write_one_cache_group(trans, root, path, cache);
3490			/*
3491			 * Our block group might still be attached to the list
3492			 * of new block groups in the transaction handle of some
3493			 * other task (struct btrfs_trans_handle->new_bgs). This
3494			 * means its block group item isn't yet in the extent
3495			 * tree. If this happens ignore the error, as we will
3496			 * try again later in the critical section of the
3497			 * transaction commit.
3498			 */
3499			if (ret == -ENOENT) {
3500				ret = 0;
3501				spin_lock(&cur_trans->dirty_bgs_lock);
3502				if (list_empty(&cache->dirty_list)) {
3503					list_add_tail(&cache->dirty_list,
3504						      &cur_trans->dirty_bgs);
3505					btrfs_get_block_group(cache);
3506				}
3507				spin_unlock(&cur_trans->dirty_bgs_lock);
3508			} else if (ret) {
3509				btrfs_abort_transaction(trans, root, ret);
3510			}
3511		}
3512
3513		/* if its not on the io list, we need to put the block group */
3514		if (should_put)
3515			btrfs_put_block_group(cache);
3516
3517		if (ret)
3518			break;
3519
3520		/*
3521		 * Avoid blocking other tasks for too long. It might even save
3522		 * us from writing caches for block groups that are going to be
3523		 * removed.
3524		 */
3525		mutex_unlock(&trans->transaction->cache_write_mutex);
3526		mutex_lock(&trans->transaction->cache_write_mutex);
3527	}
3528	mutex_unlock(&trans->transaction->cache_write_mutex);
3529
3530	/*
3531	 * go through delayed refs for all the stuff we've just kicked off
3532	 * and then loop back (just once)
3533	 */
3534	ret = btrfs_run_delayed_refs(trans, root, 0);
3535	if (!ret && loops == 0) {
3536		loops++;
3537		spin_lock(&cur_trans->dirty_bgs_lock);
3538		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3539		/*
3540		 * dirty_bgs_lock protects us from concurrent block group
3541		 * deletes too (not just cache_write_mutex).
3542		 */
3543		if (!list_empty(&dirty)) {
3544			spin_unlock(&cur_trans->dirty_bgs_lock);
3545			goto again;
3546		}
3547		spin_unlock(&cur_trans->dirty_bgs_lock);
3548	}
3549
3550	btrfs_free_path(path);
3551	return ret;
3552}
3553
3554int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3555				   struct btrfs_root *root)
3556{
3557	struct btrfs_block_group_cache *cache;
3558	struct btrfs_transaction *cur_trans = trans->transaction;
3559	int ret = 0;
3560	int should_put;
3561	struct btrfs_path *path;
3562	struct list_head *io = &cur_trans->io_bgs;
3563	int num_started = 0;
3564
3565	path = btrfs_alloc_path();
3566	if (!path)
3567		return -ENOMEM;
3568
3569	/*
3570	 * We don't need the lock here since we are protected by the transaction
3571	 * commit.  We want to do the cache_save_setup first and then run the
3572	 * delayed refs to make sure we have the best chance at doing this all
3573	 * in one shot.
3574	 */
3575	while (!list_empty(&cur_trans->dirty_bgs)) {
3576		cache = list_first_entry(&cur_trans->dirty_bgs,
3577					 struct btrfs_block_group_cache,
3578					 dirty_list);
3579
3580		/*
3581		 * this can happen if cache_save_setup re-dirties a block
3582		 * group that is already under IO.  Just wait for it to
3583		 * finish and then do it all again
3584		 */
3585		if (!list_empty(&cache->io_list)) {
3586			list_del_init(&cache->io_list);
3587			btrfs_wait_cache_io(root, trans, cache,
3588					    &cache->io_ctl, path,
3589					    cache->key.objectid);
3590			btrfs_put_block_group(cache);
3591		}
3592
3593		/*
3594		 * don't remove from the dirty list until after we've waited
3595		 * on any pending IO
3596		 */
3597		list_del_init(&cache->dirty_list);
3598		should_put = 1;
3599
3600		cache_save_setup(cache, trans, path);
3601
3602		if (!ret)
3603			ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3604
3605		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3606			cache->io_ctl.inode = NULL;
3607			ret = btrfs_write_out_cache(root, trans, cache, path);
3608			if (ret == 0 && cache->io_ctl.inode) {
3609				num_started++;
3610				should_put = 0;
3611				list_add_tail(&cache->io_list, io);
3612			} else {
3613				/*
3614				 * if we failed to write the cache, the
3615				 * generation will be bad and life goes on
3616				 */
3617				ret = 0;
3618			}
3619		}
3620		if (!ret) {
3621			ret = write_one_cache_group(trans, root, path, cache);
3622			if (ret)
3623				btrfs_abort_transaction(trans, root, ret);
3624		}
3625
3626		/* if its not on the io list, we need to put the block group */
3627		if (should_put)
3628			btrfs_put_block_group(cache);
3629	}
3630
3631	while (!list_empty(io)) {
3632		cache = list_first_entry(io, struct btrfs_block_group_cache,
3633					 io_list);
3634		list_del_init(&cache->io_list);
3635		btrfs_wait_cache_io(root, trans, cache,
3636				    &cache->io_ctl, path, cache->key.objectid);
3637		btrfs_put_block_group(cache);
3638	}
3639
3640	btrfs_free_path(path);
3641	return ret;
3642}
3643
3644int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3645{
3646	struct btrfs_block_group_cache *block_group;
3647	int readonly = 0;
3648
3649	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3650	if (!block_group || block_group->ro)
3651		readonly = 1;
3652	if (block_group)
3653		btrfs_put_block_group(block_group);
3654	return readonly;
3655}
3656
3657static const char *alloc_name(u64 flags)
3658{
3659	switch (flags) {
3660	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3661		return "mixed";
3662	case BTRFS_BLOCK_GROUP_METADATA:
3663		return "metadata";
3664	case BTRFS_BLOCK_GROUP_DATA:
3665		return "data";
3666	case BTRFS_BLOCK_GROUP_SYSTEM:
3667		return "system";
3668	default:
3669		WARN_ON(1);
3670		return "invalid-combination";
3671	};
3672}
3673
3674static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3675			     u64 total_bytes, u64 bytes_used,
3676			     struct btrfs_space_info **space_info)
3677{
3678	struct btrfs_space_info *found;
3679	int i;
3680	int factor;
3681	int ret;
3682
3683	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3684		     BTRFS_BLOCK_GROUP_RAID10))
3685		factor = 2;
3686	else
3687		factor = 1;
3688
3689	found = __find_space_info(info, flags);
3690	if (found) {
3691		spin_lock(&found->lock);
3692		found->total_bytes += total_bytes;
3693		found->disk_total += total_bytes * factor;
3694		found->bytes_used += bytes_used;
3695		found->disk_used += bytes_used * factor;
3696		found->full = 0;
3697		spin_unlock(&found->lock);
3698		*space_info = found;
3699		return 0;
3700	}
3701	found = kzalloc(sizeof(*found), GFP_NOFS);
3702	if (!found)
3703		return -ENOMEM;
3704
3705	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3706	if (ret) {
3707		kfree(found);
3708		return ret;
3709	}
3710
3711	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3712		INIT_LIST_HEAD(&found->block_groups[i]);
3713	init_rwsem(&found->groups_sem);
3714	spin_lock_init(&found->lock);
3715	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3716	found->total_bytes = total_bytes;
3717	found->disk_total = total_bytes * factor;
3718	found->bytes_used = bytes_used;
3719	found->disk_used = bytes_used * factor;
3720	found->bytes_pinned = 0;
3721	found->bytes_reserved = 0;
3722	found->bytes_readonly = 0;
3723	found->bytes_may_use = 0;
3724	found->full = 0;
3725	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3726	found->chunk_alloc = 0;
3727	found->flush = 0;
3728	init_waitqueue_head(&found->wait);
3729	INIT_LIST_HEAD(&found->ro_bgs);
3730
3731	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3732				    info->space_info_kobj, "%s",
3733				    alloc_name(found->flags));
3734	if (ret) {
3735		kfree(found);
3736		return ret;
3737	}
3738
3739	*space_info = found;
3740	list_add_rcu(&found->list, &info->space_info);
3741	if (flags & BTRFS_BLOCK_GROUP_DATA)
3742		info->data_sinfo = found;
3743
3744	return ret;
3745}
3746
3747static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3748{
3749	u64 extra_flags = chunk_to_extended(flags) &
3750				BTRFS_EXTENDED_PROFILE_MASK;
3751
3752	write_seqlock(&fs_info->profiles_lock);
3753	if (flags & BTRFS_BLOCK_GROUP_DATA)
3754		fs_info->avail_data_alloc_bits |= extra_flags;
3755	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3756		fs_info->avail_metadata_alloc_bits |= extra_flags;
3757	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3758		fs_info->avail_system_alloc_bits |= extra_flags;
3759	write_sequnlock(&fs_info->profiles_lock);
3760}
3761
3762/*
3763 * returns target flags in extended format or 0 if restripe for this
3764 * chunk_type is not in progress
3765 *
3766 * should be called with either volume_mutex or balance_lock held
3767 */
3768static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3769{
3770	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3771	u64 target = 0;
3772
3773	if (!bctl)
3774		return 0;
3775
3776	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3777	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3778		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3779	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3780		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3781		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3782	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3783		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3784		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3785	}
3786
3787	return target;
3788}
3789
3790/*
3791 * @flags: available profiles in extended format (see ctree.h)
3792 *
3793 * Returns reduced profile in chunk format.  If profile changing is in
3794 * progress (either running or paused) picks the target profile (if it's
3795 * already available), otherwise falls back to plain reducing.
3796 */
3797static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3798{
3799	u64 num_devices = root->fs_info->fs_devices->rw_devices;
3800	u64 target;
3801	u64 tmp;
3802
3803	/*
3804	 * see if restripe for this chunk_type is in progress, if so
3805	 * try to reduce to the target profile
3806	 */
3807	spin_lock(&root->fs_info->balance_lock);
3808	target = get_restripe_target(root->fs_info, flags);
3809	if (target) {
3810		/* pick target profile only if it's already available */
3811		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3812			spin_unlock(&root->fs_info->balance_lock);
3813			return extended_to_chunk(target);
3814		}
3815	}
3816	spin_unlock(&root->fs_info->balance_lock);
3817
3818	/* First, mask out the RAID levels which aren't possible */
3819	if (num_devices == 1)
3820		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3821			   BTRFS_BLOCK_GROUP_RAID5);
3822	if (num_devices < 3)
3823		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3824	if (num_devices < 4)
3825		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3826
3827	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3828		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3829		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3830	flags &= ~tmp;
3831
3832	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3833		tmp = BTRFS_BLOCK_GROUP_RAID6;
3834	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3835		tmp = BTRFS_BLOCK_GROUP_RAID5;
3836	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3837		tmp = BTRFS_BLOCK_GROUP_RAID10;
3838	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3839		tmp = BTRFS_BLOCK_GROUP_RAID1;
3840	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3841		tmp = BTRFS_BLOCK_GROUP_RAID0;
3842
3843	return extended_to_chunk(flags | tmp);
3844}
3845
3846static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3847{
3848	unsigned seq;
3849	u64 flags;
3850
3851	do {
3852		flags = orig_flags;
3853		seq = read_seqbegin(&root->fs_info->profiles_lock);
3854
3855		if (flags & BTRFS_BLOCK_GROUP_DATA)
3856			flags |= root->fs_info->avail_data_alloc_bits;
3857		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3858			flags |= root->fs_info->avail_system_alloc_bits;
3859		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3860			flags |= root->fs_info->avail_metadata_alloc_bits;
3861	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
3862
3863	return btrfs_reduce_alloc_profile(root, flags);
3864}
3865
3866u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3867{
3868	u64 flags;
3869	u64 ret;
3870
3871	if (data)
3872		flags = BTRFS_BLOCK_GROUP_DATA;
3873	else if (root == root->fs_info->chunk_root)
3874		flags = BTRFS_BLOCK_GROUP_SYSTEM;
3875	else
3876		flags = BTRFS_BLOCK_GROUP_METADATA;
3877
3878	ret = get_alloc_profile(root, flags);
3879	return ret;
3880}
3881
3882/*
3883 * This will check the space that the inode allocates from to make sure we have
3884 * enough space for bytes.
3885 */
3886int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3887{
3888	struct btrfs_space_info *data_sinfo;
3889	struct btrfs_root *root = BTRFS_I(inode)->root;
3890	struct btrfs_fs_info *fs_info = root->fs_info;
3891	u64 used;
3892	int ret = 0;
3893	int need_commit = 2;
3894	int have_pinned_space;
3895
3896	/* make sure bytes are sectorsize aligned */
3897	bytes = ALIGN(bytes, root->sectorsize);
3898
3899	if (btrfs_is_free_space_inode(inode)) {
3900		need_commit = 0;
3901		ASSERT(current->journal_info);
3902	}
3903
3904	data_sinfo = fs_info->data_sinfo;
3905	if (!data_sinfo)
3906		goto alloc;
3907
3908again:
3909	/* make sure we have enough space to handle the data first */
3910	spin_lock(&data_sinfo->lock);
3911	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3912		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3913		data_sinfo->bytes_may_use;
3914
3915	if (used + bytes > data_sinfo->total_bytes) {
3916		struct btrfs_trans_handle *trans;
3917
3918		/*
3919		 * if we don't have enough free bytes in this space then we need
3920		 * to alloc a new chunk.
3921		 */
3922		if (!data_sinfo->full) {
3923			u64 alloc_target;
3924
3925			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3926			spin_unlock(&data_sinfo->lock);
3927alloc:
3928			alloc_target = btrfs_get_alloc_profile(root, 1);
3929			/*
3930			 * It is ugly that we don't call nolock join
3931			 * transaction for the free space inode case here.
3932			 * But it is safe because we only do the data space
3933			 * reservation for the free space cache in the
3934			 * transaction context, the common join transaction
3935			 * just increase the counter of the current transaction
3936			 * handler, doesn't try to acquire the trans_lock of
3937			 * the fs.
3938			 */
3939			trans = btrfs_join_transaction(root);
3940			if (IS_ERR(trans))
3941				return PTR_ERR(trans);
3942
3943			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3944					     alloc_target,
3945					     CHUNK_ALLOC_NO_FORCE);
3946			btrfs_end_transaction(trans, root);
3947			if (ret < 0) {
3948				if (ret != -ENOSPC)
3949					return ret;
3950				else {
3951					have_pinned_space = 1;
3952					goto commit_trans;
3953				}
3954			}
3955
3956			if (!data_sinfo)
3957				data_sinfo = fs_info->data_sinfo;
3958
3959			goto again;
3960		}
3961
3962		/*
3963		 * If we don't have enough pinned space to deal with this
3964		 * allocation, and no removed chunk in current transaction,
3965		 * don't bother committing the transaction.
3966		 */
3967		have_pinned_space = percpu_counter_compare(
3968			&data_sinfo->total_bytes_pinned,
3969			used + bytes - data_sinfo->total_bytes);
3970		spin_unlock(&data_sinfo->lock);
3971
3972		/* commit the current transaction and try again */
3973commit_trans:
3974		if (need_commit &&
3975		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3976			need_commit--;
3977
3978			if (need_commit > 0) {
3979				btrfs_start_delalloc_roots(fs_info, 0, -1);
3980				btrfs_wait_ordered_roots(fs_info, -1);
3981			}
3982
3983			trans = btrfs_join_transaction(root);
3984			if (IS_ERR(trans))
3985				return PTR_ERR(trans);
3986			if (have_pinned_space >= 0 ||
3987			    trans->transaction->have_free_bgs ||
3988			    need_commit > 0) {
3989				ret = btrfs_commit_transaction(trans, root);
3990				if (ret)
3991					return ret;
3992				/*
3993				 * The cleaner kthread might still be doing iput
3994				 * operations. Wait for it to finish so that
3995				 * more space is released.
3996				 */
3997				mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
3998				mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
3999				goto again;
4000			} else {
4001				btrfs_end_transaction(trans, root);
4002			}
4003		}
4004
4005		trace_btrfs_space_reservation(root->fs_info,
4006					      "space_info:enospc",
4007					      data_sinfo->flags, bytes, 1);
4008		return -ENOSPC;
4009	}
4010	ret = btrfs_qgroup_reserve(root, write_bytes);
4011	if (ret)
4012		goto out;
4013	data_sinfo->bytes_may_use += bytes;
4014	trace_btrfs_space_reservation(root->fs_info, "space_info",
4015				      data_sinfo->flags, bytes, 1);
4016out:
4017	spin_unlock(&data_sinfo->lock);
4018
4019	return ret;
4020}
4021
4022/*
4023 * Called if we need to clear a data reservation for this inode.
4024 */
4025void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4026{
4027	struct btrfs_root *root = BTRFS_I(inode)->root;
4028	struct btrfs_space_info *data_sinfo;
4029
4030	/* make sure bytes are sectorsize aligned */
4031	bytes = ALIGN(bytes, root->sectorsize);
4032
4033	data_sinfo = root->fs_info->data_sinfo;
4034	spin_lock(&data_sinfo->lock);
4035	WARN_ON(data_sinfo->bytes_may_use < bytes);
4036	data_sinfo->bytes_may_use -= bytes;
4037	trace_btrfs_space_reservation(root->fs_info, "space_info",
4038				      data_sinfo->flags, bytes, 0);
4039	spin_unlock(&data_sinfo->lock);
4040}
4041
4042static void force_metadata_allocation(struct btrfs_fs_info *info)
4043{
4044	struct list_head *head = &info->space_info;
4045	struct btrfs_space_info *found;
4046
4047	rcu_read_lock();
4048	list_for_each_entry_rcu(found, head, list) {
4049		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4050			found->force_alloc = CHUNK_ALLOC_FORCE;
4051	}
4052	rcu_read_unlock();
4053}
4054
4055static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4056{
4057	return (global->size << 1);
4058}
4059
4060static int should_alloc_chunk(struct btrfs_root *root,
4061			      struct btrfs_space_info *sinfo, int force)
4062{
4063	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4064	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4065	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4066	u64 thresh;
4067
4068	if (force == CHUNK_ALLOC_FORCE)
4069		return 1;
4070
4071	/*
4072	 * We need to take into account the global rsv because for all intents
4073	 * and purposes it's used space.  Don't worry about locking the
4074	 * global_rsv, it doesn't change except when the transaction commits.
4075	 */
4076	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4077		num_allocated += calc_global_rsv_need_space(global_rsv);
4078
4079	/*
4080	 * in limited mode, we want to have some free space up to
4081	 * about 1% of the FS size.
4082	 */
4083	if (force == CHUNK_ALLOC_LIMITED) {
4084		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4085		thresh = max_t(u64, 64 * 1024 * 1024,
4086			       div_factor_fine(thresh, 1));
4087
4088		if (num_bytes - num_allocated < thresh)
4089			return 1;
4090	}
4091
4092	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4093		return 0;
4094	return 1;
4095}
4096
4097static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
4098{
4099	u64 num_dev;
4100
4101	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4102		    BTRFS_BLOCK_GROUP_RAID0 |
4103		    BTRFS_BLOCK_GROUP_RAID5 |
4104		    BTRFS_BLOCK_GROUP_RAID6))
4105		num_dev = root->fs_info->fs_devices->rw_devices;
4106	else if (type & BTRFS_BLOCK_GROUP_RAID1)
4107		num_dev = 2;
4108	else
4109		num_dev = 1;	/* DUP or single */
4110
4111	/* metadata for updaing devices and chunk tree */
4112	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
4113}
4114
4115static void check_system_chunk(struct btrfs_trans_handle *trans,
4116			       struct btrfs_root *root, u64 type)
4117{
4118	struct btrfs_space_info *info;
4119	u64 left;
4120	u64 thresh;
4121
4122	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4123	spin_lock(&info->lock);
4124	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4125		info->bytes_reserved - info->bytes_readonly;
4126	spin_unlock(&info->lock);
4127
4128	thresh = get_system_chunk_thresh(root, type);
4129	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4130		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4131			left, thresh, type);
4132		dump_space_info(info, 0, 0);
4133	}
4134
4135	if (left < thresh) {
4136		u64 flags;
4137
4138		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4139		btrfs_alloc_chunk(trans, root, flags);
4140	}
4141}
4142
4143static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4144			  struct btrfs_root *extent_root, u64 flags, int force)
4145{
4146	struct btrfs_space_info *space_info;
4147	struct btrfs_fs_info *fs_info = extent_root->fs_info;
4148	int wait_for_alloc = 0;
4149	int ret = 0;
4150
4151	/* Don't re-enter if we're already allocating a chunk */
4152	if (trans->allocating_chunk)
4153		return -ENOSPC;
4154
4155	space_info = __find_space_info(extent_root->fs_info, flags);
4156	if (!space_info) {
4157		ret = update_space_info(extent_root->fs_info, flags,
4158					0, 0, &space_info);
4159		BUG_ON(ret); /* -ENOMEM */
4160	}
4161	BUG_ON(!space_info); /* Logic error */
4162
4163again:
4164	spin_lock(&space_info->lock);
4165	if (force < space_info->force_alloc)
4166		force = space_info->force_alloc;
4167	if (space_info->full) {
4168		if (should_alloc_chunk(extent_root, space_info, force))
4169			ret = -ENOSPC;
4170		else
4171			ret = 0;
4172		spin_unlock(&space_info->lock);
4173		return ret;
4174	}
4175
4176	if (!should_alloc_chunk(extent_root, space_info, force)) {
4177		spin_unlock(&space_info->lock);
4178		return 0;
4179	} else if (space_info->chunk_alloc) {
4180		wait_for_alloc = 1;
4181	} else {
4182		space_info->chunk_alloc = 1;
4183	}
4184
4185	spin_unlock(&space_info->lock);
4186
4187	mutex_lock(&fs_info->chunk_mutex);
4188
4189	/*
4190	 * The chunk_mutex is held throughout the entirety of a chunk
4191	 * allocation, so once we've acquired the chunk_mutex we know that the
4192	 * other guy is done and we need to recheck and see if we should
4193	 * allocate.
4194	 */
4195	if (wait_for_alloc) {
4196		mutex_unlock(&fs_info->chunk_mutex);
4197		wait_for_alloc = 0;
4198		goto again;
4199	}
4200
4201	trans->allocating_chunk = true;
4202
4203	/*
4204	 * If we have mixed data/metadata chunks we want to make sure we keep
4205	 * allocating mixed chunks instead of individual chunks.
4206	 */
4207	if (btrfs_mixed_space_info(space_info))
4208		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4209
4210	/*
4211	 * if we're doing a data chunk, go ahead and make sure that
4212	 * we keep a reasonable number of metadata chunks allocated in the
4213	 * FS as well.
4214	 */
4215	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4216		fs_info->data_chunk_allocations++;
4217		if (!(fs_info->data_chunk_allocations %
4218		      fs_info->metadata_ratio))
4219			force_metadata_allocation(fs_info);
4220	}
4221
4222	/*
4223	 * Check if we have enough space in SYSTEM chunk because we may need
4224	 * to update devices.
4225	 */
4226	check_system_chunk(trans, extent_root, flags);
4227
4228	ret = btrfs_alloc_chunk(trans, extent_root, flags);
4229	trans->allocating_chunk = false;
4230
4231	spin_lock(&space_info->lock);
4232	if (ret < 0 && ret != -ENOSPC)
4233		goto out;
4234	if (ret)
4235		space_info->full = 1;
4236	else
4237		ret = 1;
4238
4239	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4240out:
4241	space_info->chunk_alloc = 0;
4242	spin_unlock(&space_info->lock);
4243	mutex_unlock(&fs_info->chunk_mutex);
4244	return ret;
4245}
4246
4247static int can_overcommit(struct btrfs_root *root,
4248			  struct btrfs_space_info *space_info, u64 bytes,
4249			  enum btrfs_reserve_flush_enum flush)
4250{
4251	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4252	u64 profile = btrfs_get_alloc_profile(root, 0);
4253	u64 space_size;
4254	u64 avail;
4255	u64 used;
4256
4257	used = space_info->bytes_used + space_info->bytes_reserved +
4258		space_info->bytes_pinned + space_info->bytes_readonly;
4259
4260	/*
4261	 * We only want to allow over committing if we have lots of actual space
4262	 * free, but if we don't have enough space to handle the global reserve
4263	 * space then we could end up having a real enospc problem when trying
4264	 * to allocate a chunk or some other such important allocation.
4265	 */
4266	spin_lock(&global_rsv->lock);
4267	space_size = calc_global_rsv_need_space(global_rsv);
4268	spin_unlock(&global_rsv->lock);
4269	if (used + space_size >= space_info->total_bytes)
4270		return 0;
4271
4272	used += space_info->bytes_may_use;
4273
4274	spin_lock(&root->fs_info->free_chunk_lock);
4275	avail = root->fs_info->free_chunk_space;
4276	spin_unlock(&root->fs_info->free_chunk_lock);
4277
4278	/*
4279	 * If we have dup, raid1 or raid10 then only half of the free
4280	 * space is actually useable.  For raid56, the space info used
4281	 * doesn't include the parity drive, so we don't have to
4282	 * change the math
4283	 */
4284	if (profile & (BTRFS_BLOCK_GROUP_DUP |
4285		       BTRFS_BLOCK_GROUP_RAID1 |
4286		       BTRFS_BLOCK_GROUP_RAID10))
4287		avail >>= 1;
4288
4289	/*
4290	 * If we aren't flushing all things, let us overcommit up to
4291	 * 1/2th of the space. If we can flush, don't let us overcommit
4292	 * too much, let it overcommit up to 1/8 of the space.
4293	 */
4294	if (flush == BTRFS_RESERVE_FLUSH_ALL)
4295		avail >>= 3;
4296	else
4297		avail >>= 1;
4298
4299	if (used + bytes < space_info->total_bytes + avail)
4300		return 1;
4301	return 0;
4302}
4303
4304static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4305					 unsigned long nr_pages, int nr_items)
4306{
4307	struct super_block *sb = root->fs_info->sb;
4308
4309	if (down_read_trylock(&sb->s_umount)) {
4310		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4311		up_read(&sb->s_umount);
4312	} else {
4313		/*
4314		 * We needn't worry the filesystem going from r/w to r/o though
4315		 * we don't acquire ->s_umount mutex, because the filesystem
4316		 * should guarantee the delalloc inodes list be empty after
4317		 * the filesystem is readonly(all dirty pages are written to
4318		 * the disk).
4319		 */
4320		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4321		if (!current->journal_info)
4322			btrfs_wait_ordered_roots(root->fs_info, nr_items);
4323	}
4324}
4325
4326static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4327{
4328	u64 bytes;
4329	int nr;
4330
4331	bytes = btrfs_calc_trans_metadata_size(root, 1);
4332	nr = (int)div64_u64(to_reclaim, bytes);
4333	if (!nr)
4334		nr = 1;
4335	return nr;
4336}
4337
4338#define EXTENT_SIZE_PER_ITEM	(256 * 1024)
4339
4340/*
4341 * shrink metadata reservation for delalloc
4342 */
4343static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4344			    bool wait_ordered)
4345{
4346	struct btrfs_block_rsv *block_rsv;
4347	struct btrfs_space_info *space_info;
4348	struct btrfs_trans_handle *trans;
4349	u64 delalloc_bytes;
4350	u64 max_reclaim;
4351	long time_left;
4352	unsigned long nr_pages;
4353	int loops;
4354	int items;
4355	enum btrfs_reserve_flush_enum flush;
4356
4357	/* Calc the number of the pages we need flush for space reservation */
4358	items = calc_reclaim_items_nr(root, to_reclaim);
4359	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4360
4361	trans = (struct btrfs_trans_handle *)current->journal_info;
4362	block_rsv = &root->fs_info->delalloc_block_rsv;
4363	space_info = block_rsv->space_info;
4364
4365	delalloc_bytes = percpu_counter_sum_positive(
4366						&root->fs_info->delalloc_bytes);
4367	if (delalloc_bytes == 0) {
4368		if (trans)
4369			return;
4370		if (wait_ordered)
4371			btrfs_wait_ordered_roots(root->fs_info, items);
4372		return;
4373	}
4374
4375	loops = 0;
4376	while (delalloc_bytes && loops < 3) {
4377		max_reclaim = min(delalloc_bytes, to_reclaim);
4378		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4379		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4380		/*
4381		 * We need to wait for the async pages to actually start before
4382		 * we do anything.
4383		 */
4384		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4385		if (!max_reclaim)
4386			goto skip_async;
4387
4388		if (max_reclaim <= nr_pages)
4389			max_reclaim = 0;
4390		else
4391			max_reclaim -= nr_pages;
4392
4393		wait_event(root->fs_info->async_submit_wait,
4394			   atomic_read(&root->fs_info->async_delalloc_pages) <=
4395			   (int)max_reclaim);
4396skip_async:
4397		if (!trans)
4398			flush = BTRFS_RESERVE_FLUSH_ALL;
4399		else
4400			flush = BTRFS_RESERVE_NO_FLUSH;
4401		spin_lock(&space_info->lock);
4402		if (can_overcommit(root, space_info, orig, flush)) {
4403			spin_unlock(&space_info->lock);
4404			break;
4405		}
4406		spin_unlock(&space_info->lock);
4407
4408		loops++;
4409		if (wait_ordered && !trans) {
4410			btrfs_wait_ordered_roots(root->fs_info, items);
4411		} else {
4412			time_left = schedule_timeout_killable(1);
4413			if (time_left)
4414				break;
4415		}
4416		delalloc_bytes = percpu_counter_sum_positive(
4417						&root->fs_info->delalloc_bytes);
4418	}
4419}
4420
4421/**
4422 * maybe_commit_transaction - possibly commit the transaction if its ok to
4423 * @root - the root we're allocating for
4424 * @bytes - the number of bytes we want to reserve
4425 * @force - force the commit
4426 *
4427 * This will check to make sure that committing the transaction will actually
4428 * get us somewhere and then commit the transaction if it does.  Otherwise it
4429 * will return -ENOSPC.
4430 */
4431static int may_commit_transaction(struct btrfs_root *root,
4432				  struct btrfs_space_info *space_info,
4433				  u64 bytes, int force)
4434{
4435	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4436	struct btrfs_trans_handle *trans;
4437
4438	trans = (struct btrfs_trans_handle *)current->journal_info;
4439	if (trans)
4440		return -EAGAIN;
4441
4442	if (force)
4443		goto commit;
4444
4445	/* See if there is enough pinned space to make this reservation */
4446	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4447				   bytes) >= 0)
4448		goto commit;
4449
4450	/*
4451	 * See if there is some space in the delayed insertion reservation for
4452	 * this reservation.
4453	 */
4454	if (space_info != delayed_rsv->space_info)
4455		return -ENOSPC;
4456
4457	spin_lock(&delayed_rsv->lock);
4458	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4459				   bytes - delayed_rsv->size) >= 0) {
4460		spin_unlock(&delayed_rsv->lock);
4461		return -ENOSPC;
4462	}
4463	spin_unlock(&delayed_rsv->lock);
4464
4465commit:
4466	trans = btrfs_join_transaction(root);
4467	if (IS_ERR(trans))
4468		return -ENOSPC;
4469
4470	return btrfs_commit_transaction(trans, root);
4471}
4472
4473enum flush_state {
4474	FLUSH_DELAYED_ITEMS_NR	=	1,
4475	FLUSH_DELAYED_ITEMS	=	2,
4476	FLUSH_DELALLOC		=	3,
4477	FLUSH_DELALLOC_WAIT	=	4,
4478	ALLOC_CHUNK		=	5,
4479	COMMIT_TRANS		=	6,
4480};
4481
4482static int flush_space(struct btrfs_root *root,
4483		       struct btrfs_space_info *space_info, u64 num_bytes,
4484		       u64 orig_bytes, int state)
4485{
4486	struct btrfs_trans_handle *trans;
4487	int nr;
4488	int ret = 0;
4489
4490	switch (state) {
4491	case FLUSH_DELAYED_ITEMS_NR:
4492	case FLUSH_DELAYED_ITEMS:
4493		if (state == FLUSH_DELAYED_ITEMS_NR)
4494			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4495		else
4496			nr = -1;
4497
4498		trans = btrfs_join_transaction(root);
4499		if (IS_ERR(trans)) {
4500			ret = PTR_ERR(trans);
4501			break;
4502		}
4503		ret = btrfs_run_delayed_items_nr(trans, root, nr);
4504		btrfs_end_transaction(trans, root);
4505		break;
4506	case FLUSH_DELALLOC:
4507	case FLUSH_DELALLOC_WAIT:
4508		shrink_delalloc(root, num_bytes * 2, orig_bytes,
4509				state == FLUSH_DELALLOC_WAIT);
4510		break;
4511	case ALLOC_CHUNK:
4512		trans = btrfs_join_transaction(root);
4513		if (IS_ERR(trans)) {
4514			ret = PTR_ERR(trans);
4515			break;
4516		}
4517		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4518				     btrfs_get_alloc_profile(root, 0),
4519				     CHUNK_ALLOC_NO_FORCE);
4520		btrfs_end_transaction(trans, root);
4521		if (ret == -ENOSPC)
4522			ret = 0;
4523		break;
4524	case COMMIT_TRANS:
4525		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4526		break;
4527	default:
4528		ret = -ENOSPC;
4529		break;
4530	}
4531
4532	return ret;
4533}
4534
4535static inline u64
4536btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4537				 struct btrfs_space_info *space_info)
4538{
4539	u64 used;
4540	u64 expected;
4541	u64 to_reclaim;
4542
4543	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4544				16 * 1024 * 1024);
4545	spin_lock(&space_info->lock);
4546	if (can_overcommit(root, space_info, to_reclaim,
4547			   BTRFS_RESERVE_FLUSH_ALL)) {
4548		to_reclaim = 0;
4549		goto out;
4550	}
4551
4552	used = space_info->bytes_used + space_info->bytes_reserved +
4553	       space_info->bytes_pinned + space_info->bytes_readonly +
4554	       space_info->bytes_may_use;
4555	if (can_overcommit(root, space_info, 1024 * 1024,
4556			   BTRFS_RESERVE_FLUSH_ALL))
4557		expected = div_factor_fine(space_info->total_bytes, 95);
4558	else
4559		expected = div_factor_fine(space_info->total_bytes, 90);
4560
4561	if (used > expected)
4562		to_reclaim = used - expected;
4563	else
4564		to_reclaim = 0;
4565	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4566				     space_info->bytes_reserved);
4567out:
4568	spin_unlock(&space_info->lock);
4569
4570	return to_reclaim;
4571}
4572
4573static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4574					struct btrfs_fs_info *fs_info, u64 used)
4575{
4576	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4577
4578	/* If we're just plain full then async reclaim just slows us down. */
4579	if (space_info->bytes_used >= thresh)
4580		return 0;
4581
4582	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4583		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4584}
4585
4586static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4587				       struct btrfs_fs_info *fs_info,
4588				       int flush_state)
4589{
4590	u64 used;
4591
4592	spin_lock(&space_info->lock);
4593	/*
4594	 * We run out of space and have not got any free space via flush_space,
4595	 * so don't bother doing async reclaim.
4596	 */
4597	if (flush_state > COMMIT_TRANS && space_info->full) {
4598		spin_unlock(&space_info->lock);
4599		return 0;
4600	}
4601
4602	used = space_info->bytes_used + space_info->bytes_reserved +
4603	       space_info->bytes_pinned + space_info->bytes_readonly +
4604	       space_info->bytes_may_use;
4605	if (need_do_async_reclaim(space_info, fs_info, used)) {
4606		spin_unlock(&space_info->lock);
4607		return 1;
4608	}
4609	spin_unlock(&space_info->lock);
4610
4611	return 0;
4612}
4613
4614static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4615{
4616	struct btrfs_fs_info *fs_info;
4617	struct btrfs_space_info *space_info;
4618	u64 to_reclaim;
4619	int flush_state;
4620
4621	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4622	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4623
4624	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4625						      space_info);
4626	if (!to_reclaim)
4627		return;
4628
4629	flush_state = FLUSH_DELAYED_ITEMS_NR;
4630	do {
4631		flush_space(fs_info->fs_root, space_info, to_reclaim,
4632			    to_reclaim, flush_state);
4633		flush_state++;
4634		if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4635						 flush_state))
4636			return;
4637	} while (flush_state < COMMIT_TRANS);
4638}
4639
4640void btrfs_init_async_reclaim_work(struct work_struct *work)
4641{
4642	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4643}
4644
4645/**
4646 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4647 * @root - the root we're allocating for
4648 * @block_rsv - the block_rsv we're allocating for
4649 * @orig_bytes - the number of bytes we want
4650 * @flush - whether or not we can flush to make our reservation
4651 *
4652 * This will reserve orgi_bytes number of bytes from the space info associated
4653 * with the block_rsv.  If there is not enough space it will make an attempt to
4654 * flush out space to make room.  It will do this by flushing delalloc if
4655 * possible or committing the transaction.  If flush is 0 then no attempts to
4656 * regain reservations will be made and this will fail if there is not enough
4657 * space already.
4658 */
4659static int reserve_metadata_bytes(struct btrfs_root *root,
4660				  struct btrfs_block_rsv *block_rsv,
4661				  u64 orig_bytes,
4662				  enum btrfs_reserve_flush_enum flush)
4663{
4664	struct btrfs_space_info *space_info = block_rsv->space_info;
4665	u64 used;
4666	u64 num_bytes = orig_bytes;
4667	int flush_state = FLUSH_DELAYED_ITEMS_NR;
4668	int ret = 0;
4669	bool flushing = false;
4670
4671again:
4672	ret = 0;
4673	spin_lock(&space_info->lock);
4674	/*
4675	 * We only want to wait if somebody other than us is flushing and we
4676	 * are actually allowed to flush all things.
4677	 */
4678	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4679	       space_info->flush) {
4680		spin_unlock(&space_info->lock);
4681		/*
4682		 * If we have a trans handle we can't wait because the flusher
4683		 * may have to commit the transaction, which would mean we would
4684		 * deadlock since we are waiting for the flusher to finish, but
4685		 * hold the current transaction open.
4686		 */
4687		if (current->journal_info)
4688			return -EAGAIN;
4689		ret = wait_event_killable(space_info->wait, !space_info->flush);
4690		/* Must have been killed, return */
4691		if (ret)
4692			return -EINTR;
4693
4694		spin_lock(&space_info->lock);
4695	}
4696
4697	ret = -ENOSPC;
4698	used = space_info->bytes_used + space_info->bytes_reserved +
4699		space_info->bytes_pinned + space_info->bytes_readonly +
4700		space_info->bytes_may_use;
4701
4702	/*
4703	 * The idea here is that we've not already over-reserved the block group
4704	 * then we can go ahead and save our reservation first and then start
4705	 * flushing if we need to.  Otherwise if we've already overcommitted
4706	 * lets start flushing stuff first and then come back and try to make
4707	 * our reservation.
4708	 */
4709	if (used <= space_info->total_bytes) {
4710		if (used + orig_bytes <= space_info->total_bytes) {
4711			space_info->bytes_may_use += orig_bytes;
4712			trace_btrfs_space_reservation(root->fs_info,
4713				"space_info", space_info->flags, orig_bytes, 1);
4714			ret = 0;
4715		} else {
4716			/*
4717			 * Ok set num_bytes to orig_bytes since we aren't
4718			 * overocmmitted, this way we only try and reclaim what
4719			 * we need.
4720			 */
4721			num_bytes = orig_bytes;
4722		}
4723	} else {
4724		/*
4725		 * Ok we're over committed, set num_bytes to the overcommitted
4726		 * amount plus the amount of bytes that we need for this
4727		 * reservation.
4728		 */
4729		num_bytes = used - space_info->total_bytes +
4730			(orig_bytes * 2);
4731	}
4732
4733	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4734		space_info->bytes_may_use += orig_bytes;
4735		trace_btrfs_space_reservation(root->fs_info, "space_info",
4736					      space_info->flags, orig_bytes,
4737					      1);
4738		ret = 0;
4739	}
4740
4741	/*
4742	 * Couldn't make our reservation, save our place so while we're trying
4743	 * to reclaim space we can actually use it instead of somebody else
4744	 * stealing it from us.
4745	 *
4746	 * We make the other tasks wait for the flush only when we can flush
4747	 * all things.
4748	 */
4749	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4750		flushing = true;
4751		space_info->flush = 1;
4752	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4753		used += orig_bytes;
4754		/*
4755		 * We will do the space reservation dance during log replay,
4756		 * which means we won't have fs_info->fs_root set, so don't do
4757		 * the async reclaim as we will panic.
4758		 */
4759		if (!root->fs_info->log_root_recovering &&
4760		    need_do_async_reclaim(space_info, root->fs_info, used) &&
4761		    !work_busy(&root->fs_info->async_reclaim_work))
4762			queue_work(system_unbound_wq,
4763				   &root->fs_info->async_reclaim_work);
4764	}
4765	spin_unlock(&space_info->lock);
4766
4767	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4768		goto out;
4769
4770	ret = flush_space(root, space_info, num_bytes, orig_bytes,
4771			  flush_state);
4772	flush_state++;
4773
4774	/*
4775	 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4776	 * would happen. So skip delalloc flush.
4777	 */
4778	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4779	    (flush_state == FLUSH_DELALLOC ||
4780	     flush_state == FLUSH_DELALLOC_WAIT))
4781		flush_state = ALLOC_CHUNK;
4782
4783	if (!ret)
4784		goto again;
4785	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4786		 flush_state < COMMIT_TRANS)
4787		goto again;
4788	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4789		 flush_state <= COMMIT_TRANS)
4790		goto again;
4791
4792out:
4793	if (ret == -ENOSPC &&
4794	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4795		struct btrfs_block_rsv *global_rsv =
4796			&root->fs_info->global_block_rsv;
4797
4798		if (block_rsv != global_rsv &&
4799		    !block_rsv_use_bytes(global_rsv, orig_bytes))
4800			ret = 0;
4801	}
4802	if (ret == -ENOSPC)
4803		trace_btrfs_space_reservation(root->fs_info,
4804					      "space_info:enospc",
4805					      space_info->flags, orig_bytes, 1);
4806	if (flushing) {
4807		spin_lock(&space_info->lock);
4808		space_info->flush = 0;
4809		wake_up_all(&space_info->wait);
4810		spin_unlock(&space_info->lock);
4811	}
4812	return ret;
4813}
4814
4815static struct btrfs_block_rsv *get_block_rsv(
4816					const struct btrfs_trans_handle *trans,
4817					const struct btrfs_root *root)
4818{
4819	struct btrfs_block_rsv *block_rsv = NULL;
4820
4821	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4822		block_rsv = trans->block_rsv;
4823
4824	if (root == root->fs_info->csum_root && trans->adding_csums)
4825		block_rsv = trans->block_rsv;
4826
4827	if (root == root->fs_info->uuid_root)
4828		block_rsv = trans->block_rsv;
4829
4830	if (!block_rsv)
4831		block_rsv = root->block_rsv;
4832
4833	if (!block_rsv)
4834		block_rsv = &root->fs_info->empty_block_rsv;
4835
4836	return block_rsv;
4837}
4838
4839static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4840			       u64 num_bytes)
4841{
4842	int ret = -ENOSPC;
4843	spin_lock(&block_rsv->lock);
4844	if (block_rsv->reserved >= num_bytes) {
4845		block_rsv->reserved -= num_bytes;
4846		if (block_rsv->reserved < block_rsv->size)
4847			block_rsv->full = 0;
4848		ret = 0;
4849	}
4850	spin_unlock(&block_rsv->lock);
4851	return ret;
4852}
4853
4854static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4855				u64 num_bytes, int update_size)
4856{
4857	spin_lock(&block_rsv->lock);
4858	block_rsv->reserved += num_bytes;
4859	if (update_size)
4860		block_rsv->size += num_bytes;
4861	else if (block_rsv->reserved >= block_rsv->size)
4862		block_rsv->full = 1;
4863	spin_unlock(&block_rsv->lock);
4864}
4865
4866int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4867			     struct btrfs_block_rsv *dest, u64 num_bytes,
4868			     int min_factor)
4869{
4870	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4871	u64 min_bytes;
4872
4873	if (global_rsv->space_info != dest->space_info)
4874		return -ENOSPC;
4875
4876	spin_lock(&global_rsv->lock);
4877	min_bytes = div_factor(global_rsv->size, min_factor);
4878	if (global_rsv->reserved < min_bytes + num_bytes) {
4879		spin_unlock(&global_rsv->lock);
4880		return -ENOSPC;
4881	}
4882	global_rsv->reserved -= num_bytes;
4883	if (global_rsv->reserved < global_rsv->size)
4884		global_rsv->full = 0;
4885	spin_unlock(&global_rsv->lock);
4886
4887	block_rsv_add_bytes(dest, num_bytes, 1);
4888	return 0;
4889}
4890
4891static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4892				    struct btrfs_block_rsv *block_rsv,
4893				    struct btrfs_block_rsv *dest, u64 num_bytes)
4894{
4895	struct btrfs_space_info *space_info = block_rsv->space_info;
4896
4897	spin_lock(&block_rsv->lock);
4898	if (num_bytes == (u64)-1)
4899		num_bytes = block_rsv->size;
4900	block_rsv->size -= num_bytes;
4901	if (block_rsv->reserved >= block_rsv->size) {
4902		num_bytes = block_rsv->reserved - block_rsv->size;
4903		block_rsv->reserved = block_rsv->size;
4904		block_rsv->full = 1;
4905	} else {
4906		num_bytes = 0;
4907	}
4908	spin_unlock(&block_rsv->lock);
4909
4910	if (num_bytes > 0) {
4911		if (dest) {
4912			spin_lock(&dest->lock);
4913			if (!dest->full) {
4914				u64 bytes_to_add;
4915
4916				bytes_to_add = dest->size - dest->reserved;
4917				bytes_to_add = min(num_bytes, bytes_to_add);
4918				dest->reserved += bytes_to_add;
4919				if (dest->reserved >= dest->size)
4920					dest->full = 1;
4921				num_bytes -= bytes_to_add;
4922			}
4923			spin_unlock(&dest->lock);
4924		}
4925		if (num_bytes) {
4926			spin_lock(&space_info->lock);
4927			space_info->bytes_may_use -= num_bytes;
4928			trace_btrfs_space_reservation(fs_info, "space_info",
4929					space_info->flags, num_bytes, 0);
4930			spin_unlock(&space_info->lock);
4931		}
4932	}
4933}
4934
4935static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4936				   struct btrfs_block_rsv *dst, u64 num_bytes)
4937{
4938	int ret;
4939
4940	ret = block_rsv_use_bytes(src, num_bytes);
4941	if (ret)
4942		return ret;
4943
4944	block_rsv_add_bytes(dst, num_bytes, 1);
4945	return 0;
4946}
4947
4948void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4949{
4950	memset(rsv, 0, sizeof(*rsv));
4951	spin_lock_init(&rsv->lock);
4952	rsv->type = type;
4953}
4954
4955struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4956					      unsigned short type)
4957{
4958	struct btrfs_block_rsv *block_rsv;
4959	struct btrfs_fs_info *fs_info = root->fs_info;
4960
4961	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4962	if (!block_rsv)
4963		return NULL;
4964
4965	btrfs_init_block_rsv(block_rsv, type);
4966	block_rsv->space_info = __find_space_info(fs_info,
4967						  BTRFS_BLOCK_GROUP_METADATA);
4968	return block_rsv;
4969}
4970
4971void btrfs_free_block_rsv(struct btrfs_root *root,
4972			  struct btrfs_block_rsv *rsv)
4973{
4974	if (!rsv)
4975		return;
4976	btrfs_block_rsv_release(root, rsv, (u64)-1);
4977	kfree(rsv);
4978}
4979
4980void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
4981{
4982	kfree(rsv);
4983}
4984
4985int btrfs_block_rsv_add(struct btrfs_root *root,
4986			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4987			enum btrfs_reserve_flush_enum flush)
4988{
4989	int ret;
4990
4991	if (num_bytes == 0)
4992		return 0;
4993
4994	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4995	if (!ret) {
4996		block_rsv_add_bytes(block_rsv, num_bytes, 1);
4997		return 0;
4998	}
4999
5000	return ret;
5001}
5002
5003int btrfs_block_rsv_check(struct btrfs_root *root,
5004			  struct btrfs_block_rsv *block_rsv, int min_factor)
5005{
5006	u64 num_bytes = 0;
5007	int ret = -ENOSPC;
5008
5009	if (!block_rsv)
5010		return 0;
5011
5012	spin_lock(&block_rsv->lock);
5013	num_bytes = div_factor(block_rsv->size, min_factor);
5014	if (block_rsv->reserved >= num_bytes)
5015		ret = 0;
5016	spin_unlock(&block_rsv->lock);
5017
5018	return ret;
5019}
5020
5021int btrfs_block_rsv_refill(struct btrfs_root *root,
5022			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5023			   enum btrfs_reserve_flush_enum flush)
5024{
5025	u64 num_bytes = 0;
5026	int ret = -ENOSPC;
5027
5028	if (!block_rsv)
5029		return 0;
5030
5031	spin_lock(&block_rsv->lock);
5032	num_bytes = min_reserved;
5033	if (block_rsv->reserved >= num_bytes)
5034		ret = 0;
5035	else
5036		num_bytes -= block_rsv->reserved;
5037	spin_unlock(&block_rsv->lock);
5038
5039	if (!ret)
5040		return 0;
5041
5042	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5043	if (!ret) {
5044		block_rsv_add_bytes(block_rsv, num_bytes, 0);
5045		return 0;
5046	}
5047
5048	return ret;
5049}
5050
5051int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5052			    struct btrfs_block_rsv *dst_rsv,
5053			    u64 num_bytes)
5054{
5055	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5056}
5057
5058void btrfs_block_rsv_release(struct btrfs_root *root,
5059			     struct btrfs_block_rsv *block_rsv,
5060			     u64 num_bytes)
5061{
5062	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5063	if (global_rsv == block_rsv ||
5064	    block_rsv->space_info != global_rsv->space_info)
5065		global_rsv = NULL;
5066	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5067				num_bytes);
5068}
5069
5070/*
5071 * helper to calculate size of global block reservation.
5072 * the desired value is sum of space used by extent tree,
5073 * checksum tree and root tree
5074 */
5075static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5076{
5077	struct btrfs_space_info *sinfo;
5078	u64 num_bytes;
5079	u64 meta_used;
5080	u64 data_used;
5081	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5082
5083	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5084	spin_lock(&sinfo->lock);
5085	data_used = sinfo->bytes_used;
5086	spin_unlock(&sinfo->lock);
5087
5088	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5089	spin_lock(&sinfo->lock);
5090	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5091		data_used = 0;
5092	meta_used = sinfo->bytes_used;
5093	spin_unlock(&sinfo->lock);
5094
5095	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5096		    csum_size * 2;
5097	num_bytes += div_u64(data_used + meta_used, 50);
5098
5099	if (num_bytes * 3 > meta_used)
5100		num_bytes = div_u64(meta_used, 3);
5101
5102	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5103}
5104
5105static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5106{
5107	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5108	struct btrfs_space_info *sinfo = block_rsv->space_info;
5109	u64 num_bytes;
5110
5111	num_bytes = calc_global_metadata_size(fs_info);
5112
5113	spin_lock(&sinfo->lock);
5114	spin_lock(&block_rsv->lock);
5115
5116	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5117
5118	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5119		    sinfo->bytes_reserved + sinfo->bytes_readonly +
5120		    sinfo->bytes_may_use;
5121
5122	if (sinfo->total_bytes > num_bytes) {
5123		num_bytes = sinfo->total_bytes - num_bytes;
5124		block_rsv->reserved += num_bytes;
5125		sinfo->bytes_may_use += num_bytes;
5126		trace_btrfs_space_reservation(fs_info, "space_info",
5127				      sinfo->flags, num_bytes, 1);
5128	}
5129
5130	if (block_rsv->reserved >= block_rsv->size) {
5131		num_bytes = block_rsv->reserved - block_rsv->size;
5132		sinfo->bytes_may_use -= num_bytes;
5133		trace_btrfs_space_reservation(fs_info, "space_info",
5134				      sinfo->flags, num_bytes, 0);
5135		block_rsv->reserved = block_rsv->size;
5136		block_rsv->full = 1;
5137	}
5138
5139	spin_unlock(&block_rsv->lock);
5140	spin_unlock(&sinfo->lock);
5141}
5142
5143static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5144{
5145	struct btrfs_space_info *space_info;
5146
5147	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5148	fs_info->chunk_block_rsv.space_info = space_info;
5149
5150	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5151	fs_info->global_block_rsv.space_info = space_info;
5152	fs_info->delalloc_block_rsv.space_info = space_info;
5153	fs_info->trans_block_rsv.space_info = space_info;
5154	fs_info->empty_block_rsv.space_info = space_info;
5155	fs_info->delayed_block_rsv.space_info = space_info;
5156
5157	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5158	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5159	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5160	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5161	if (fs_info->quota_root)
5162		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5163	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5164
5165	update_global_block_rsv(fs_info);
5166}
5167
5168static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5169{
5170	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5171				(u64)-1);
5172	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5173	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5174	WARN_ON(fs_info->trans_block_rsv.size > 0);
5175	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5176	WARN_ON(fs_info->chunk_block_rsv.size > 0);
5177	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5178	WARN_ON(fs_info->delayed_block_rsv.size > 0);
5179	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5180}
5181
5182void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5183				  struct btrfs_root *root)
5184{
5185	if (!trans->block_rsv)
5186		return;
5187
5188	if (!trans->bytes_reserved)
5189		return;
5190
5191	trace_btrfs_space_reservation(root->fs_info, "transaction",
5192				      trans->transid, trans->bytes_reserved, 0);
5193	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5194	trans->bytes_reserved = 0;
5195}
5196
5197/* Can only return 0 or -ENOSPC */
5198int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5199				  struct inode *inode)
5200{
5201	struct btrfs_root *root = BTRFS_I(inode)->root;
5202	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5203	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5204
5205	/*
5206	 * We need to hold space in order to delete our orphan item once we've
5207	 * added it, so this takes the reservation so we can release it later
5208	 * when we are truly done with the orphan item.
5209	 */
5210	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5211	trace_btrfs_space_reservation(root->fs_info, "orphan",
5212				      btrfs_ino(inode), num_bytes, 1);
5213	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5214}
5215
5216void btrfs_orphan_release_metadata(struct inode *inode)
5217{
5218	struct btrfs_root *root = BTRFS_I(inode)->root;
5219	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5220	trace_btrfs_space_reservation(root->fs_info, "orphan",
5221				      btrfs_ino(inode), num_bytes, 0);
5222	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5223}
5224
5225/*
5226 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5227 * root: the root of the parent directory
5228 * rsv: block reservation
5229 * items: the number of items that we need do reservation
5230 * qgroup_reserved: used to return the reserved size in qgroup
5231 *
5232 * This function is used to reserve the space for snapshot/subvolume
5233 * creation and deletion. Those operations are different with the
5234 * common file/directory operations, they change two fs/file trees
5235 * and root tree, the number of items that the qgroup reserves is
5236 * different with the free space reservation. So we can not use
5237 * the space reseravtion mechanism in start_transaction().
5238 */
5239int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5240				     struct btrfs_block_rsv *rsv,
5241				     int items,
5242				     u64 *qgroup_reserved,
5243				     bool use_global_rsv)
5244{
5245	u64 num_bytes;
5246	int ret;
5247	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5248
5249	if (root->fs_info->quota_enabled) {
5250		/* One for parent inode, two for dir entries */
5251		num_bytes = 3 * root->nodesize;
5252		ret = btrfs_qgroup_reserve(root, num_bytes);
5253		if (ret)
5254			return ret;
5255	} else {
5256		num_bytes = 0;
5257	}
5258
5259	*qgroup_reserved = num_bytes;
5260
5261	num_bytes = btrfs_calc_trans_metadata_size(root, items);
5262	rsv->space_info = __find_space_info(root->fs_info,
5263					    BTRFS_BLOCK_GROUP_METADATA);
5264	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5265				  BTRFS_RESERVE_FLUSH_ALL);
5266
5267	if (ret == -ENOSPC && use_global_rsv)
5268		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5269
5270	if (ret) {
5271		if (*qgroup_reserved)
5272			btrfs_qgroup_free(root, *qgroup_reserved);
5273	}
5274
5275	return ret;
5276}
5277
5278void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5279				      struct btrfs_block_rsv *rsv,
5280				      u64 qgroup_reserved)
5281{
5282	btrfs_block_rsv_release(root, rsv, (u64)-1);
5283}
5284
5285/**
5286 * drop_outstanding_extent - drop an outstanding extent
5287 * @inode: the inode we're dropping the extent for
5288 * @num_bytes: the number of bytes we're relaseing.
5289 *
5290 * This is called when we are freeing up an outstanding extent, either called
5291 * after an error or after an extent is written.  This will return the number of
5292 * reserved extents that need to be freed.  This must be called with
5293 * BTRFS_I(inode)->lock held.
5294 */
5295static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5296{
5297	unsigned drop_inode_space = 0;
5298	unsigned dropped_extents = 0;
5299	unsigned num_extents = 0;
5300
5301	num_extents = (unsigned)div64_u64(num_bytes +
5302					  BTRFS_MAX_EXTENT_SIZE - 1,
5303					  BTRFS_MAX_EXTENT_SIZE);
5304	ASSERT(num_extents);
5305	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5306	BTRFS_I(inode)->outstanding_extents -= num_extents;
5307
5308	if (BTRFS_I(inode)->outstanding_extents == 0 &&
5309	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5310			       &BTRFS_I(inode)->runtime_flags))
5311		drop_inode_space = 1;
5312
5313	/*
5314	 * If we have more or the same amount of outsanding extents than we have
5315	 * reserved then we need to leave the reserved extents count alone.
5316	 */
5317	if (BTRFS_I(inode)->outstanding_extents >=
5318	    BTRFS_I(inode)->reserved_extents)
5319		return drop_inode_space;
5320
5321	dropped_extents = BTRFS_I(inode)->reserved_extents -
5322		BTRFS_I(inode)->outstanding_extents;
5323	BTRFS_I(inode)->reserved_extents -= dropped_extents;
5324	return dropped_extents + drop_inode_space;
5325}
5326
5327/**
5328 * calc_csum_metadata_size - return the amount of metada space that must be
5329 *	reserved/free'd for the given bytes.
5330 * @inode: the inode we're manipulating
5331 * @num_bytes: the number of bytes in question
5332 * @reserve: 1 if we are reserving space, 0 if we are freeing space
5333 *
5334 * This adjusts the number of csum_bytes in the inode and then returns the
5335 * correct amount of metadata that must either be reserved or freed.  We
5336 * calculate how many checksums we can fit into one leaf and then divide the
5337 * number of bytes that will need to be checksumed by this value to figure out
5338 * how many checksums will be required.  If we are adding bytes then the number
5339 * may go up and we will return the number of additional bytes that must be
5340 * reserved.  If it is going down we will return the number of bytes that must
5341 * be freed.
5342 *
5343 * This must be called with BTRFS_I(inode)->lock held.
5344 */
5345static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5346				   int reserve)
5347{
5348	struct btrfs_root *root = BTRFS_I(inode)->root;
5349	u64 old_csums, num_csums;
5350
5351	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5352	    BTRFS_I(inode)->csum_bytes == 0)
5353		return 0;
5354
5355	old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5356	if (reserve)
5357		BTRFS_I(inode)->csum_bytes += num_bytes;
5358	else
5359		BTRFS_I(inode)->csum_bytes -= num_bytes;
5360	num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5361
5362	/* No change, no need to reserve more */
5363	if (old_csums == num_csums)
5364		return 0;
5365
5366	if (reserve)
5367		return btrfs_calc_trans_metadata_size(root,
5368						      num_csums - old_csums);
5369
5370	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5371}
5372
5373int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5374{
5375	struct btrfs_root *root = BTRFS_I(inode)->root;
5376	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5377	u64 to_reserve = 0;
5378	u64 csum_bytes;
5379	unsigned nr_extents = 0;
5380	int extra_reserve = 0;
5381	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5382	int ret = 0;
5383	bool delalloc_lock = true;
5384	u64 to_free = 0;
5385	unsigned dropped;
5386
5387	/* If we are a free space inode we need to not flush since we will be in
5388	 * the middle of a transaction commit.  We also don't need the delalloc
5389	 * mutex since we won't race with anybody.  We need this mostly to make
5390	 * lockdep shut its filthy mouth.
5391	 */
5392	if (btrfs_is_free_space_inode(inode)) {
5393		flush = BTRFS_RESERVE_NO_FLUSH;
5394		delalloc_lock = false;
5395	}
5396
5397	if (flush != BTRFS_RESERVE_NO_FLUSH &&
5398	    btrfs_transaction_in_commit(root->fs_info))
5399		schedule_timeout(1);
5400
5401	if (delalloc_lock)
5402		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5403
5404	num_bytes = ALIGN(num_bytes, root->sectorsize);
5405
5406	spin_lock(&BTRFS_I(inode)->lock);
5407	nr_extents = (unsigned)div64_u64(num_bytes +
5408					 BTRFS_MAX_EXTENT_SIZE - 1,
5409					 BTRFS_MAX_EXTENT_SIZE);
5410	BTRFS_I(inode)->outstanding_extents += nr_extents;
5411	nr_extents = 0;
5412
5413	if (BTRFS_I(inode)->outstanding_extents >
5414	    BTRFS_I(inode)->reserved_extents)
5415		nr_extents = BTRFS_I(inode)->outstanding_extents -
5416			BTRFS_I(inode)->reserved_extents;
5417
5418	/*
5419	 * Add an item to reserve for updating the inode when we complete the
5420	 * delalloc io.
5421	 */
5422	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5423		      &BTRFS_I(inode)->runtime_flags)) {
5424		nr_extents++;
5425		extra_reserve = 1;
5426	}
5427
5428	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5429	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5430	csum_bytes = BTRFS_I(inode)->csum_bytes;
5431	spin_unlock(&BTRFS_I(inode)->lock);
5432
5433	if (root->fs_info->quota_enabled) {
5434		ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5435		if (ret)
5436			goto out_fail;
5437	}
5438
5439	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5440	if (unlikely(ret)) {
5441		if (root->fs_info->quota_enabled)
5442			btrfs_qgroup_free(root, nr_extents * root->nodesize);
5443		goto out_fail;
5444	}
5445
5446	spin_lock(&BTRFS_I(inode)->lock);
5447	if (extra_reserve) {
5448		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5449			&BTRFS_I(inode)->runtime_flags);
5450		nr_extents--;
5451	}
5452	BTRFS_I(inode)->reserved_extents += nr_extents;
5453	spin_unlock(&BTRFS_I(inode)->lock);
5454
5455	if (delalloc_lock)
5456		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5457
5458	if (to_reserve)
5459		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5460					      btrfs_ino(inode), to_reserve, 1);
5461	block_rsv_add_bytes(block_rsv, to_reserve, 1);
5462
5463	return 0;
5464
5465out_fail:
5466	spin_lock(&BTRFS_I(inode)->lock);
5467	dropped = drop_outstanding_extent(inode, num_bytes);
5468	/*
5469	 * If the inodes csum_bytes is the same as the original
5470	 * csum_bytes then we know we haven't raced with any free()ers
5471	 * so we can just reduce our inodes csum bytes and carry on.
5472	 */
5473	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5474		calc_csum_metadata_size(inode, num_bytes, 0);
5475	} else {
5476		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5477		u64 bytes;
5478
5479		/*
5480		 * This is tricky, but first we need to figure out how much we
5481		 * free'd from any free-ers that occured during this
5482		 * reservation, so we reset ->csum_bytes to the csum_bytes
5483		 * before we dropped our lock, and then call the free for the
5484		 * number of bytes that were freed while we were trying our
5485		 * reservation.
5486		 */
5487		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5488		BTRFS_I(inode)->csum_bytes = csum_bytes;
5489		to_free = calc_csum_metadata_size(inode, bytes, 0);
5490
5491
5492		/*
5493		 * Now we need to see how much we would have freed had we not
5494		 * been making this reservation and our ->csum_bytes were not
5495		 * artificially inflated.
5496		 */
5497		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5498		bytes = csum_bytes - orig_csum_bytes;
5499		bytes = calc_csum_metadata_size(inode, bytes, 0);
5500
5501		/*
5502		 * Now reset ->csum_bytes to what it should be.  If bytes is
5503		 * more than to_free then we would have free'd more space had we
5504		 * not had an artificially high ->csum_bytes, so we need to free
5505		 * the remainder.  If bytes is the same or less then we don't
5506		 * need to do anything, the other free-ers did the correct
5507		 * thing.
5508		 */
5509		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5510		if (bytes > to_free)
5511			to_free = bytes - to_free;
5512		else
5513			to_free = 0;
5514	}
5515	spin_unlock(&BTRFS_I(inode)->lock);
5516	if (dropped)
5517		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5518
5519	if (to_free) {
5520		btrfs_block_rsv_release(root, block_rsv, to_free);
5521		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5522					      btrfs_ino(inode), to_free, 0);
5523	}
5524	if (delalloc_lock)
5525		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5526	return ret;
5527}
5528
5529/**
5530 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5531 * @inode: the inode to release the reservation for
5532 * @num_bytes: the number of bytes we're releasing
5533 *
5534 * This will release the metadata reservation for an inode.  This can be called
5535 * once we complete IO for a given set of bytes to release their metadata
5536 * reservations.
5537 */
5538void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5539{
5540	struct btrfs_root *root = BTRFS_I(inode)->root;
5541	u64 to_free = 0;
5542	unsigned dropped;
5543
5544	num_bytes = ALIGN(num_bytes, root->sectorsize);
5545	spin_lock(&BTRFS_I(inode)->lock);
5546	dropped = drop_outstanding_extent(inode, num_bytes);
5547
5548	if (num_bytes)
5549		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5550	spin_unlock(&BTRFS_I(inode)->lock);
5551	if (dropped > 0)
5552		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5553
5554	if (btrfs_test_is_dummy_root(root))
5555		return;
5556
5557	trace_btrfs_space_reservation(root->fs_info, "delalloc",
5558				      btrfs_ino(inode), to_free, 0);
5559
5560	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5561				to_free);
5562}
5563
5564/**
5565 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5566 * @inode: inode we're writing to
5567 * @num_bytes: the number of bytes we want to allocate
5568 *
5569 * This will do the following things
5570 *
5571 * o reserve space in the data space info for num_bytes
5572 * o reserve space in the metadata space info based on number of outstanding
5573 *   extents and how much csums will be needed
5574 * o add to the inodes ->delalloc_bytes
5575 * o add it to the fs_info's delalloc inodes list.
5576 *
5577 * This will return 0 for success and -ENOSPC if there is no space left.
5578 */
5579int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5580{
5581	int ret;
5582
5583	ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5584	if (ret)
5585		return ret;
5586
5587	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5588	if (ret) {
5589		btrfs_free_reserved_data_space(inode, num_bytes);
5590		return ret;
5591	}
5592
5593	return 0;
5594}
5595
5596/**
5597 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5598 * @inode: inode we're releasing space for
5599 * @num_bytes: the number of bytes we want to free up
5600 *
5601 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5602 * called in the case that we don't need the metadata AND data reservations
5603 * anymore.  So if there is an error or we insert an inline extent.
5604 *
5605 * This function will release the metadata space that was not used and will
5606 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5607 * list if there are no delalloc bytes left.
5608 */
5609void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5610{
5611	btrfs_delalloc_release_metadata(inode, num_bytes);
5612	btrfs_free_reserved_data_space(inode, num_bytes);
5613}
5614
5615static int update_block_group(struct btrfs_trans_handle *trans,
5616			      struct btrfs_root *root, u64 bytenr,
5617			      u64 num_bytes, int alloc)
5618{
5619	struct btrfs_block_group_cache *cache = NULL;
5620	struct btrfs_fs_info *info = root->fs_info;
5621	u64 total = num_bytes;
5622	u64 old_val;
5623	u64 byte_in_group;
5624	int factor;
5625
5626	/* block accounting for super block */
5627	spin_lock(&info->delalloc_root_lock);
5628	old_val = btrfs_super_bytes_used(info->super_copy);
5629	if (alloc)
5630		old_val += num_bytes;
5631	else
5632		old_val -= num_bytes;
5633	btrfs_set_super_bytes_used(info->super_copy, old_val);
5634	spin_unlock(&info->delalloc_root_lock);
5635
5636	while (total) {
5637		cache = btrfs_lookup_block_group(info, bytenr);
5638		if (!cache)
5639			return -ENOENT;
5640		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5641				    BTRFS_BLOCK_GROUP_RAID1 |
5642				    BTRFS_BLOCK_GROUP_RAID10))
5643			factor = 2;
5644		else
5645			factor = 1;
5646		/*
5647		 * If this block group has free space cache written out, we
5648		 * need to make sure to load it if we are removing space.  This
5649		 * is because we need the unpinning stage to actually add the
5650		 * space back to the block group, otherwise we will leak space.
5651		 */
5652		if (!alloc && cache->cached == BTRFS_CACHE_NO)
5653			cache_block_group(cache, 1);
5654
5655		byte_in_group = bytenr - cache->key.objectid;
5656		WARN_ON(byte_in_group > cache->key.offset);
5657
5658		spin_lock(&cache->space_info->lock);
5659		spin_lock(&cache->lock);
5660
5661		if (btrfs_test_opt(root, SPACE_CACHE) &&
5662		    cache->disk_cache_state < BTRFS_DC_CLEAR)
5663			cache->disk_cache_state = BTRFS_DC_CLEAR;
5664
5665		old_val = btrfs_block_group_used(&cache->item);
5666		num_bytes = min(total, cache->key.offset - byte_in_group);
5667		if (alloc) {
5668			old_val += num_bytes;
5669			btrfs_set_block_group_used(&cache->item, old_val);
5670			cache->reserved -= num_bytes;
5671			cache->space_info->bytes_reserved -= num_bytes;
5672			cache->space_info->bytes_used += num_bytes;
5673			cache->space_info->disk_used += num_bytes * factor;
5674			spin_unlock(&cache->lock);
5675			spin_unlock(&cache->space_info->lock);
5676		} else {
5677			old_val -= num_bytes;
5678			btrfs_set_block_group_used(&cache->item, old_val);
5679			cache->pinned += num_bytes;
5680			cache->space_info->bytes_pinned += num_bytes;
5681			cache->space_info->bytes_used -= num_bytes;
5682			cache->space_info->disk_used -= num_bytes * factor;
5683			spin_unlock(&cache->lock);
5684			spin_unlock(&cache->space_info->lock);
5685
5686			set_extent_dirty(info->pinned_extents,
5687					 bytenr, bytenr + num_bytes - 1,
5688					 GFP_NOFS | __GFP_NOFAIL);
5689			/*
5690			 * No longer have used bytes in this block group, queue
5691			 * it for deletion.
5692			 */
5693			if (old_val == 0) {
5694				spin_lock(&info->unused_bgs_lock);
5695				if (list_empty(&cache->bg_list)) {
5696					btrfs_get_block_group(cache);
5697					list_add_tail(&cache->bg_list,
5698						      &info->unused_bgs);
5699				}
5700				spin_unlock(&info->unused_bgs_lock);
5701			}
5702		}
5703
5704		spin_lock(&trans->transaction->dirty_bgs_lock);
5705		if (list_empty(&cache->dirty_list)) {
5706			list_add_tail(&cache->dirty_list,
5707				      &trans->transaction->dirty_bgs);
5708				trans->transaction->num_dirty_bgs++;
5709			btrfs_get_block_group(cache);
5710		}
5711		spin_unlock(&trans->transaction->dirty_bgs_lock);
5712
5713		btrfs_put_block_group(cache);
5714		total -= num_bytes;
5715		bytenr += num_bytes;
5716	}
5717	return 0;
5718}
5719
5720static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5721{
5722	struct btrfs_block_group_cache *cache;
5723	u64 bytenr;
5724
5725	spin_lock(&root->fs_info->block_group_cache_lock);
5726	bytenr = root->fs_info->first_logical_byte;
5727	spin_unlock(&root->fs_info->block_group_cache_lock);
5728
5729	if (bytenr < (u64)-1)
5730		return bytenr;
5731
5732	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5733	if (!cache)
5734		return 0;
5735
5736	bytenr = cache->key.objectid;
5737	btrfs_put_block_group(cache);
5738
5739	return bytenr;
5740}
5741
5742static int pin_down_extent(struct btrfs_root *root,
5743			   struct btrfs_block_group_cache *cache,
5744			   u64 bytenr, u64 num_bytes, int reserved)
5745{
5746	spin_lock(&cache->space_info->lock);
5747	spin_lock(&cache->lock);
5748	cache->pinned += num_bytes;
5749	cache->space_info->bytes_pinned += num_bytes;
5750	if (reserved) {
5751		cache->reserved -= num_bytes;
5752		cache->space_info->bytes_reserved -= num_bytes;
5753	}
5754	spin_unlock(&cache->lock);
5755	spin_unlock(&cache->space_info->lock);
5756
5757	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5758			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5759	if (reserved)
5760		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5761	return 0;
5762}
5763
5764/*
5765 * this function must be called within transaction
5766 */
5767int btrfs_pin_extent(struct btrfs_root *root,
5768		     u64 bytenr, u64 num_bytes, int reserved)
5769{
5770	struct btrfs_block_group_cache *cache;
5771
5772	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5773	BUG_ON(!cache); /* Logic error */
5774
5775	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5776
5777	btrfs_put_block_group(cache);
5778	return 0;
5779}
5780
5781/*
5782 * this function must be called within transaction
5783 */
5784int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5785				    u64 bytenr, u64 num_bytes)
5786{
5787	struct btrfs_block_group_cache *cache;
5788	int ret;
5789
5790	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5791	if (!cache)
5792		return -EINVAL;
5793
5794	/*
5795	 * pull in the free space cache (if any) so that our pin
5796	 * removes the free space from the cache.  We have load_only set
5797	 * to one because the slow code to read in the free extents does check
5798	 * the pinned extents.
5799	 */
5800	cache_block_group(cache, 1);
5801
5802	pin_down_extent(root, cache, bytenr, num_bytes, 0);
5803
5804	/* remove us from the free space cache (if we're there at all) */
5805	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5806	btrfs_put_block_group(cache);
5807	return ret;
5808}
5809
5810static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5811{
5812	int ret;
5813	struct btrfs_block_group_cache *block_group;
5814	struct btrfs_caching_control *caching_ctl;
5815
5816	block_group = btrfs_lookup_block_group(root->fs_info, start);
5817	if (!block_group)
5818		return -EINVAL;
5819
5820	cache_block_group(block_group, 0);
5821	caching_ctl = get_caching_control(block_group);
5822
5823	if (!caching_ctl) {
5824		/* Logic error */
5825		BUG_ON(!block_group_cache_done(block_group));
5826		ret = btrfs_remove_free_space(block_group, start, num_bytes);
5827	} else {
5828		mutex_lock(&caching_ctl->mutex);
5829
5830		if (start >= caching_ctl->progress) {
5831			ret = add_excluded_extent(root, start, num_bytes);
5832		} else if (start + num_bytes <= caching_ctl->progress) {
5833			ret = btrfs_remove_free_space(block_group,
5834						      start, num_bytes);
5835		} else {
5836			num_bytes = caching_ctl->progress - start;
5837			ret = btrfs_remove_free_space(block_group,
5838						      start, num_bytes);
5839			if (ret)
5840				goto out_lock;
5841
5842			num_bytes = (start + num_bytes) -
5843				caching_ctl->progress;
5844			start = caching_ctl->progress;
5845			ret = add_excluded_extent(root, start, num_bytes);
5846		}
5847out_lock:
5848		mutex_unlock(&caching_ctl->mutex);
5849		put_caching_control(caching_ctl);
5850	}
5851	btrfs_put_block_group(block_group);
5852	return ret;
5853}
5854
5855int btrfs_exclude_logged_extents(struct btrfs_root *log,
5856				 struct extent_buffer *eb)
5857{
5858	struct btrfs_file_extent_item *item;
5859	struct btrfs_key key;
5860	int found_type;
5861	int i;
5862
5863	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5864		return 0;
5865
5866	for (i = 0; i < btrfs_header_nritems(eb); i++) {
5867		btrfs_item_key_to_cpu(eb, &key, i);
5868		if (key.type != BTRFS_EXTENT_DATA_KEY)
5869			continue;
5870		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5871		found_type = btrfs_file_extent_type(eb, item);
5872		if (found_type == BTRFS_FILE_EXTENT_INLINE)
5873			continue;
5874		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5875			continue;
5876		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5877		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5878		__exclude_logged_extent(log, key.objectid, key.offset);
5879	}
5880
5881	return 0;
5882}
5883
5884/**
5885 * btrfs_update_reserved_bytes - update the block_group and space info counters
5886 * @cache:	The cache we are manipulating
5887 * @num_bytes:	The number of bytes in question
5888 * @reserve:	One of the reservation enums
5889 * @delalloc:   The blocks are allocated for the delalloc write
5890 *
5891 * This is called by the allocator when it reserves space, or by somebody who is
5892 * freeing space that was never actually used on disk.  For example if you
5893 * reserve some space for a new leaf in transaction A and before transaction A
5894 * commits you free that leaf, you call this with reserve set to 0 in order to
5895 * clear the reservation.
5896 *
5897 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5898 * ENOSPC accounting.  For data we handle the reservation through clearing the
5899 * delalloc bits in the io_tree.  We have to do this since we could end up
5900 * allocating less disk space for the amount of data we have reserved in the
5901 * case of compression.
5902 *
5903 * If this is a reservation and the block group has become read only we cannot
5904 * make the reservation and return -EAGAIN, otherwise this function always
5905 * succeeds.
5906 */
5907static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5908				       u64 num_bytes, int reserve, int delalloc)
5909{
5910	struct btrfs_space_info *space_info = cache->space_info;
5911	int ret = 0;
5912
5913	spin_lock(&space_info->lock);
5914	spin_lock(&cache->lock);
5915	if (reserve != RESERVE_FREE) {
5916		if (cache->ro) {
5917			ret = -EAGAIN;
5918		} else {
5919			cache->reserved += num_bytes;
5920			space_info->bytes_reserved += num_bytes;
5921			if (reserve == RESERVE_ALLOC) {
5922				trace_btrfs_space_reservation(cache->fs_info,
5923						"space_info", space_info->flags,
5924						num_bytes, 0);
5925				space_info->bytes_may_use -= num_bytes;
5926			}
5927
5928			if (delalloc)
5929				cache->delalloc_bytes += num_bytes;
5930		}
5931	} else {
5932		if (cache->ro)
5933			space_info->bytes_readonly += num_bytes;
5934		cache->reserved -= num_bytes;
5935		space_info->bytes_reserved -= num_bytes;
5936
5937		if (delalloc)
5938			cache->delalloc_bytes -= num_bytes;
5939	}
5940	spin_unlock(&cache->lock);
5941	spin_unlock(&space_info->lock);
5942	return ret;
5943}
5944
5945void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5946				struct btrfs_root *root)
5947{
5948	struct btrfs_fs_info *fs_info = root->fs_info;
5949	struct btrfs_caching_control *next;
5950	struct btrfs_caching_control *caching_ctl;
5951	struct btrfs_block_group_cache *cache;
5952
5953	down_write(&fs_info->commit_root_sem);
5954
5955	list_for_each_entry_safe(caching_ctl, next,
5956				 &fs_info->caching_block_groups, list) {
5957		cache = caching_ctl->block_group;
5958		if (block_group_cache_done(cache)) {
5959			cache->last_byte_to_unpin = (u64)-1;
5960			list_del_init(&caching_ctl->list);
5961			put_caching_control(caching_ctl);
5962		} else {
5963			cache->last_byte_to_unpin = caching_ctl->progress;
5964		}
5965	}
5966
5967	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5968		fs_info->pinned_extents = &fs_info->freed_extents[1];
5969	else
5970		fs_info->pinned_extents = &fs_info->freed_extents[0];
5971
5972	up_write(&fs_info->commit_root_sem);
5973
5974	update_global_block_rsv(fs_info);
5975}
5976
5977static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
5978			      const bool return_free_space)
5979{
5980	struct btrfs_fs_info *fs_info = root->fs_info;
5981	struct btrfs_block_group_cache *cache = NULL;
5982	struct btrfs_space_info *space_info;
5983	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5984	u64 len;
5985	bool readonly;
5986
5987	while (start <= end) {
5988		readonly = false;
5989		if (!cache ||
5990		    start >= cache->key.objectid + cache->key.offset) {
5991			if (cache)
5992				btrfs_put_block_group(cache);
5993			cache = btrfs_lookup_block_group(fs_info, start);
5994			BUG_ON(!cache); /* Logic error */
5995		}
5996
5997		len = cache->key.objectid + cache->key.offset - start;
5998		len = min(len, end + 1 - start);
5999
6000		if (start < cache->last_byte_to_unpin) {
6001			len = min(len, cache->last_byte_to_unpin - start);
6002			if (return_free_space)
6003				btrfs_add_free_space(cache, start, len);
6004		}
6005
6006		start += len;
6007		space_info = cache->space_info;
6008
6009		spin_lock(&space_info->lock);
6010		spin_lock(&cache->lock);
6011		cache->pinned -= len;
6012		space_info->bytes_pinned -= len;
6013		percpu_counter_add(&space_info->total_bytes_pinned, -len);
6014		if (cache->ro) {
6015			space_info->bytes_readonly += len;
6016			readonly = true;
6017		}
6018		spin_unlock(&cache->lock);
6019		if (!readonly && global_rsv->space_info == space_info) {
6020			spin_lock(&global_rsv->lock);
6021			if (!global_rsv->full) {
6022				len = min(len, global_rsv->size -
6023					  global_rsv->reserved);
6024				global_rsv->reserved += len;
6025				space_info->bytes_may_use += len;
6026				if (global_rsv->reserved >= global_rsv->size)
6027					global_rsv->full = 1;
6028			}
6029			spin_unlock(&global_rsv->lock);
6030		}
6031		spin_unlock(&space_info->lock);
6032	}
6033
6034	if (cache)
6035		btrfs_put_block_group(cache);
6036	return 0;
6037}
6038
6039int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6040			       struct btrfs_root *root)
6041{
6042	struct btrfs_fs_info *fs_info = root->fs_info;
6043	struct extent_io_tree *unpin;
6044	u64 start;
6045	u64 end;
6046	int ret;
6047
6048	if (trans->aborted)
6049		return 0;
6050
6051	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6052		unpin = &fs_info->freed_extents[1];
6053	else
6054		unpin = &fs_info->freed_extents[0];
6055
6056	while (1) {
6057		mutex_lock(&fs_info->unused_bg_unpin_mutex);
6058		ret = find_first_extent_bit(unpin, 0, &start, &end,
6059					    EXTENT_DIRTY, NULL);
6060		if (ret) {
6061			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6062			break;
6063		}
6064
6065		if (btrfs_test_opt(root, DISCARD))
6066			ret = btrfs_discard_extent(root, start,
6067						   end + 1 - start, NULL);
6068
6069		clear_extent_dirty(unpin, start, end, GFP_NOFS);
6070		unpin_extent_range(root, start, end, true);
6071		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6072		cond_resched();
6073	}
6074
6075	return 0;
6076}
6077
6078static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6079			     u64 owner, u64 root_objectid)
6080{
6081	struct btrfs_space_info *space_info;
6082	u64 flags;
6083
6084	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6085		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6086			flags = BTRFS_BLOCK_GROUP_SYSTEM;
6087		else
6088			flags = BTRFS_BLOCK_GROUP_METADATA;
6089	} else {
6090		flags = BTRFS_BLOCK_GROUP_DATA;
6091	}
6092
6093	space_info = __find_space_info(fs_info, flags);
6094	BUG_ON(!space_info); /* Logic bug */
6095	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6096}
6097
6098
6099static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6100				struct btrfs_root *root,
6101				u64 bytenr, u64 num_bytes, u64 parent,
6102				u64 root_objectid, u64 owner_objectid,
6103				u64 owner_offset, int refs_to_drop,
6104				struct btrfs_delayed_extent_op *extent_op,
6105				int no_quota)
6106{
6107	struct btrfs_key key;
6108	struct btrfs_path *path;
6109	struct btrfs_fs_info *info = root->fs_info;
6110	struct btrfs_root *extent_root = info->extent_root;
6111	struct extent_buffer *leaf;
6112	struct btrfs_extent_item *ei;
6113	struct btrfs_extent_inline_ref *iref;
6114	int ret;
6115	int is_data;
6116	int extent_slot = 0;
6117	int found_extent = 0;
6118	int num_to_del = 1;
6119	u32 item_size;
6120	u64 refs;
6121	int last_ref = 0;
6122	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
6123	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6124						 SKINNY_METADATA);
6125
6126	if (!info->quota_enabled || !is_fstree(root_objectid))
6127		no_quota = 1;
6128
6129	path = btrfs_alloc_path();
6130	if (!path)
6131		return -ENOMEM;
6132
6133	path->reada = 1;
6134	path->leave_spinning = 1;
6135
6136	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6137	BUG_ON(!is_data && refs_to_drop != 1);
6138
6139	if (is_data)
6140		skinny_metadata = 0;
6141
6142	ret = lookup_extent_backref(trans, extent_root, path, &iref,
6143				    bytenr, num_bytes, parent,
6144				    root_objectid, owner_objectid,
6145				    owner_offset);
6146	if (ret == 0) {
6147		extent_slot = path->slots[0];
6148		while (extent_slot >= 0) {
6149			btrfs_item_key_to_cpu(path->nodes[0], &key,
6150					      extent_slot);
6151			if (key.objectid != bytenr)
6152				break;
6153			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6154			    key.offset == num_bytes) {
6155				found_extent = 1;
6156				break;
6157			}
6158			if (key.type == BTRFS_METADATA_ITEM_KEY &&
6159			    key.offset == owner_objectid) {
6160				found_extent = 1;
6161				break;
6162			}
6163			if (path->slots[0] - extent_slot > 5)
6164				break;
6165			extent_slot--;
6166		}
6167#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6168		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6169		if (found_extent && item_size < sizeof(*ei))
6170			found_extent = 0;
6171#endif
6172		if (!found_extent) {
6173			BUG_ON(iref);
6174			ret = remove_extent_backref(trans, extent_root, path,
6175						    NULL, refs_to_drop,
6176						    is_data, &last_ref);
6177			if (ret) {
6178				btrfs_abort_transaction(trans, extent_root, ret);
6179				goto out;
6180			}
6181			btrfs_release_path(path);
6182			path->leave_spinning = 1;
6183
6184			key.objectid = bytenr;
6185			key.type = BTRFS_EXTENT_ITEM_KEY;
6186			key.offset = num_bytes;
6187
6188			if (!is_data && skinny_metadata) {
6189				key.type = BTRFS_METADATA_ITEM_KEY;
6190				key.offset = owner_objectid;
6191			}
6192
6193			ret = btrfs_search_slot(trans, extent_root,
6194						&key, path, -1, 1);
6195			if (ret > 0 && skinny_metadata && path->slots[0]) {
6196				/*
6197				 * Couldn't find our skinny metadata item,
6198				 * see if we have ye olde extent item.
6199				 */
6200				path->slots[0]--;
6201				btrfs_item_key_to_cpu(path->nodes[0], &key,
6202						      path->slots[0]);
6203				if (key.objectid == bytenr &&
6204				    key.type == BTRFS_EXTENT_ITEM_KEY &&
6205				    key.offset == num_bytes)
6206					ret = 0;
6207			}
6208
6209			if (ret > 0 && skinny_metadata) {
6210				skinny_metadata = false;
6211				key.objectid = bytenr;
6212				key.type = BTRFS_EXTENT_ITEM_KEY;
6213				key.offset = num_bytes;
6214				btrfs_release_path(path);
6215				ret = btrfs_search_slot(trans, extent_root,
6216							&key, path, -1, 1);
6217			}
6218
6219			if (ret) {
6220				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6221					ret, bytenr);
6222				if (ret > 0)
6223					btrfs_print_leaf(extent_root,
6224							 path->nodes[0]);
6225			}
6226			if (ret < 0) {
6227				btrfs_abort_transaction(trans, extent_root, ret);
6228				goto out;
6229			}
6230			extent_slot = path->slots[0];
6231		}
6232	} else if (WARN_ON(ret == -ENOENT)) {
6233		btrfs_print_leaf(extent_root, path->nodes[0]);
6234		btrfs_err(info,
6235			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6236			bytenr, parent, root_objectid, owner_objectid,
6237			owner_offset);
6238		btrfs_abort_transaction(trans, extent_root, ret);
6239		goto out;
6240	} else {
6241		btrfs_abort_transaction(trans, extent_root, ret);
6242		goto out;
6243	}
6244
6245	leaf = path->nodes[0];
6246	item_size = btrfs_item_size_nr(leaf, extent_slot);
6247#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6248	if (item_size < sizeof(*ei)) {
6249		BUG_ON(found_extent || extent_slot != path->slots[0]);
6250		ret = convert_extent_item_v0(trans, extent_root, path,
6251					     owner_objectid, 0);
6252		if (ret < 0) {
6253			btrfs_abort_transaction(trans, extent_root, ret);
6254			goto out;
6255		}
6256
6257		btrfs_release_path(path);
6258		path->leave_spinning = 1;
6259
6260		key.objectid = bytenr;
6261		key.type = BTRFS_EXTENT_ITEM_KEY;
6262		key.offset = num_bytes;
6263
6264		ret = btrfs_search_slot(trans, extent_root, &key, path,
6265					-1, 1);
6266		if (ret) {
6267			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6268				ret, bytenr);
6269			btrfs_print_leaf(extent_root, path->nodes[0]);
6270		}
6271		if (ret < 0) {
6272			btrfs_abort_transaction(trans, extent_root, ret);
6273			goto out;
6274		}
6275
6276		extent_slot = path->slots[0];
6277		leaf = path->nodes[0];
6278		item_size = btrfs_item_size_nr(leaf, extent_slot);
6279	}
6280#endif
6281	BUG_ON(item_size < sizeof(*ei));
6282	ei = btrfs_item_ptr(leaf, extent_slot,
6283			    struct btrfs_extent_item);
6284	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6285	    key.type == BTRFS_EXTENT_ITEM_KEY) {
6286		struct btrfs_tree_block_info *bi;
6287		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6288		bi = (struct btrfs_tree_block_info *)(ei + 1);
6289		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6290	}
6291
6292	refs = btrfs_extent_refs(leaf, ei);
6293	if (refs < refs_to_drop) {
6294		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6295			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
6296		ret = -EINVAL;
6297		btrfs_abort_transaction(trans, extent_root, ret);
6298		goto out;
6299	}
6300	refs -= refs_to_drop;
6301
6302	if (refs > 0) {
6303		type = BTRFS_QGROUP_OPER_SUB_SHARED;
6304		if (extent_op)
6305			__run_delayed_extent_op(extent_op, leaf, ei);
6306		/*
6307		 * In the case of inline back ref, reference count will
6308		 * be updated by remove_extent_backref
6309		 */
6310		if (iref) {
6311			BUG_ON(!found_extent);
6312		} else {
6313			btrfs_set_extent_refs(leaf, ei, refs);
6314			btrfs_mark_buffer_dirty(leaf);
6315		}
6316		if (found_extent) {
6317			ret = remove_extent_backref(trans, extent_root, path,
6318						    iref, refs_to_drop,
6319						    is_data, &last_ref);
6320			if (ret) {
6321				btrfs_abort_transaction(trans, extent_root, ret);
6322				goto out;
6323			}
6324		}
6325		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6326				 root_objectid);
6327	} else {
6328		if (found_extent) {
6329			BUG_ON(is_data && refs_to_drop !=
6330			       extent_data_ref_count(root, path, iref));
6331			if (iref) {
6332				BUG_ON(path->slots[0] != extent_slot);
6333			} else {
6334				BUG_ON(path->slots[0] != extent_slot + 1);
6335				path->slots[0] = extent_slot;
6336				num_to_del = 2;
6337			}
6338		}
6339
6340		last_ref = 1;
6341		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6342				      num_to_del);
6343		if (ret) {
6344			btrfs_abort_transaction(trans, extent_root, ret);
6345			goto out;
6346		}
6347		btrfs_release_path(path);
6348
6349		if (is_data) {
6350			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6351			if (ret) {
6352				btrfs_abort_transaction(trans, extent_root, ret);
6353				goto out;
6354			}
6355		}
6356
6357		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6358		if (ret) {
6359			btrfs_abort_transaction(trans, extent_root, ret);
6360			goto out;
6361		}
6362	}
6363	btrfs_release_path(path);
6364
6365	/* Deal with the quota accounting */
6366	if (!ret && last_ref && !no_quota) {
6367		int mod_seq = 0;
6368
6369		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6370		    type == BTRFS_QGROUP_OPER_SUB_SHARED)
6371			mod_seq = 1;
6372
6373		ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6374					      bytenr, num_bytes, type,
6375					      mod_seq);
6376	}
6377out:
6378	btrfs_free_path(path);
6379	return ret;
6380}
6381
6382/*
6383 * when we free an block, it is possible (and likely) that we free the last
6384 * delayed ref for that extent as well.  This searches the delayed ref tree for
6385 * a given extent, and if there are no other delayed refs to be processed, it
6386 * removes it from the tree.
6387 */
6388static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6389				      struct btrfs_root *root, u64 bytenr)
6390{
6391	struct btrfs_delayed_ref_head *head;
6392	struct btrfs_delayed_ref_root *delayed_refs;
6393	int ret = 0;
6394
6395	delayed_refs = &trans->transaction->delayed_refs;
6396	spin_lock(&delayed_refs->lock);
6397	head = btrfs_find_delayed_ref_head(trans, bytenr);
6398	if (!head)
6399		goto out_delayed_unlock;
6400
6401	spin_lock(&head->lock);
6402	if (rb_first(&head->ref_root))
6403		goto out;
6404
6405	if (head->extent_op) {
6406		if (!head->must_insert_reserved)
6407			goto out;
6408		btrfs_free_delayed_extent_op(head->extent_op);
6409		head->extent_op = NULL;
6410	}
6411
6412	/*
6413	 * waiting for the lock here would deadlock.  If someone else has it
6414	 * locked they are already in the process of dropping it anyway
6415	 */
6416	if (!mutex_trylock(&head->mutex))
6417		goto out;
6418
6419	/*
6420	 * at this point we have a head with no other entries.  Go
6421	 * ahead and process it.
6422	 */
6423	head->node.in_tree = 0;
6424	rb_erase(&head->href_node, &delayed_refs->href_root);
6425
6426	atomic_dec(&delayed_refs->num_entries);
6427
6428	/*
6429	 * we don't take a ref on the node because we're removing it from the
6430	 * tree, so we just steal the ref the tree was holding.
6431	 */
6432	delayed_refs->num_heads--;
6433	if (head->processing == 0)
6434		delayed_refs->num_heads_ready--;
6435	head->processing = 0;
6436	spin_unlock(&head->lock);
6437	spin_unlock(&delayed_refs->lock);
6438
6439	BUG_ON(head->extent_op);
6440	if (head->must_insert_reserved)
6441		ret = 1;
6442
6443	mutex_unlock(&head->mutex);
6444	btrfs_put_delayed_ref(&head->node);
6445	return ret;
6446out:
6447	spin_unlock(&head->lock);
6448
6449out_delayed_unlock:
6450	spin_unlock(&delayed_refs->lock);
6451	return 0;
6452}
6453
6454void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6455			   struct btrfs_root *root,
6456			   struct extent_buffer *buf,
6457			   u64 parent, int last_ref)
6458{
6459	int pin = 1;
6460	int ret;
6461
6462	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6463		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6464					buf->start, buf->len,
6465					parent, root->root_key.objectid,
6466					btrfs_header_level(buf),
6467					BTRFS_DROP_DELAYED_REF, NULL, 0);
6468		BUG_ON(ret); /* -ENOMEM */
6469	}
6470
6471	if (!last_ref)
6472		return;
6473
6474	if (btrfs_header_generation(buf) == trans->transid) {
6475		struct btrfs_block_group_cache *cache;
6476
6477		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6478			ret = check_ref_cleanup(trans, root, buf->start);
6479			if (!ret)
6480				goto out;
6481		}
6482
6483		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6484
6485		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6486			pin_down_extent(root, cache, buf->start, buf->len, 1);
6487			btrfs_put_block_group(cache);
6488			goto out;
6489		}
6490
6491		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6492
6493		btrfs_add_free_space(cache, buf->start, buf->len);
6494		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6495		btrfs_put_block_group(cache);
6496		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6497		pin = 0;
6498	}
6499out:
6500	if (pin)
6501		add_pinned_bytes(root->fs_info, buf->len,
6502				 btrfs_header_level(buf),
6503				 root->root_key.objectid);
6504
6505	/*
6506	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6507	 * anymore.
6508	 */
6509	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6510}
6511
6512/* Can return -ENOMEM */
6513int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6514		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6515		      u64 owner, u64 offset, int no_quota)
6516{
6517	int ret;
6518	struct btrfs_fs_info *fs_info = root->fs_info;
6519
6520	if (btrfs_test_is_dummy_root(root))
6521		return 0;
6522
6523	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6524
6525	/*
6526	 * tree log blocks never actually go into the extent allocation
6527	 * tree, just update pinning info and exit early.
6528	 */
6529	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6530		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6531		/* unlocks the pinned mutex */
6532		btrfs_pin_extent(root, bytenr, num_bytes, 1);
6533		ret = 0;
6534	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6535		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6536					num_bytes,
6537					parent, root_objectid, (int)owner,
6538					BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6539	} else {
6540		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6541						num_bytes,
6542						parent, root_objectid, owner,
6543						offset, BTRFS_DROP_DELAYED_REF,
6544						NULL, no_quota);
6545	}
6546	return ret;
6547}
6548
6549/*
6550 * when we wait for progress in the block group caching, its because
6551 * our allocation attempt failed at least once.  So, we must sleep
6552 * and let some progress happen before we try again.
6553 *
6554 * This function will sleep at least once waiting for new free space to
6555 * show up, and then it will check the block group free space numbers
6556 * for our min num_bytes.  Another option is to have it go ahead
6557 * and look in the rbtree for a free extent of a given size, but this
6558 * is a good start.
6559 *
6560 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6561 * any of the information in this block group.
6562 */
6563static noinline void
6564wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6565				u64 num_bytes)
6566{
6567	struct btrfs_caching_control *caching_ctl;
6568
6569	caching_ctl = get_caching_control(cache);
6570	if (!caching_ctl)
6571		return;
6572
6573	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6574		   (cache->free_space_ctl->free_space >= num_bytes));
6575
6576	put_caching_control(caching_ctl);
6577}
6578
6579static noinline int
6580wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6581{
6582	struct btrfs_caching_control *caching_ctl;
6583	int ret = 0;
6584
6585	caching_ctl = get_caching_control(cache);
6586	if (!caching_ctl)
6587		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6588
6589	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6590	if (cache->cached == BTRFS_CACHE_ERROR)
6591		ret = -EIO;
6592	put_caching_control(caching_ctl);
6593	return ret;
6594}
6595
6596int __get_raid_index(u64 flags)
6597{
6598	if (flags & BTRFS_BLOCK_GROUP_RAID10)
6599		return BTRFS_RAID_RAID10;
6600	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6601		return BTRFS_RAID_RAID1;
6602	else if (flags & BTRFS_BLOCK_GROUP_DUP)
6603		return BTRFS_RAID_DUP;
6604	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6605		return BTRFS_RAID_RAID0;
6606	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6607		return BTRFS_RAID_RAID5;
6608	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6609		return BTRFS_RAID_RAID6;
6610
6611	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6612}
6613
6614int get_block_group_index(struct btrfs_block_group_cache *cache)
6615{
6616	return __get_raid_index(cache->flags);
6617}
6618
6619static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6620	[BTRFS_RAID_RAID10]	= "raid10",
6621	[BTRFS_RAID_RAID1]	= "raid1",
6622	[BTRFS_RAID_DUP]	= "dup",
6623	[BTRFS_RAID_RAID0]	= "raid0",
6624	[BTRFS_RAID_SINGLE]	= "single",
6625	[BTRFS_RAID_RAID5]	= "raid5",
6626	[BTRFS_RAID_RAID6]	= "raid6",
6627};
6628
6629static const char *get_raid_name(enum btrfs_raid_types type)
6630{
6631	if (type >= BTRFS_NR_RAID_TYPES)
6632		return NULL;
6633
6634	return btrfs_raid_type_names[type];
6635}
6636
6637enum btrfs_loop_type {
6638	LOOP_CACHING_NOWAIT = 0,
6639	LOOP_CACHING_WAIT = 1,
6640	LOOP_ALLOC_CHUNK = 2,
6641	LOOP_NO_EMPTY_SIZE = 3,
6642};
6643
6644static inline void
6645btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6646		       int delalloc)
6647{
6648	if (delalloc)
6649		down_read(&cache->data_rwsem);
6650}
6651
6652static inline void
6653btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6654		       int delalloc)
6655{
6656	btrfs_get_block_group(cache);
6657	if (delalloc)
6658		down_read(&cache->data_rwsem);
6659}
6660
6661static struct btrfs_block_group_cache *
6662btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6663		   struct btrfs_free_cluster *cluster,
6664		   int delalloc)
6665{
6666	struct btrfs_block_group_cache *used_bg;
6667	bool locked = false;
6668again:
6669	spin_lock(&cluster->refill_lock);
6670	if (locked) {
6671		if (used_bg == cluster->block_group)
6672			return used_bg;
6673
6674		up_read(&used_bg->data_rwsem);
6675		btrfs_put_block_group(used_bg);
6676	}
6677
6678	used_bg = cluster->block_group;
6679	if (!used_bg)
6680		return NULL;
6681
6682	if (used_bg == block_group)
6683		return used_bg;
6684
6685	btrfs_get_block_group(used_bg);
6686
6687	if (!delalloc)
6688		return used_bg;
6689
6690	if (down_read_trylock(&used_bg->data_rwsem))
6691		return used_bg;
6692
6693	spin_unlock(&cluster->refill_lock);
6694	down_read(&used_bg->data_rwsem);
6695	locked = true;
6696	goto again;
6697}
6698
6699static inline void
6700btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6701			 int delalloc)
6702{
6703	if (delalloc)
6704		up_read(&cache->data_rwsem);
6705	btrfs_put_block_group(cache);
6706}
6707
6708/*
6709 * walks the btree of allocated extents and find a hole of a given size.
6710 * The key ins is changed to record the hole:
6711 * ins->objectid == start position
6712 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6713 * ins->offset == the size of the hole.
6714 * Any available blocks before search_start are skipped.
6715 *
6716 * If there is no suitable free space, we will record the max size of
6717 * the free space extent currently.
6718 */
6719static noinline int find_free_extent(struct btrfs_root *orig_root,
6720				     u64 num_bytes, u64 empty_size,
6721				     u64 hint_byte, struct btrfs_key *ins,
6722				     u64 flags, int delalloc)
6723{
6724	int ret = 0;
6725	struct btrfs_root *root = orig_root->fs_info->extent_root;
6726	struct btrfs_free_cluster *last_ptr = NULL;
6727	struct btrfs_block_group_cache *block_group = NULL;
6728	u64 search_start = 0;
6729	u64 max_extent_size = 0;
6730	int empty_cluster = 2 * 1024 * 1024;
6731	struct btrfs_space_info *space_info;
6732	int loop = 0;
6733	int index = __get_raid_index(flags);
6734	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6735		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6736	bool failed_cluster_refill = false;
6737	bool failed_alloc = false;
6738	bool use_cluster = true;
6739	bool have_caching_bg = false;
6740
6741	WARN_ON(num_bytes < root->sectorsize);
6742	ins->type = BTRFS_EXTENT_ITEM_KEY;
6743	ins->objectid = 0;
6744	ins->offset = 0;
6745
6746	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6747
6748	space_info = __find_space_info(root->fs_info, flags);
6749	if (!space_info) {
6750		btrfs_err(root->fs_info, "No space info for %llu", flags);
6751		return -ENOSPC;
6752	}
6753
6754	/*
6755	 * If the space info is for both data and metadata it means we have a
6756	 * small filesystem and we can't use the clustering stuff.
6757	 */
6758	if (btrfs_mixed_space_info(space_info))
6759		use_cluster = false;
6760
6761	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6762		last_ptr = &root->fs_info->meta_alloc_cluster;
6763		if (!btrfs_test_opt(root, SSD))
6764			empty_cluster = 64 * 1024;
6765	}
6766
6767	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6768	    btrfs_test_opt(root, SSD)) {
6769		last_ptr = &root->fs_info->data_alloc_cluster;
6770	}
6771
6772	if (last_ptr) {
6773		spin_lock(&last_ptr->lock);
6774		if (last_ptr->block_group)
6775			hint_byte = last_ptr->window_start;
6776		spin_unlock(&last_ptr->lock);
6777	}
6778
6779	search_start = max(search_start, first_logical_byte(root, 0));
6780	search_start = max(search_start, hint_byte);
6781
6782	if (!last_ptr)
6783		empty_cluster = 0;
6784
6785	if (search_start == hint_byte) {
6786		block_group = btrfs_lookup_block_group(root->fs_info,
6787						       search_start);
6788		/*
6789		 * we don't want to use the block group if it doesn't match our
6790		 * allocation bits, or if its not cached.
6791		 *
6792		 * However if we are re-searching with an ideal block group
6793		 * picked out then we don't care that the block group is cached.
6794		 */
6795		if (block_group && block_group_bits(block_group, flags) &&
6796		    block_group->cached != BTRFS_CACHE_NO) {
6797			down_read(&space_info->groups_sem);
6798			if (list_empty(&block_group->list) ||
6799			    block_group->ro) {
6800				/*
6801				 * someone is removing this block group,
6802				 * we can't jump into the have_block_group
6803				 * target because our list pointers are not
6804				 * valid
6805				 */
6806				btrfs_put_block_group(block_group);
6807				up_read(&space_info->groups_sem);
6808			} else {
6809				index = get_block_group_index(block_group);
6810				btrfs_lock_block_group(block_group, delalloc);
6811				goto have_block_group;
6812			}
6813		} else if (block_group) {
6814			btrfs_put_block_group(block_group);
6815		}
6816	}
6817search:
6818	have_caching_bg = false;
6819	down_read(&space_info->groups_sem);
6820	list_for_each_entry(block_group, &space_info->block_groups[index],
6821			    list) {
6822		u64 offset;
6823		int cached;
6824
6825		btrfs_grab_block_group(block_group, delalloc);
6826		search_start = block_group->key.objectid;
6827
6828		/*
6829		 * this can happen if we end up cycling through all the
6830		 * raid types, but we want to make sure we only allocate
6831		 * for the proper type.
6832		 */
6833		if (!block_group_bits(block_group, flags)) {
6834		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
6835				BTRFS_BLOCK_GROUP_RAID1 |
6836				BTRFS_BLOCK_GROUP_RAID5 |
6837				BTRFS_BLOCK_GROUP_RAID6 |
6838				BTRFS_BLOCK_GROUP_RAID10;
6839
6840			/*
6841			 * if they asked for extra copies and this block group
6842			 * doesn't provide them, bail.  This does allow us to
6843			 * fill raid0 from raid1.
6844			 */
6845			if ((flags & extra) && !(block_group->flags & extra))
6846				goto loop;
6847		}
6848
6849have_block_group:
6850		cached = block_group_cache_done(block_group);
6851		if (unlikely(!cached)) {
6852			ret = cache_block_group(block_group, 0);
6853			BUG_ON(ret < 0);
6854			ret = 0;
6855		}
6856
6857		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6858			goto loop;
6859		if (unlikely(block_group->ro))
6860			goto loop;
6861
6862		/*
6863		 * Ok we want to try and use the cluster allocator, so
6864		 * lets look there
6865		 */
6866		if (last_ptr) {
6867			struct btrfs_block_group_cache *used_block_group;
6868			unsigned long aligned_cluster;
6869			/*
6870			 * the refill lock keeps out other
6871			 * people trying to start a new cluster
6872			 */
6873			used_block_group = btrfs_lock_cluster(block_group,
6874							      last_ptr,
6875							      delalloc);
6876			if (!used_block_group)
6877				goto refill_cluster;
6878
6879			if (used_block_group != block_group &&
6880			    (used_block_group->ro ||
6881			     !block_group_bits(used_block_group, flags)))
6882				goto release_cluster;
6883
6884			offset = btrfs_alloc_from_cluster(used_block_group,
6885						last_ptr,
6886						num_bytes,
6887						used_block_group->key.objectid,
6888						&max_extent_size);
6889			if (offset) {
6890				/* we have a block, we're done */
6891				spin_unlock(&last_ptr->refill_lock);
6892				trace_btrfs_reserve_extent_cluster(root,
6893						used_block_group,
6894						search_start, num_bytes);
6895				if (used_block_group != block_group) {
6896					btrfs_release_block_group(block_group,
6897								  delalloc);
6898					block_group = used_block_group;
6899				}
6900				goto checks;
6901			}
6902
6903			WARN_ON(last_ptr->block_group != used_block_group);
6904release_cluster:
6905			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
6906			 * set up a new clusters, so lets just skip it
6907			 * and let the allocator find whatever block
6908			 * it can find.  If we reach this point, we
6909			 * will have tried the cluster allocator
6910			 * plenty of times and not have found
6911			 * anything, so we are likely way too
6912			 * fragmented for the clustering stuff to find
6913			 * anything.
6914			 *
6915			 * However, if the cluster is taken from the
6916			 * current block group, release the cluster
6917			 * first, so that we stand a better chance of
6918			 * succeeding in the unclustered
6919			 * allocation.  */
6920			if (loop >= LOOP_NO_EMPTY_SIZE &&
6921			    used_block_group != block_group) {
6922				spin_unlock(&last_ptr->refill_lock);
6923				btrfs_release_block_group(used_block_group,
6924							  delalloc);
6925				goto unclustered_alloc;
6926			}
6927
6928			/*
6929			 * this cluster didn't work out, free it and
6930			 * start over
6931			 */
6932			btrfs_return_cluster_to_free_space(NULL, last_ptr);
6933
6934			if (used_block_group != block_group)
6935				btrfs_release_block_group(used_block_group,
6936							  delalloc);
6937refill_cluster:
6938			if (loop >= LOOP_NO_EMPTY_SIZE) {
6939				spin_unlock(&last_ptr->refill_lock);
6940				goto unclustered_alloc;
6941			}
6942
6943			aligned_cluster = max_t(unsigned long,
6944						empty_cluster + empty_size,
6945					      block_group->full_stripe_len);
6946
6947			/* allocate a cluster in this block group */
6948			ret = btrfs_find_space_cluster(root, block_group,
6949						       last_ptr, search_start,
6950						       num_bytes,
6951						       aligned_cluster);
6952			if (ret == 0) {
6953				/*
6954				 * now pull our allocation out of this
6955				 * cluster
6956				 */
6957				offset = btrfs_alloc_from_cluster(block_group,
6958							last_ptr,
6959							num_bytes,
6960							search_start,
6961							&max_extent_size);
6962				if (offset) {
6963					/* we found one, proceed */
6964					spin_unlock(&last_ptr->refill_lock);
6965					trace_btrfs_reserve_extent_cluster(root,
6966						block_group, search_start,
6967						num_bytes);
6968					goto checks;
6969				}
6970			} else if (!cached && loop > LOOP_CACHING_NOWAIT
6971				   && !failed_cluster_refill) {
6972				spin_unlock(&last_ptr->refill_lock);
6973
6974				failed_cluster_refill = true;
6975				wait_block_group_cache_progress(block_group,
6976				       num_bytes + empty_cluster + empty_size);
6977				goto have_block_group;
6978			}
6979
6980			/*
6981			 * at this point we either didn't find a cluster
6982			 * or we weren't able to allocate a block from our
6983			 * cluster.  Free the cluster we've been trying
6984			 * to use, and go to the next block group
6985			 */
6986			btrfs_return_cluster_to_free_space(NULL, last_ptr);
6987			spin_unlock(&last_ptr->refill_lock);
6988			goto loop;
6989		}
6990
6991unclustered_alloc:
6992		spin_lock(&block_group->free_space_ctl->tree_lock);
6993		if (cached &&
6994		    block_group->free_space_ctl->free_space <
6995		    num_bytes + empty_cluster + empty_size) {
6996			if (block_group->free_space_ctl->free_space >
6997			    max_extent_size)
6998				max_extent_size =
6999					block_group->free_space_ctl->free_space;
7000			spin_unlock(&block_group->free_space_ctl->tree_lock);
7001			goto loop;
7002		}
7003		spin_unlock(&block_group->free_space_ctl->tree_lock);
7004
7005		offset = btrfs_find_space_for_alloc(block_group, search_start,
7006						    num_bytes, empty_size,
7007						    &max_extent_size);
7008		/*
7009		 * If we didn't find a chunk, and we haven't failed on this
7010		 * block group before, and this block group is in the middle of
7011		 * caching and we are ok with waiting, then go ahead and wait
7012		 * for progress to be made, and set failed_alloc to true.
7013		 *
7014		 * If failed_alloc is true then we've already waited on this
7015		 * block group once and should move on to the next block group.
7016		 */
7017		if (!offset && !failed_alloc && !cached &&
7018		    loop > LOOP_CACHING_NOWAIT) {
7019			wait_block_group_cache_progress(block_group,
7020						num_bytes + empty_size);
7021			failed_alloc = true;
7022			goto have_block_group;
7023		} else if (!offset) {
7024			if (!cached)
7025				have_caching_bg = true;
7026			goto loop;
7027		}
7028checks:
7029		search_start = ALIGN(offset, root->stripesize);
7030
7031		/* move on to the next group */
7032		if (search_start + num_bytes >
7033		    block_group->key.objectid + block_group->key.offset) {
7034			btrfs_add_free_space(block_group, offset, num_bytes);
7035			goto loop;
7036		}
7037
7038		if (offset < search_start)
7039			btrfs_add_free_space(block_group, offset,
7040					     search_start - offset);
7041		BUG_ON(offset > search_start);
7042
7043		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7044						  alloc_type, delalloc);
7045		if (ret == -EAGAIN) {
7046			btrfs_add_free_space(block_group, offset, num_bytes);
7047			goto loop;
7048		}
7049
7050		/* we are all good, lets return */
7051		ins->objectid = search_start;
7052		ins->offset = num_bytes;
7053
7054		trace_btrfs_reserve_extent(orig_root, block_group,
7055					   search_start, num_bytes);
7056		btrfs_release_block_group(block_group, delalloc);
7057		break;
7058loop:
7059		failed_cluster_refill = false;
7060		failed_alloc = false;
7061		BUG_ON(index != get_block_group_index(block_group));
7062		btrfs_release_block_group(block_group, delalloc);
7063	}
7064	up_read(&space_info->groups_sem);
7065
7066	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7067		goto search;
7068
7069	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7070		goto search;
7071
7072	/*
7073	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7074	 *			caching kthreads as we move along
7075	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7076	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7077	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7078	 *			again
7079	 */
7080	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7081		index = 0;
7082		loop++;
7083		if (loop == LOOP_ALLOC_CHUNK) {
7084			struct btrfs_trans_handle *trans;
7085			int exist = 0;
7086
7087			trans = current->journal_info;
7088			if (trans)
7089				exist = 1;
7090			else
7091				trans = btrfs_join_transaction(root);
7092
7093			if (IS_ERR(trans)) {
7094				ret = PTR_ERR(trans);
7095				goto out;
7096			}
7097
7098			ret = do_chunk_alloc(trans, root, flags,
7099					     CHUNK_ALLOC_FORCE);
7100			/*
7101			 * Do not bail out on ENOSPC since we
7102			 * can do more things.
7103			 */
7104			if (ret < 0 && ret != -ENOSPC)
7105				btrfs_abort_transaction(trans,
7106							root, ret);
7107			else
7108				ret = 0;
7109			if (!exist)
7110				btrfs_end_transaction(trans, root);
7111			if (ret)
7112				goto out;
7113		}
7114
7115		if (loop == LOOP_NO_EMPTY_SIZE) {
7116			empty_size = 0;
7117			empty_cluster = 0;
7118		}
7119
7120		goto search;
7121	} else if (!ins->objectid) {
7122		ret = -ENOSPC;
7123	} else if (ins->objectid) {
7124		ret = 0;
7125	}
7126out:
7127	if (ret == -ENOSPC)
7128		ins->offset = max_extent_size;
7129	return ret;
7130}
7131
7132static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7133			    int dump_block_groups)
7134{
7135	struct btrfs_block_group_cache *cache;
7136	int index = 0;
7137
7138	spin_lock(&info->lock);
7139	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7140	       info->flags,
7141	       info->total_bytes - info->bytes_used - info->bytes_pinned -
7142	       info->bytes_reserved - info->bytes_readonly,
7143	       (info->full) ? "" : "not ");
7144	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7145	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
7146	       info->total_bytes, info->bytes_used, info->bytes_pinned,
7147	       info->bytes_reserved, info->bytes_may_use,
7148	       info->bytes_readonly);
7149	spin_unlock(&info->lock);
7150
7151	if (!dump_block_groups)
7152		return;
7153
7154	down_read(&info->groups_sem);
7155again:
7156	list_for_each_entry(cache, &info->block_groups[index], list) {
7157		spin_lock(&cache->lock);
7158		printk(KERN_INFO "BTRFS: "
7159			   "block group %llu has %llu bytes, "
7160			   "%llu used %llu pinned %llu reserved %s\n",
7161		       cache->key.objectid, cache->key.offset,
7162		       btrfs_block_group_used(&cache->item), cache->pinned,
7163		       cache->reserved, cache->ro ? "[readonly]" : "");
7164		btrfs_dump_free_space(cache, bytes);
7165		spin_unlock(&cache->lock);
7166	}
7167	if (++index < BTRFS_NR_RAID_TYPES)
7168		goto again;
7169	up_read(&info->groups_sem);
7170}
7171
7172int btrfs_reserve_extent(struct btrfs_root *root,
7173			 u64 num_bytes, u64 min_alloc_size,
7174			 u64 empty_size, u64 hint_byte,
7175			 struct btrfs_key *ins, int is_data, int delalloc)
7176{
7177	bool final_tried = false;
7178	u64 flags;
7179	int ret;
7180
7181	flags = btrfs_get_alloc_profile(root, is_data);
7182again:
7183	WARN_ON(num_bytes < root->sectorsize);
7184	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7185			       flags, delalloc);
7186
7187	if (ret == -ENOSPC) {
7188		if (!final_tried && ins->offset) {
7189			num_bytes = min(num_bytes >> 1, ins->offset);
7190			num_bytes = round_down(num_bytes, root->sectorsize);
7191			num_bytes = max(num_bytes, min_alloc_size);
7192			if (num_bytes == min_alloc_size)
7193				final_tried = true;
7194			goto again;
7195		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7196			struct btrfs_space_info *sinfo;
7197
7198			sinfo = __find_space_info(root->fs_info, flags);
7199			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7200				flags, num_bytes);
7201			if (sinfo)
7202				dump_space_info(sinfo, num_bytes, 1);
7203		}
7204	}
7205
7206	return ret;
7207}
7208
7209static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7210					u64 start, u64 len,
7211					int pin, int delalloc)
7212{
7213	struct btrfs_block_group_cache *cache;
7214	int ret = 0;
7215
7216	cache = btrfs_lookup_block_group(root->fs_info, start);
7217	if (!cache) {
7218		btrfs_err(root->fs_info, "Unable to find block group for %llu",
7219			start);
7220		return -ENOSPC;
7221	}
7222
7223	if (pin)
7224		pin_down_extent(root, cache, start, len, 1);
7225	else {
7226		if (btrfs_test_opt(root, DISCARD))
7227			ret = btrfs_discard_extent(root, start, len, NULL);
7228		btrfs_add_free_space(cache, start, len);
7229		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7230	}
7231
7232	btrfs_put_block_group(cache);
7233
7234	trace_btrfs_reserved_extent_free(root, start, len);
7235
7236	return ret;
7237}
7238
7239int btrfs_free_reserved_extent(struct btrfs_root *root,
7240			       u64 start, u64 len, int delalloc)
7241{
7242	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7243}
7244
7245int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7246				       u64 start, u64 len)
7247{
7248	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7249}
7250
7251static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7252				      struct btrfs_root *root,
7253				      u64 parent, u64 root_objectid,
7254				      u64 flags, u64 owner, u64 offset,
7255				      struct btrfs_key *ins, int ref_mod)
7256{
7257	int ret;
7258	struct btrfs_fs_info *fs_info = root->fs_info;
7259	struct btrfs_extent_item *extent_item;
7260	struct btrfs_extent_inline_ref *iref;
7261	struct btrfs_path *path;
7262	struct extent_buffer *leaf;
7263	int type;
7264	u32 size;
7265
7266	if (parent > 0)
7267		type = BTRFS_SHARED_DATA_REF_KEY;
7268	else
7269		type = BTRFS_EXTENT_DATA_REF_KEY;
7270
7271	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7272
7273	path = btrfs_alloc_path();
7274	if (!path)
7275		return -ENOMEM;
7276
7277	path->leave_spinning = 1;
7278	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7279				      ins, size);
7280	if (ret) {
7281		btrfs_free_path(path);
7282		return ret;
7283	}
7284
7285	leaf = path->nodes[0];
7286	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7287				     struct btrfs_extent_item);
7288	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7289	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7290	btrfs_set_extent_flags(leaf, extent_item,
7291			       flags | BTRFS_EXTENT_FLAG_DATA);
7292
7293	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7294	btrfs_set_extent_inline_ref_type(leaf, iref, type);
7295	if (parent > 0) {
7296		struct btrfs_shared_data_ref *ref;
7297		ref = (struct btrfs_shared_data_ref *)(iref + 1);
7298		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7299		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7300	} else {
7301		struct btrfs_extent_data_ref *ref;
7302		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7303		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7304		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7305		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7306		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7307	}
7308
7309	btrfs_mark_buffer_dirty(path->nodes[0]);
7310	btrfs_free_path(path);
7311
7312	/* Always set parent to 0 here since its exclusive anyway. */
7313	ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7314				      ins->objectid, ins->offset,
7315				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7316	if (ret)
7317		return ret;
7318
7319	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7320	if (ret) { /* -ENOENT, logic error */
7321		btrfs_err(fs_info, "update block group failed for %llu %llu",
7322			ins->objectid, ins->offset);
7323		BUG();
7324	}
7325	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7326	return ret;
7327}
7328
7329static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7330				     struct btrfs_root *root,
7331				     u64 parent, u64 root_objectid,
7332				     u64 flags, struct btrfs_disk_key *key,
7333				     int level, struct btrfs_key *ins,
7334				     int no_quota)
7335{
7336	int ret;
7337	struct btrfs_fs_info *fs_info = root->fs_info;
7338	struct btrfs_extent_item *extent_item;
7339	struct btrfs_tree_block_info *block_info;
7340	struct btrfs_extent_inline_ref *iref;
7341	struct btrfs_path *path;
7342	struct extent_buffer *leaf;
7343	u32 size = sizeof(*extent_item) + sizeof(*iref);
7344	u64 num_bytes = ins->offset;
7345	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7346						 SKINNY_METADATA);
7347
7348	if (!skinny_metadata)
7349		size += sizeof(*block_info);
7350
7351	path = btrfs_alloc_path();
7352	if (!path) {
7353		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7354						   root->nodesize);
7355		return -ENOMEM;
7356	}
7357
7358	path->leave_spinning = 1;
7359	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7360				      ins, size);
7361	if (ret) {
7362		btrfs_free_path(path);
7363		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7364						   root->nodesize);
7365		return ret;
7366	}
7367
7368	leaf = path->nodes[0];
7369	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7370				     struct btrfs_extent_item);
7371	btrfs_set_extent_refs(leaf, extent_item, 1);
7372	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7373	btrfs_set_extent_flags(leaf, extent_item,
7374			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7375
7376	if (skinny_metadata) {
7377		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7378		num_bytes = root->nodesize;
7379	} else {
7380		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7381		btrfs_set_tree_block_key(leaf, block_info, key);
7382		btrfs_set_tree_block_level(leaf, block_info, level);
7383		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7384	}
7385
7386	if (parent > 0) {
7387		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7388		btrfs_set_extent_inline_ref_type(leaf, iref,
7389						 BTRFS_SHARED_BLOCK_REF_KEY);
7390		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7391	} else {
7392		btrfs_set_extent_inline_ref_type(leaf, iref,
7393						 BTRFS_TREE_BLOCK_REF_KEY);
7394		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7395	}
7396
7397	btrfs_mark_buffer_dirty(leaf);
7398	btrfs_free_path(path);
7399
7400	if (!no_quota) {
7401		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7402					      ins->objectid, num_bytes,
7403					      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7404		if (ret)
7405			return ret;
7406	}
7407
7408	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7409				 1);
7410	if (ret) { /* -ENOENT, logic error */
7411		btrfs_err(fs_info, "update block group failed for %llu %llu",
7412			ins->objectid, ins->offset);
7413		BUG();
7414	}
7415
7416	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7417	return ret;
7418}
7419
7420int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7421				     struct btrfs_root *root,
7422				     u64 root_objectid, u64 owner,
7423				     u64 offset, struct btrfs_key *ins)
7424{
7425	int ret;
7426
7427	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7428
7429	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7430					 ins->offset, 0,
7431					 root_objectid, owner, offset,
7432					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7433	return ret;
7434}
7435
7436/*
7437 * this is used by the tree logging recovery code.  It records that
7438 * an extent has been allocated and makes sure to clear the free
7439 * space cache bits as well
7440 */
7441int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7442				   struct btrfs_root *root,
7443				   u64 root_objectid, u64 owner, u64 offset,
7444				   struct btrfs_key *ins)
7445{
7446	int ret;
7447	struct btrfs_block_group_cache *block_group;
7448
7449	/*
7450	 * Mixed block groups will exclude before processing the log so we only
7451	 * need to do the exlude dance if this fs isn't mixed.
7452	 */
7453	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7454		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7455		if (ret)
7456			return ret;
7457	}
7458
7459	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7460	if (!block_group)
7461		return -EINVAL;
7462
7463	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7464					  RESERVE_ALLOC_NO_ACCOUNT, 0);
7465	BUG_ON(ret); /* logic error */
7466	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7467					 0, owner, offset, ins, 1);
7468	btrfs_put_block_group(block_group);
7469	return ret;
7470}
7471
7472static struct extent_buffer *
7473btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7474		      u64 bytenr, int level)
7475{
7476	struct extent_buffer *buf;
7477
7478	buf = btrfs_find_create_tree_block(root, bytenr);
7479	if (!buf)
7480		return ERR_PTR(-ENOMEM);
7481	btrfs_set_header_generation(buf, trans->transid);
7482	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7483	btrfs_tree_lock(buf);
7484	clean_tree_block(trans, root->fs_info, buf);
7485	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7486
7487	btrfs_set_lock_blocking(buf);
7488	btrfs_set_buffer_uptodate(buf);
7489
7490	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7491		buf->log_index = root->log_transid % 2;
7492		/*
7493		 * we allow two log transactions at a time, use different
7494		 * EXENT bit to differentiate dirty pages.
7495		 */
7496		if (buf->log_index == 0)
7497			set_extent_dirty(&root->dirty_log_pages, buf->start,
7498					buf->start + buf->len - 1, GFP_NOFS);
7499		else
7500			set_extent_new(&root->dirty_log_pages, buf->start,
7501					buf->start + buf->len - 1, GFP_NOFS);
7502	} else {
7503		buf->log_index = -1;
7504		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7505			 buf->start + buf->len - 1, GFP_NOFS);
7506	}
7507	trans->blocks_used++;
7508	/* this returns a buffer locked for blocking */
7509	return buf;
7510}
7511
7512static struct btrfs_block_rsv *
7513use_block_rsv(struct btrfs_trans_handle *trans,
7514	      struct btrfs_root *root, u32 blocksize)
7515{
7516	struct btrfs_block_rsv *block_rsv;
7517	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7518	int ret;
7519	bool global_updated = false;
7520
7521	block_rsv = get_block_rsv(trans, root);
7522
7523	if (unlikely(block_rsv->size == 0))
7524		goto try_reserve;
7525again:
7526	ret = block_rsv_use_bytes(block_rsv, blocksize);
7527	if (!ret)
7528		return block_rsv;
7529
7530	if (block_rsv->failfast)
7531		return ERR_PTR(ret);
7532
7533	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7534		global_updated = true;
7535		update_global_block_rsv(root->fs_info);
7536		goto again;
7537	}
7538
7539	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7540		static DEFINE_RATELIMIT_STATE(_rs,
7541				DEFAULT_RATELIMIT_INTERVAL * 10,
7542				/*DEFAULT_RATELIMIT_BURST*/ 1);
7543		if (__ratelimit(&_rs))
7544			WARN(1, KERN_DEBUG
7545				"BTRFS: block rsv returned %d\n", ret);
7546	}
7547try_reserve:
7548	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7549				     BTRFS_RESERVE_NO_FLUSH);
7550	if (!ret)
7551		return block_rsv;
7552	/*
7553	 * If we couldn't reserve metadata bytes try and use some from
7554	 * the global reserve if its space type is the same as the global
7555	 * reservation.
7556	 */
7557	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7558	    block_rsv->space_info == global_rsv->space_info) {
7559		ret = block_rsv_use_bytes(global_rsv, blocksize);
7560		if (!ret)
7561			return global_rsv;
7562	}
7563	return ERR_PTR(ret);
7564}
7565
7566static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7567			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
7568{
7569	block_rsv_add_bytes(block_rsv, blocksize, 0);
7570	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7571}
7572
7573/*
7574 * finds a free extent and does all the dirty work required for allocation
7575 * returns the key for the extent through ins, and a tree buffer for
7576 * the first block of the extent through buf.
7577 *
7578 * returns the tree buffer or an ERR_PTR on error.
7579 */
7580struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7581					struct btrfs_root *root,
7582					u64 parent, u64 root_objectid,
7583					struct btrfs_disk_key *key, int level,
7584					u64 hint, u64 empty_size)
7585{
7586	struct btrfs_key ins;
7587	struct btrfs_block_rsv *block_rsv;
7588	struct extent_buffer *buf;
7589	struct btrfs_delayed_extent_op *extent_op;
7590	u64 flags = 0;
7591	int ret;
7592	u32 blocksize = root->nodesize;
7593	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7594						 SKINNY_METADATA);
7595
7596	if (btrfs_test_is_dummy_root(root)) {
7597		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7598					    level);
7599		if (!IS_ERR(buf))
7600			root->alloc_bytenr += blocksize;
7601		return buf;
7602	}
7603
7604	block_rsv = use_block_rsv(trans, root, blocksize);
7605	if (IS_ERR(block_rsv))
7606		return ERR_CAST(block_rsv);
7607
7608	ret = btrfs_reserve_extent(root, blocksize, blocksize,
7609				   empty_size, hint, &ins, 0, 0);
7610	if (ret)
7611		goto out_unuse;
7612
7613	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7614	if (IS_ERR(buf)) {
7615		ret = PTR_ERR(buf);
7616		goto out_free_reserved;
7617	}
7618
7619	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7620		if (parent == 0)
7621			parent = ins.objectid;
7622		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7623	} else
7624		BUG_ON(parent > 0);
7625
7626	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7627		extent_op = btrfs_alloc_delayed_extent_op();
7628		if (!extent_op) {
7629			ret = -ENOMEM;
7630			goto out_free_buf;
7631		}
7632		if (key)
7633			memcpy(&extent_op->key, key, sizeof(extent_op->key));
7634		else
7635			memset(&extent_op->key, 0, sizeof(extent_op->key));
7636		extent_op->flags_to_set = flags;
7637		if (skinny_metadata)
7638			extent_op->update_key = 0;
7639		else
7640			extent_op->update_key = 1;
7641		extent_op->update_flags = 1;
7642		extent_op->is_data = 0;
7643		extent_op->level = level;
7644
7645		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7646						 ins.objectid, ins.offset,
7647						 parent, root_objectid, level,
7648						 BTRFS_ADD_DELAYED_EXTENT,
7649						 extent_op, 0);
7650		if (ret)
7651			goto out_free_delayed;
7652	}
7653	return buf;
7654
7655out_free_delayed:
7656	btrfs_free_delayed_extent_op(extent_op);
7657out_free_buf:
7658	free_extent_buffer(buf);
7659out_free_reserved:
7660	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7661out_unuse:
7662	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7663	return ERR_PTR(ret);
7664}
7665
7666struct walk_control {
7667	u64 refs[BTRFS_MAX_LEVEL];
7668	u64 flags[BTRFS_MAX_LEVEL];
7669	struct btrfs_key update_progress;
7670	int stage;
7671	int level;
7672	int shared_level;
7673	int update_ref;
7674	int keep_locks;
7675	int reada_slot;
7676	int reada_count;
7677	int for_reloc;
7678};
7679
7680#define DROP_REFERENCE	1
7681#define UPDATE_BACKREF	2
7682
7683static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7684				     struct btrfs_root *root,
7685				     struct walk_control *wc,
7686				     struct btrfs_path *path)
7687{
7688	u64 bytenr;
7689	u64 generation;
7690	u64 refs;
7691	u64 flags;
7692	u32 nritems;
7693	u32 blocksize;
7694	struct btrfs_key key;
7695	struct extent_buffer *eb;
7696	int ret;
7697	int slot;
7698	int nread = 0;
7699
7700	if (path->slots[wc->level] < wc->reada_slot) {
7701		wc->reada_count = wc->reada_count * 2 / 3;
7702		wc->reada_count = max(wc->reada_count, 2);
7703	} else {
7704		wc->reada_count = wc->reada_count * 3 / 2;
7705		wc->reada_count = min_t(int, wc->reada_count,
7706					BTRFS_NODEPTRS_PER_BLOCK(root));
7707	}
7708
7709	eb = path->nodes[wc->level];
7710	nritems = btrfs_header_nritems(eb);
7711	blocksize = root->nodesize;
7712
7713	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7714		if (nread >= wc->reada_count)
7715			break;
7716
7717		cond_resched();
7718		bytenr = btrfs_node_blockptr(eb, slot);
7719		generation = btrfs_node_ptr_generation(eb, slot);
7720
7721		if (slot == path->slots[wc->level])
7722			goto reada;
7723
7724		if (wc->stage == UPDATE_BACKREF &&
7725		    generation <= root->root_key.offset)
7726			continue;
7727
7728		/* We don't lock the tree block, it's OK to be racy here */
7729		ret = btrfs_lookup_extent_info(trans, root, bytenr,
7730					       wc->level - 1, 1, &refs,
7731					       &flags);
7732		/* We don't care about errors in readahead. */
7733		if (ret < 0)
7734			continue;
7735		BUG_ON(refs == 0);
7736
7737		if (wc->stage == DROP_REFERENCE) {
7738			if (refs == 1)
7739				goto reada;
7740
7741			if (wc->level == 1 &&
7742			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7743				continue;
7744			if (!wc->update_ref ||
7745			    generation <= root->root_key.offset)
7746				continue;
7747			btrfs_node_key_to_cpu(eb, &key, slot);
7748			ret = btrfs_comp_cpu_keys(&key,
7749						  &wc->update_progress);
7750			if (ret < 0)
7751				continue;
7752		} else {
7753			if (wc->level == 1 &&
7754			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7755				continue;
7756		}
7757reada:
7758		readahead_tree_block(root, bytenr);
7759		nread++;
7760	}
7761	wc->reada_slot = slot;
7762}
7763
7764static int account_leaf_items(struct btrfs_trans_handle *trans,
7765			      struct btrfs_root *root,
7766			      struct extent_buffer *eb)
7767{
7768	int nr = btrfs_header_nritems(eb);
7769	int i, extent_type, ret;
7770	struct btrfs_key key;
7771	struct btrfs_file_extent_item *fi;
7772	u64 bytenr, num_bytes;
7773
7774	for (i = 0; i < nr; i++) {
7775		btrfs_item_key_to_cpu(eb, &key, i);
7776
7777		if (key.type != BTRFS_EXTENT_DATA_KEY)
7778			continue;
7779
7780		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7781		/* filter out non qgroup-accountable extents  */
7782		extent_type = btrfs_file_extent_type(eb, fi);
7783
7784		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7785			continue;
7786
7787		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7788		if (!bytenr)
7789			continue;
7790
7791		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7792
7793		ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7794					      root->objectid,
7795					      bytenr, num_bytes,
7796					      BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7797		if (ret)
7798			return ret;
7799	}
7800	return 0;
7801}
7802
7803/*
7804 * Walk up the tree from the bottom, freeing leaves and any interior
7805 * nodes which have had all slots visited. If a node (leaf or
7806 * interior) is freed, the node above it will have it's slot
7807 * incremented. The root node will never be freed.
7808 *
7809 * At the end of this function, we should have a path which has all
7810 * slots incremented to the next position for a search. If we need to
7811 * read a new node it will be NULL and the node above it will have the
7812 * correct slot selected for a later read.
7813 *
7814 * If we increment the root nodes slot counter past the number of
7815 * elements, 1 is returned to signal completion of the search.
7816 */
7817static int adjust_slots_upwards(struct btrfs_root *root,
7818				struct btrfs_path *path, int root_level)
7819{
7820	int level = 0;
7821	int nr, slot;
7822	struct extent_buffer *eb;
7823
7824	if (root_level == 0)
7825		return 1;
7826
7827	while (level <= root_level) {
7828		eb = path->nodes[level];
7829		nr = btrfs_header_nritems(eb);
7830		path->slots[level]++;
7831		slot = path->slots[level];
7832		if (slot >= nr || level == 0) {
7833			/*
7834			 * Don't free the root -  we will detect this
7835			 * condition after our loop and return a
7836			 * positive value for caller to stop walking the tree.
7837			 */
7838			if (level != root_level) {
7839				btrfs_tree_unlock_rw(eb, path->locks[level]);
7840				path->locks[level] = 0;
7841
7842				free_extent_buffer(eb);
7843				path->nodes[level] = NULL;
7844				path->slots[level] = 0;
7845			}
7846		} else {
7847			/*
7848			 * We have a valid slot to walk back down
7849			 * from. Stop here so caller can process these
7850			 * new nodes.
7851			 */
7852			break;
7853		}
7854
7855		level++;
7856	}
7857
7858	eb = path->nodes[root_level];
7859	if (path->slots[root_level] >= btrfs_header_nritems(eb))
7860		return 1;
7861
7862	return 0;
7863}
7864
7865/*
7866 * root_eb is the subtree root and is locked before this function is called.
7867 */
7868static int account_shared_subtree(struct btrfs_trans_handle *trans,
7869				  struct btrfs_root *root,
7870				  struct extent_buffer *root_eb,
7871				  u64 root_gen,
7872				  int root_level)
7873{
7874	int ret = 0;
7875	int level;
7876	struct extent_buffer *eb = root_eb;
7877	struct btrfs_path *path = NULL;
7878
7879	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7880	BUG_ON(root_eb == NULL);
7881
7882	if (!root->fs_info->quota_enabled)
7883		return 0;
7884
7885	if (!extent_buffer_uptodate(root_eb)) {
7886		ret = btrfs_read_buffer(root_eb, root_gen);
7887		if (ret)
7888			goto out;
7889	}
7890
7891	if (root_level == 0) {
7892		ret = account_leaf_items(trans, root, root_eb);
7893		goto out;
7894	}
7895
7896	path = btrfs_alloc_path();
7897	if (!path)
7898		return -ENOMEM;
7899
7900	/*
7901	 * Walk down the tree.  Missing extent blocks are filled in as
7902	 * we go. Metadata is accounted every time we read a new
7903	 * extent block.
7904	 *
7905	 * When we reach a leaf, we account for file extent items in it,
7906	 * walk back up the tree (adjusting slot pointers as we go)
7907	 * and restart the search process.
7908	 */
7909	extent_buffer_get(root_eb); /* For path */
7910	path->nodes[root_level] = root_eb;
7911	path->slots[root_level] = 0;
7912	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7913walk_down:
7914	level = root_level;
7915	while (level >= 0) {
7916		if (path->nodes[level] == NULL) {
7917			int parent_slot;
7918			u64 child_gen;
7919			u64 child_bytenr;
7920
7921			/* We need to get child blockptr/gen from
7922			 * parent before we can read it. */
7923			eb = path->nodes[level + 1];
7924			parent_slot = path->slots[level + 1];
7925			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7926			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7927
7928			eb = read_tree_block(root, child_bytenr, child_gen);
7929			if (!eb || !extent_buffer_uptodate(eb)) {
7930				ret = -EIO;
7931				goto out;
7932			}
7933
7934			path->nodes[level] = eb;
7935			path->slots[level] = 0;
7936
7937			btrfs_tree_read_lock(eb);
7938			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7939			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7940
7941			ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7942						root->objectid,
7943						child_bytenr,
7944						root->nodesize,
7945						BTRFS_QGROUP_OPER_SUB_SUBTREE,
7946						0);
7947			if (ret)
7948				goto out;
7949
7950		}
7951
7952		if (level == 0) {
7953			ret = account_leaf_items(trans, root, path->nodes[level]);
7954			if (ret)
7955				goto out;
7956
7957			/* Nonzero return here means we completed our search */
7958			ret = adjust_slots_upwards(root, path, root_level);
7959			if (ret)
7960				break;
7961
7962			/* Restart search with new slots */
7963			goto walk_down;
7964		}
7965
7966		level--;
7967	}
7968
7969	ret = 0;
7970out:
7971	btrfs_free_path(path);
7972
7973	return ret;
7974}
7975
7976/*
7977 * helper to process tree block while walking down the tree.
7978 *
7979 * when wc->stage == UPDATE_BACKREF, this function updates
7980 * back refs for pointers in the block.
7981 *
7982 * NOTE: return value 1 means we should stop walking down.
7983 */
7984static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7985				   struct btrfs_root *root,
7986				   struct btrfs_path *path,
7987				   struct walk_control *wc, int lookup_info)
7988{
7989	int level = wc->level;
7990	struct extent_buffer *eb = path->nodes[level];
7991	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7992	int ret;
7993
7994	if (wc->stage == UPDATE_BACKREF &&
7995	    btrfs_header_owner(eb) != root->root_key.objectid)
7996		return 1;
7997
7998	/*
7999	 * when reference count of tree block is 1, it won't increase
8000	 * again. once full backref flag is set, we never clear it.
8001	 */
8002	if (lookup_info &&
8003	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8004	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8005		BUG_ON(!path->locks[level]);
8006		ret = btrfs_lookup_extent_info(trans, root,
8007					       eb->start, level, 1,
8008					       &wc->refs[level],
8009					       &wc->flags[level]);
8010		BUG_ON(ret == -ENOMEM);
8011		if (ret)
8012			return ret;
8013		BUG_ON(wc->refs[level] == 0);
8014	}
8015
8016	if (wc->stage == DROP_REFERENCE) {
8017		if (wc->refs[level] > 1)
8018			return 1;
8019
8020		if (path->locks[level] && !wc->keep_locks) {
8021			btrfs_tree_unlock_rw(eb, path->locks[level]);
8022			path->locks[level] = 0;
8023		}
8024		return 0;
8025	}
8026
8027	/* wc->stage == UPDATE_BACKREF */
8028	if (!(wc->flags[level] & flag)) {
8029		BUG_ON(!path->locks[level]);
8030		ret = btrfs_inc_ref(trans, root, eb, 1);
8031		BUG_ON(ret); /* -ENOMEM */
8032		ret = btrfs_dec_ref(trans, root, eb, 0);
8033		BUG_ON(ret); /* -ENOMEM */
8034		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8035						  eb->len, flag,
8036						  btrfs_header_level(eb), 0);
8037		BUG_ON(ret); /* -ENOMEM */
8038		wc->flags[level] |= flag;
8039	}
8040
8041	/*
8042	 * the block is shared by multiple trees, so it's not good to
8043	 * keep the tree lock
8044	 */
8045	if (path->locks[level] && level > 0) {
8046		btrfs_tree_unlock_rw(eb, path->locks[level]);
8047		path->locks[level] = 0;
8048	}
8049	return 0;
8050}
8051
8052/*
8053 * helper to process tree block pointer.
8054 *
8055 * when wc->stage == DROP_REFERENCE, this function checks
8056 * reference count of the block pointed to. if the block
8057 * is shared and we need update back refs for the subtree
8058 * rooted at the block, this function changes wc->stage to
8059 * UPDATE_BACKREF. if the block is shared and there is no
8060 * need to update back, this function drops the reference
8061 * to the block.
8062 *
8063 * NOTE: return value 1 means we should stop walking down.
8064 */
8065static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8066				 struct btrfs_root *root,
8067				 struct btrfs_path *path,
8068				 struct walk_control *wc, int *lookup_info)
8069{
8070	u64 bytenr;
8071	u64 generation;
8072	u64 parent;
8073	u32 blocksize;
8074	struct btrfs_key key;
8075	struct extent_buffer *next;
8076	int level = wc->level;
8077	int reada = 0;
8078	int ret = 0;
8079	bool need_account = false;
8080
8081	generation = btrfs_node_ptr_generation(path->nodes[level],
8082					       path->slots[level]);
8083	/*
8084	 * if the lower level block was created before the snapshot
8085	 * was created, we know there is no need to update back refs
8086	 * for the subtree
8087	 */
8088	if (wc->stage == UPDATE_BACKREF &&
8089	    generation <= root->root_key.offset) {
8090		*lookup_info = 1;
8091		return 1;
8092	}
8093
8094	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8095	blocksize = root->nodesize;
8096
8097	next = btrfs_find_tree_block(root->fs_info, bytenr);
8098	if (!next) {
8099		next = btrfs_find_create_tree_block(root, bytenr);
8100		if (!next)
8101			return -ENOMEM;
8102		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8103					       level - 1);
8104		reada = 1;
8105	}
8106	btrfs_tree_lock(next);
8107	btrfs_set_lock_blocking(next);
8108
8109	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8110				       &wc->refs[level - 1],
8111				       &wc->flags[level - 1]);
8112	if (ret < 0) {
8113		btrfs_tree_unlock(next);
8114		return ret;
8115	}
8116
8117	if (unlikely(wc->refs[level - 1] == 0)) {
8118		btrfs_err(root->fs_info, "Missing references.");
8119		BUG();
8120	}
8121	*lookup_info = 0;
8122
8123	if (wc->stage == DROP_REFERENCE) {
8124		if (wc->refs[level - 1] > 1) {
8125			need_account = true;
8126			if (level == 1 &&
8127			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8128				goto skip;
8129
8130			if (!wc->update_ref ||
8131			    generation <= root->root_key.offset)
8132				goto skip;
8133
8134			btrfs_node_key_to_cpu(path->nodes[level], &key,
8135					      path->slots[level]);
8136			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8137			if (ret < 0)
8138				goto skip;
8139
8140			wc->stage = UPDATE_BACKREF;
8141			wc->shared_level = level - 1;
8142		}
8143	} else {
8144		if (level == 1 &&
8145		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8146			goto skip;
8147	}
8148
8149	if (!btrfs_buffer_uptodate(next, generation, 0)) {
8150		btrfs_tree_unlock(next);
8151		free_extent_buffer(next);
8152		next = NULL;
8153		*lookup_info = 1;
8154	}
8155
8156	if (!next) {
8157		if (reada && level == 1)
8158			reada_walk_down(trans, root, wc, path);
8159		next = read_tree_block(root, bytenr, generation);
8160		if (!next || !extent_buffer_uptodate(next)) {
8161			free_extent_buffer(next);
8162			return -EIO;
8163		}
8164		btrfs_tree_lock(next);
8165		btrfs_set_lock_blocking(next);
8166	}
8167
8168	level--;
8169	BUG_ON(level != btrfs_header_level(next));
8170	path->nodes[level] = next;
8171	path->slots[level] = 0;
8172	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8173	wc->level = level;
8174	if (wc->level == 1)
8175		wc->reada_slot = 0;
8176	return 0;
8177skip:
8178	wc->refs[level - 1] = 0;
8179	wc->flags[level - 1] = 0;
8180	if (wc->stage == DROP_REFERENCE) {
8181		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8182			parent = path->nodes[level]->start;
8183		} else {
8184			BUG_ON(root->root_key.objectid !=
8185			       btrfs_header_owner(path->nodes[level]));
8186			parent = 0;
8187		}
8188
8189		if (need_account) {
8190			ret = account_shared_subtree(trans, root, next,
8191						     generation, level - 1);
8192			if (ret) {
8193				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8194					"%d accounting shared subtree. Quota "
8195					"is out of sync, rescan required.\n",
8196					root->fs_info->sb->s_id, ret);
8197			}
8198		}
8199		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8200				root->root_key.objectid, level - 1, 0, 0);
8201		BUG_ON(ret); /* -ENOMEM */
8202	}
8203	btrfs_tree_unlock(next);
8204	free_extent_buffer(next);
8205	*lookup_info = 1;
8206	return 1;
8207}
8208
8209/*
8210 * helper to process tree block while walking up the tree.
8211 *
8212 * when wc->stage == DROP_REFERENCE, this function drops
8213 * reference count on the block.
8214 *
8215 * when wc->stage == UPDATE_BACKREF, this function changes
8216 * wc->stage back to DROP_REFERENCE if we changed wc->stage
8217 * to UPDATE_BACKREF previously while processing the block.
8218 *
8219 * NOTE: return value 1 means we should stop walking up.
8220 */
8221static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8222				 struct btrfs_root *root,
8223				 struct btrfs_path *path,
8224				 struct walk_control *wc)
8225{
8226	int ret;
8227	int level = wc->level;
8228	struct extent_buffer *eb = path->nodes[level];
8229	u64 parent = 0;
8230
8231	if (wc->stage == UPDATE_BACKREF) {
8232		BUG_ON(wc->shared_level < level);
8233		if (level < wc->shared_level)
8234			goto out;
8235
8236		ret = find_next_key(path, level + 1, &wc->update_progress);
8237		if (ret > 0)
8238			wc->update_ref = 0;
8239
8240		wc->stage = DROP_REFERENCE;
8241		wc->shared_level = -1;
8242		path->slots[level] = 0;
8243
8244		/*
8245		 * check reference count again if the block isn't locked.
8246		 * we should start walking down the tree again if reference
8247		 * count is one.
8248		 */
8249		if (!path->locks[level]) {
8250			BUG_ON(level == 0);
8251			btrfs_tree_lock(eb);
8252			btrfs_set_lock_blocking(eb);
8253			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8254
8255			ret = btrfs_lookup_extent_info(trans, root,
8256						       eb->start, level, 1,
8257						       &wc->refs[level],
8258						       &wc->flags[level]);
8259			if (ret < 0) {
8260				btrfs_tree_unlock_rw(eb, path->locks[level]);
8261				path->locks[level] = 0;
8262				return ret;
8263			}
8264			BUG_ON(wc->refs[level] == 0);
8265			if (wc->refs[level] == 1) {
8266				btrfs_tree_unlock_rw(eb, path->locks[level]);
8267				path->locks[level] = 0;
8268				return 1;
8269			}
8270		}
8271	}
8272
8273	/* wc->stage == DROP_REFERENCE */
8274	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8275
8276	if (wc->refs[level] == 1) {
8277		if (level == 0) {
8278			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8279				ret = btrfs_dec_ref(trans, root, eb, 1);
8280			else
8281				ret = btrfs_dec_ref(trans, root, eb, 0);
8282			BUG_ON(ret); /* -ENOMEM */
8283			ret = account_leaf_items(trans, root, eb);
8284			if (ret) {
8285				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8286					"%d accounting leaf items. Quota "
8287					"is out of sync, rescan required.\n",
8288					root->fs_info->sb->s_id, ret);
8289			}
8290		}
8291		/* make block locked assertion in clean_tree_block happy */
8292		if (!path->locks[level] &&
8293		    btrfs_header_generation(eb) == trans->transid) {
8294			btrfs_tree_lock(eb);
8295			btrfs_set_lock_blocking(eb);
8296			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8297		}
8298		clean_tree_block(trans, root->fs_info, eb);
8299	}
8300
8301	if (eb == root->node) {
8302		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8303			parent = eb->start;
8304		else
8305			BUG_ON(root->root_key.objectid !=
8306			       btrfs_header_owner(eb));
8307	} else {
8308		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8309			parent = path->nodes[level + 1]->start;
8310		else
8311			BUG_ON(root->root_key.objectid !=
8312			       btrfs_header_owner(path->nodes[level + 1]));
8313	}
8314
8315	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8316out:
8317	wc->refs[level] = 0;
8318	wc->flags[level] = 0;
8319	return 0;
8320}
8321
8322static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8323				   struct btrfs_root *root,
8324				   struct btrfs_path *path,
8325				   struct walk_control *wc)
8326{
8327	int level = wc->level;
8328	int lookup_info = 1;
8329	int ret;
8330
8331	while (level >= 0) {
8332		ret = walk_down_proc(trans, root, path, wc, lookup_info);
8333		if (ret > 0)
8334			break;
8335
8336		if (level == 0)
8337			break;
8338
8339		if (path->slots[level] >=
8340		    btrfs_header_nritems(path->nodes[level]))
8341			break;
8342
8343		ret = do_walk_down(trans, root, path, wc, &lookup_info);
8344		if (ret > 0) {
8345			path->slots[level]++;
8346			continue;
8347		} else if (ret < 0)
8348			return ret;
8349		level = wc->level;
8350	}
8351	return 0;
8352}
8353
8354static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8355				 struct btrfs_root *root,
8356				 struct btrfs_path *path,
8357				 struct walk_control *wc, int max_level)
8358{
8359	int level = wc->level;
8360	int ret;
8361
8362	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8363	while (level < max_level && path->nodes[level]) {
8364		wc->level = level;
8365		if (path->slots[level] + 1 <
8366		    btrfs_header_nritems(path->nodes[level])) {
8367			path->slots[level]++;
8368			return 0;
8369		} else {
8370			ret = walk_up_proc(trans, root, path, wc);
8371			if (ret > 0)
8372				return 0;
8373
8374			if (path->locks[level]) {
8375				btrfs_tree_unlock_rw(path->nodes[level],
8376						     path->locks[level]);
8377				path->locks[level] = 0;
8378			}
8379			free_extent_buffer(path->nodes[level]);
8380			path->nodes[level] = NULL;
8381			level++;
8382		}
8383	}
8384	return 1;
8385}
8386
8387/*
8388 * drop a subvolume tree.
8389 *
8390 * this function traverses the tree freeing any blocks that only
8391 * referenced by the tree.
8392 *
8393 * when a shared tree block is found. this function decreases its
8394 * reference count by one. if update_ref is true, this function
8395 * also make sure backrefs for the shared block and all lower level
8396 * blocks are properly updated.
8397 *
8398 * If called with for_reloc == 0, may exit early with -EAGAIN
8399 */
8400int btrfs_drop_snapshot(struct btrfs_root *root,
8401			 struct btrfs_block_rsv *block_rsv, int update_ref,
8402			 int for_reloc)
8403{
8404	struct btrfs_path *path;
8405	struct btrfs_trans_handle *trans;
8406	struct btrfs_root *tree_root = root->fs_info->tree_root;
8407	struct btrfs_root_item *root_item = &root->root_item;
8408	struct walk_control *wc;
8409	struct btrfs_key key;
8410	int err = 0;
8411	int ret;
8412	int level;
8413	bool root_dropped = false;
8414
8415	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8416
8417	path = btrfs_alloc_path();
8418	if (!path) {
8419		err = -ENOMEM;
8420		goto out;
8421	}
8422
8423	wc = kzalloc(sizeof(*wc), GFP_NOFS);
8424	if (!wc) {
8425		btrfs_free_path(path);
8426		err = -ENOMEM;
8427		goto out;
8428	}
8429
8430	trans = btrfs_start_transaction(tree_root, 0);
8431	if (IS_ERR(trans)) {
8432		err = PTR_ERR(trans);
8433		goto out_free;
8434	}
8435
8436	if (block_rsv)
8437		trans->block_rsv = block_rsv;
8438
8439	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8440		level = btrfs_header_level(root->node);
8441		path->nodes[level] = btrfs_lock_root_node(root);
8442		btrfs_set_lock_blocking(path->nodes[level]);
8443		path->slots[level] = 0;
8444		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8445		memset(&wc->update_progress, 0,
8446		       sizeof(wc->update_progress));
8447	} else {
8448		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8449		memcpy(&wc->update_progress, &key,
8450		       sizeof(wc->update_progress));
8451
8452		level = root_item->drop_level;
8453		BUG_ON(level == 0);
8454		path->lowest_level = level;
8455		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8456		path->lowest_level = 0;
8457		if (ret < 0) {
8458			err = ret;
8459			goto out_end_trans;
8460		}
8461		WARN_ON(ret > 0);
8462
8463		/*
8464		 * unlock our path, this is safe because only this
8465		 * function is allowed to delete this snapshot
8466		 */
8467		btrfs_unlock_up_safe(path, 0);
8468
8469		level = btrfs_header_level(root->node);
8470		while (1) {
8471			btrfs_tree_lock(path->nodes[level]);
8472			btrfs_set_lock_blocking(path->nodes[level]);
8473			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8474
8475			ret = btrfs_lookup_extent_info(trans, root,
8476						path->nodes[level]->start,
8477						level, 1, &wc->refs[level],
8478						&wc->flags[level]);
8479			if (ret < 0) {
8480				err = ret;
8481				goto out_end_trans;
8482			}
8483			BUG_ON(wc->refs[level] == 0);
8484
8485			if (level == root_item->drop_level)
8486				break;
8487
8488			btrfs_tree_unlock(path->nodes[level]);
8489			path->locks[level] = 0;
8490			WARN_ON(wc->refs[level] != 1);
8491			level--;
8492		}
8493	}
8494
8495	wc->level = level;
8496	wc->shared_level = -1;
8497	wc->stage = DROP_REFERENCE;
8498	wc->update_ref = update_ref;
8499	wc->keep_locks = 0;
8500	wc->for_reloc = for_reloc;
8501	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8502
8503	while (1) {
8504
8505		ret = walk_down_tree(trans, root, path, wc);
8506		if (ret < 0) {
8507			err = ret;
8508			break;
8509		}
8510
8511		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8512		if (ret < 0) {
8513			err = ret;
8514			break;
8515		}
8516
8517		if (ret > 0) {
8518			BUG_ON(wc->stage != DROP_REFERENCE);
8519			break;
8520		}
8521
8522		if (wc->stage == DROP_REFERENCE) {
8523			level = wc->level;
8524			btrfs_node_key(path->nodes[level],
8525				       &root_item->drop_progress,
8526				       path->slots[level]);
8527			root_item->drop_level = level;
8528		}
8529
8530		BUG_ON(wc->level == 0);
8531		if (btrfs_should_end_transaction(trans, tree_root) ||
8532		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8533			ret = btrfs_update_root(trans, tree_root,
8534						&root->root_key,
8535						root_item);
8536			if (ret) {
8537				btrfs_abort_transaction(trans, tree_root, ret);
8538				err = ret;
8539				goto out_end_trans;
8540			}
8541
8542			/*
8543			 * Qgroup update accounting is run from
8544			 * delayed ref handling. This usually works
8545			 * out because delayed refs are normally the
8546			 * only way qgroup updates are added. However,
8547			 * we may have added updates during our tree
8548			 * walk so run qgroups here to make sure we
8549			 * don't lose any updates.
8550			 */
8551			ret = btrfs_delayed_qgroup_accounting(trans,
8552							      root->fs_info);
8553			if (ret)
8554				printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8555						   "running qgroup updates "
8556						   "during snapshot delete. "
8557						   "Quota is out of sync, "
8558						   "rescan required.\n", ret);
8559
8560			btrfs_end_transaction_throttle(trans, tree_root);
8561			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8562				pr_debug("BTRFS: drop snapshot early exit\n");
8563				err = -EAGAIN;
8564				goto out_free;
8565			}
8566
8567			trans = btrfs_start_transaction(tree_root, 0);
8568			if (IS_ERR(trans)) {
8569				err = PTR_ERR(trans);
8570				goto out_free;
8571			}
8572			if (block_rsv)
8573				trans->block_rsv = block_rsv;
8574		}
8575	}
8576	btrfs_release_path(path);
8577	if (err)
8578		goto out_end_trans;
8579
8580	ret = btrfs_del_root(trans, tree_root, &root->root_key);
8581	if (ret) {
8582		btrfs_abort_transaction(trans, tree_root, ret);
8583		goto out_end_trans;
8584	}
8585
8586	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8587		ret = btrfs_find_root(tree_root, &root->root_key, path,
8588				      NULL, NULL);
8589		if (ret < 0) {
8590			btrfs_abort_transaction(trans, tree_root, ret);
8591			err = ret;
8592			goto out_end_trans;
8593		} else if (ret > 0) {
8594			/* if we fail to delete the orphan item this time
8595			 * around, it'll get picked up the next time.
8596			 *
8597			 * The most common failure here is just -ENOENT.
8598			 */
8599			btrfs_del_orphan_item(trans, tree_root,
8600					      root->root_key.objectid);
8601		}
8602	}
8603
8604	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8605		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8606	} else {
8607		free_extent_buffer(root->node);
8608		free_extent_buffer(root->commit_root);
8609		btrfs_put_fs_root(root);
8610	}
8611	root_dropped = true;
8612out_end_trans:
8613	ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8614	if (ret)
8615		printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8616				   "running qgroup updates "
8617				   "during snapshot delete. "
8618				   "Quota is out of sync, "
8619				   "rescan required.\n", ret);
8620
8621	btrfs_end_transaction_throttle(trans, tree_root);
8622out_free:
8623	kfree(wc);
8624	btrfs_free_path(path);
8625out:
8626	/*
8627	 * So if we need to stop dropping the snapshot for whatever reason we
8628	 * need to make sure to add it back to the dead root list so that we
8629	 * keep trying to do the work later.  This also cleans up roots if we
8630	 * don't have it in the radix (like when we recover after a power fail
8631	 * or unmount) so we don't leak memory.
8632	 */
8633	if (!for_reloc && root_dropped == false)
8634		btrfs_add_dead_root(root);
8635	if (err && err != -EAGAIN)
8636		btrfs_std_error(root->fs_info, err);
8637	return err;
8638}
8639
8640/*
8641 * drop subtree rooted at tree block 'node'.
8642 *
8643 * NOTE: this function will unlock and release tree block 'node'
8644 * only used by relocation code
8645 */
8646int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8647			struct btrfs_root *root,
8648			struct extent_buffer *node,
8649			struct extent_buffer *parent)
8650{
8651	struct btrfs_path *path;
8652	struct walk_control *wc;
8653	int level;
8654	int parent_level;
8655	int ret = 0;
8656	int wret;
8657
8658	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8659
8660	path = btrfs_alloc_path();
8661	if (!path)
8662		return -ENOMEM;
8663
8664	wc = kzalloc(sizeof(*wc), GFP_NOFS);
8665	if (!wc) {
8666		btrfs_free_path(path);
8667		return -ENOMEM;
8668	}
8669
8670	btrfs_assert_tree_locked(parent);
8671	parent_level = btrfs_header_level(parent);
8672	extent_buffer_get(parent);
8673	path->nodes[parent_level] = parent;
8674	path->slots[parent_level] = btrfs_header_nritems(parent);
8675
8676	btrfs_assert_tree_locked(node);
8677	level = btrfs_header_level(node);
8678	path->nodes[level] = node;
8679	path->slots[level] = 0;
8680	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8681
8682	wc->refs[parent_level] = 1;
8683	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8684	wc->level = level;
8685	wc->shared_level = -1;
8686	wc->stage = DROP_REFERENCE;
8687	wc->update_ref = 0;
8688	wc->keep_locks = 1;
8689	wc->for_reloc = 1;
8690	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8691
8692	while (1) {
8693		wret = walk_down_tree(trans, root, path, wc);
8694		if (wret < 0) {
8695			ret = wret;
8696			break;
8697		}
8698
8699		wret = walk_up_tree(trans, root, path, wc, parent_level);
8700		if (wret < 0)
8701			ret = wret;
8702		if (wret != 0)
8703			break;
8704	}
8705
8706	kfree(wc);
8707	btrfs_free_path(path);
8708	return ret;
8709}
8710
8711static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8712{
8713	u64 num_devices;
8714	u64 stripped;
8715
8716	/*
8717	 * if restripe for this chunk_type is on pick target profile and
8718	 * return, otherwise do the usual balance
8719	 */
8720	stripped = get_restripe_target(root->fs_info, flags);
8721	if (stripped)
8722		return extended_to_chunk(stripped);
8723
8724	num_devices = root->fs_info->fs_devices->rw_devices;
8725
8726	stripped = BTRFS_BLOCK_GROUP_RAID0 |
8727		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8728		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8729
8730	if (num_devices == 1) {
8731		stripped |= BTRFS_BLOCK_GROUP_DUP;
8732		stripped = flags & ~stripped;
8733
8734		/* turn raid0 into single device chunks */
8735		if (flags & BTRFS_BLOCK_GROUP_RAID0)
8736			return stripped;
8737
8738		/* turn mirroring into duplication */
8739		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8740			     BTRFS_BLOCK_GROUP_RAID10))
8741			return stripped | BTRFS_BLOCK_GROUP_DUP;
8742	} else {
8743		/* they already had raid on here, just return */
8744		if (flags & stripped)
8745			return flags;
8746
8747		stripped |= BTRFS_BLOCK_GROUP_DUP;
8748		stripped = flags & ~stripped;
8749
8750		/* switch duplicated blocks with raid1 */
8751		if (flags & BTRFS_BLOCK_GROUP_DUP)
8752			return stripped | BTRFS_BLOCK_GROUP_RAID1;
8753
8754		/* this is drive concat, leave it alone */
8755	}
8756
8757	return flags;
8758}
8759
8760static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8761{
8762	struct btrfs_space_info *sinfo = cache->space_info;
8763	u64 num_bytes;
8764	u64 min_allocable_bytes;
8765	int ret = -ENOSPC;
8766
8767
8768	/*
8769	 * We need some metadata space and system metadata space for
8770	 * allocating chunks in some corner cases until we force to set
8771	 * it to be readonly.
8772	 */
8773	if ((sinfo->flags &
8774	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8775	    !force)
8776		min_allocable_bytes = 1 * 1024 * 1024;
8777	else
8778		min_allocable_bytes = 0;
8779
8780	spin_lock(&sinfo->lock);
8781	spin_lock(&cache->lock);
8782
8783	if (cache->ro) {
8784		ret = 0;
8785		goto out;
8786	}
8787
8788	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8789		    cache->bytes_super - btrfs_block_group_used(&cache->item);
8790
8791	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8792	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8793	    min_allocable_bytes <= sinfo->total_bytes) {
8794		sinfo->bytes_readonly += num_bytes;
8795		cache->ro = 1;
8796		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8797		ret = 0;
8798	}
8799out:
8800	spin_unlock(&cache->lock);
8801	spin_unlock(&sinfo->lock);
8802	return ret;
8803}
8804
8805int btrfs_set_block_group_ro(struct btrfs_root *root,
8806			     struct btrfs_block_group_cache *cache)
8807
8808{
8809	struct btrfs_trans_handle *trans;
8810	u64 alloc_flags;
8811	int ret;
8812
8813	BUG_ON(cache->ro);
8814
8815again:
8816	trans = btrfs_join_transaction(root);
8817	if (IS_ERR(trans))
8818		return PTR_ERR(trans);
8819
8820	/*
8821	 * we're not allowed to set block groups readonly after the dirty
8822	 * block groups cache has started writing.  If it already started,
8823	 * back off and let this transaction commit
8824	 */
8825	mutex_lock(&root->fs_info->ro_block_group_mutex);
8826	if (trans->transaction->dirty_bg_run) {
8827		u64 transid = trans->transid;
8828
8829		mutex_unlock(&root->fs_info->ro_block_group_mutex);
8830		btrfs_end_transaction(trans, root);
8831
8832		ret = btrfs_wait_for_commit(root, transid);
8833		if (ret)
8834			return ret;
8835		goto again;
8836	}
8837
8838	/*
8839	 * if we are changing raid levels, try to allocate a corresponding
8840	 * block group with the new raid level.
8841	 */
8842	alloc_flags = update_block_group_flags(root, cache->flags);
8843	if (alloc_flags != cache->flags) {
8844		ret = do_chunk_alloc(trans, root, alloc_flags,
8845				     CHUNK_ALLOC_FORCE);
8846		/*
8847		 * ENOSPC is allowed here, we may have enough space
8848		 * already allocated at the new raid level to
8849		 * carry on
8850		 */
8851		if (ret == -ENOSPC)
8852			ret = 0;
8853		if (ret < 0)
8854			goto out;
8855	}
8856
8857	ret = set_block_group_ro(cache, 0);
8858	if (!ret)
8859		goto out;
8860	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8861	ret = do_chunk_alloc(trans, root, alloc_flags,
8862			     CHUNK_ALLOC_FORCE);
8863	if (ret < 0)
8864		goto out;
8865	ret = set_block_group_ro(cache, 0);
8866out:
8867	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8868		alloc_flags = update_block_group_flags(root, cache->flags);
8869		lock_chunks(root->fs_info->chunk_root);
8870		check_system_chunk(trans, root, alloc_flags);
8871		unlock_chunks(root->fs_info->chunk_root);
8872	}
8873	mutex_unlock(&root->fs_info->ro_block_group_mutex);
8874
8875	btrfs_end_transaction(trans, root);
8876	return ret;
8877}
8878
8879int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8880			    struct btrfs_root *root, u64 type)
8881{
8882	u64 alloc_flags = get_alloc_profile(root, type);
8883	return do_chunk_alloc(trans, root, alloc_flags,
8884			      CHUNK_ALLOC_FORCE);
8885}
8886
8887/*
8888 * helper to account the unused space of all the readonly block group in the
8889 * space_info. takes mirrors into account.
8890 */
8891u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8892{
8893	struct btrfs_block_group_cache *block_group;
8894	u64 free_bytes = 0;
8895	int factor;
8896
8897	/* It's df, we don't care if it's racey */
8898	if (list_empty(&sinfo->ro_bgs))
8899		return 0;
8900
8901	spin_lock(&sinfo->lock);
8902	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8903		spin_lock(&block_group->lock);
8904
8905		if (!block_group->ro) {
8906			spin_unlock(&block_group->lock);
8907			continue;
8908		}
8909
8910		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8911					  BTRFS_BLOCK_GROUP_RAID10 |
8912					  BTRFS_BLOCK_GROUP_DUP))
8913			factor = 2;
8914		else
8915			factor = 1;
8916
8917		free_bytes += (block_group->key.offset -
8918			       btrfs_block_group_used(&block_group->item)) *
8919			       factor;
8920
8921		spin_unlock(&block_group->lock);
8922	}
8923	spin_unlock(&sinfo->lock);
8924
8925	return free_bytes;
8926}
8927
8928void btrfs_set_block_group_rw(struct btrfs_root *root,
8929			      struct btrfs_block_group_cache *cache)
8930{
8931	struct btrfs_space_info *sinfo = cache->space_info;
8932	u64 num_bytes;
8933
8934	BUG_ON(!cache->ro);
8935
8936	spin_lock(&sinfo->lock);
8937	spin_lock(&cache->lock);
8938	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8939		    cache->bytes_super - btrfs_block_group_used(&cache->item);
8940	sinfo->bytes_readonly -= num_bytes;
8941	cache->ro = 0;
8942	list_del_init(&cache->ro_list);
8943	spin_unlock(&cache->lock);
8944	spin_unlock(&sinfo->lock);
8945}
8946
8947/*
8948 * checks to see if its even possible to relocate this block group.
8949 *
8950 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
8951 * ok to go ahead and try.
8952 */
8953int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8954{
8955	struct btrfs_block_group_cache *block_group;
8956	struct btrfs_space_info *space_info;
8957	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8958	struct btrfs_device *device;
8959	struct btrfs_trans_handle *trans;
8960	u64 min_free;
8961	u64 dev_min = 1;
8962	u64 dev_nr = 0;
8963	u64 target;
8964	int index;
8965	int full = 0;
8966	int ret = 0;
8967
8968	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8969
8970	/* odd, couldn't find the block group, leave it alone */
8971	if (!block_group)
8972		return -1;
8973
8974	min_free = btrfs_block_group_used(&block_group->item);
8975
8976	/* no bytes used, we're good */
8977	if (!min_free)
8978		goto out;
8979
8980	space_info = block_group->space_info;
8981	spin_lock(&space_info->lock);
8982
8983	full = space_info->full;
8984
8985	/*
8986	 * if this is the last block group we have in this space, we can't
8987	 * relocate it unless we're able to allocate a new chunk below.
8988	 *
8989	 * Otherwise, we need to make sure we have room in the space to handle
8990	 * all of the extents from this block group.  If we can, we're good
8991	 */
8992	if ((space_info->total_bytes != block_group->key.offset) &&
8993	    (space_info->bytes_used + space_info->bytes_reserved +
8994	     space_info->bytes_pinned + space_info->bytes_readonly +
8995	     min_free < space_info->total_bytes)) {
8996		spin_unlock(&space_info->lock);
8997		goto out;
8998	}
8999	spin_unlock(&space_info->lock);
9000
9001	/*
9002	 * ok we don't have enough space, but maybe we have free space on our
9003	 * devices to allocate new chunks for relocation, so loop through our
9004	 * alloc devices and guess if we have enough space.  if this block
9005	 * group is going to be restriped, run checks against the target
9006	 * profile instead of the current one.
9007	 */
9008	ret = -1;
9009
9010	/*
9011	 * index:
9012	 *      0: raid10
9013	 *      1: raid1
9014	 *      2: dup
9015	 *      3: raid0
9016	 *      4: single
9017	 */
9018	target = get_restripe_target(root->fs_info, block_group->flags);
9019	if (target) {
9020		index = __get_raid_index(extended_to_chunk(target));
9021	} else {
9022		/*
9023		 * this is just a balance, so if we were marked as full
9024		 * we know there is no space for a new chunk
9025		 */
9026		if (full)
9027			goto out;
9028
9029		index = get_block_group_index(block_group);
9030	}
9031
9032	if (index == BTRFS_RAID_RAID10) {
9033		dev_min = 4;
9034		/* Divide by 2 */
9035		min_free >>= 1;
9036	} else if (index == BTRFS_RAID_RAID1) {
9037		dev_min = 2;
9038	} else if (index == BTRFS_RAID_DUP) {
9039		/* Multiply by 2 */
9040		min_free <<= 1;
9041	} else if (index == BTRFS_RAID_RAID0) {
9042		dev_min = fs_devices->rw_devices;
9043		min_free = div64_u64(min_free, dev_min);
9044	}
9045
9046	/* We need to do this so that we can look at pending chunks */
9047	trans = btrfs_join_transaction(root);
9048	if (IS_ERR(trans)) {
9049		ret = PTR_ERR(trans);
9050		goto out;
9051	}
9052
9053	mutex_lock(&root->fs_info->chunk_mutex);
9054	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9055		u64 dev_offset;
9056
9057		/*
9058		 * check to make sure we can actually find a chunk with enough
9059		 * space to fit our block group in.
9060		 */
9061		if (device->total_bytes > device->bytes_used + min_free &&
9062		    !device->is_tgtdev_for_dev_replace) {
9063			ret = find_free_dev_extent(trans, device, min_free,
9064						   &dev_offset, NULL);
9065			if (!ret)
9066				dev_nr++;
9067
9068			if (dev_nr >= dev_min)
9069				break;
9070
9071			ret = -1;
9072		}
9073	}
9074	mutex_unlock(&root->fs_info->chunk_mutex);
9075	btrfs_end_transaction(trans, root);
9076out:
9077	btrfs_put_block_group(block_group);
9078	return ret;
9079}
9080
9081static int find_first_block_group(struct btrfs_root *root,
9082		struct btrfs_path *path, struct btrfs_key *key)
9083{
9084	int ret = 0;
9085	struct btrfs_key found_key;
9086	struct extent_buffer *leaf;
9087	int slot;
9088
9089	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9090	if (ret < 0)
9091		goto out;
9092
9093	while (1) {
9094		slot = path->slots[0];
9095		leaf = path->nodes[0];
9096		if (slot >= btrfs_header_nritems(leaf)) {
9097			ret = btrfs_next_leaf(root, path);
9098			if (ret == 0)
9099				continue;
9100			if (ret < 0)
9101				goto out;
9102			break;
9103		}
9104		btrfs_item_key_to_cpu(leaf, &found_key, slot);
9105
9106		if (found_key.objectid >= key->objectid &&
9107		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9108			ret = 0;
9109			goto out;
9110		}
9111		path->slots[0]++;
9112	}
9113out:
9114	return ret;
9115}
9116
9117void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9118{
9119	struct btrfs_block_group_cache *block_group;
9120	u64 last = 0;
9121
9122	while (1) {
9123		struct inode *inode;
9124
9125		block_group = btrfs_lookup_first_block_group(info, last);
9126		while (block_group) {
9127			spin_lock(&block_group->lock);
9128			if (block_group->iref)
9129				break;
9130			spin_unlock(&block_group->lock);
9131			block_group = next_block_group(info->tree_root,
9132						       block_group);
9133		}
9134		if (!block_group) {
9135			if (last == 0)
9136				break;
9137			last = 0;
9138			continue;
9139		}
9140
9141		inode = block_group->inode;
9142		block_group->iref = 0;
9143		block_group->inode = NULL;
9144		spin_unlock(&block_group->lock);
9145		iput(inode);
9146		last = block_group->key.objectid + block_group->key.offset;
9147		btrfs_put_block_group(block_group);
9148	}
9149}
9150
9151int btrfs_free_block_groups(struct btrfs_fs_info *info)
9152{
9153	struct btrfs_block_group_cache *block_group;
9154	struct btrfs_space_info *space_info;
9155	struct btrfs_caching_control *caching_ctl;
9156	struct rb_node *n;
9157
9158	down_write(&info->commit_root_sem);
9159	while (!list_empty(&info->caching_block_groups)) {
9160		caching_ctl = list_entry(info->caching_block_groups.next,
9161					 struct btrfs_caching_control, list);
9162		list_del(&caching_ctl->list);
9163		put_caching_control(caching_ctl);
9164	}
9165	up_write(&info->commit_root_sem);
9166
9167	spin_lock(&info->unused_bgs_lock);
9168	while (!list_empty(&info->unused_bgs)) {
9169		block_group = list_first_entry(&info->unused_bgs,
9170					       struct btrfs_block_group_cache,
9171					       bg_list);
9172		list_del_init(&block_group->bg_list);
9173		btrfs_put_block_group(block_group);
9174	}
9175	spin_unlock(&info->unused_bgs_lock);
9176
9177	spin_lock(&info->block_group_cache_lock);
9178	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9179		block_group = rb_entry(n, struct btrfs_block_group_cache,
9180				       cache_node);
9181		rb_erase(&block_group->cache_node,
9182			 &info->block_group_cache_tree);
9183		RB_CLEAR_NODE(&block_group->cache_node);
9184		spin_unlock(&info->block_group_cache_lock);
9185
9186		down_write(&block_group->space_info->groups_sem);
9187		list_del(&block_group->list);
9188		up_write(&block_group->space_info->groups_sem);
9189
9190		if (block_group->cached == BTRFS_CACHE_STARTED)
9191			wait_block_group_cache_done(block_group);
9192
9193		/*
9194		 * We haven't cached this block group, which means we could
9195		 * possibly have excluded extents on this block group.
9196		 */
9197		if (block_group->cached == BTRFS_CACHE_NO ||
9198		    block_group->cached == BTRFS_CACHE_ERROR)
9199			free_excluded_extents(info->extent_root, block_group);
9200
9201		btrfs_remove_free_space_cache(block_group);
9202		btrfs_put_block_group(block_group);
9203
9204		spin_lock(&info->block_group_cache_lock);
9205	}
9206	spin_unlock(&info->block_group_cache_lock);
9207
9208	/* now that all the block groups are freed, go through and
9209	 * free all the space_info structs.  This is only called during
9210	 * the final stages of unmount, and so we know nobody is
9211	 * using them.  We call synchronize_rcu() once before we start,
9212	 * just to be on the safe side.
9213	 */
9214	synchronize_rcu();
9215
9216	release_global_block_rsv(info);
9217
9218	while (!list_empty(&info->space_info)) {
9219		int i;
9220
9221		space_info = list_entry(info->space_info.next,
9222					struct btrfs_space_info,
9223					list);
9224		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9225			if (WARN_ON(space_info->bytes_pinned > 0 ||
9226			    space_info->bytes_reserved > 0 ||
9227			    space_info->bytes_may_use > 0)) {
9228				dump_space_info(space_info, 0, 0);
9229			}
9230		}
9231		list_del(&space_info->list);
9232		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9233			struct kobject *kobj;
9234			kobj = space_info->block_group_kobjs[i];
9235			space_info->block_group_kobjs[i] = NULL;
9236			if (kobj) {
9237				kobject_del(kobj);
9238				kobject_put(kobj);
9239			}
9240		}
9241		kobject_del(&space_info->kobj);
9242		kobject_put(&space_info->kobj);
9243	}
9244	return 0;
9245}
9246
9247static void __link_block_group(struct btrfs_space_info *space_info,
9248			       struct btrfs_block_group_cache *cache)
9249{
9250	int index = get_block_group_index(cache);
9251	bool first = false;
9252
9253	down_write(&space_info->groups_sem);
9254	if (list_empty(&space_info->block_groups[index]))
9255		first = true;
9256	list_add_tail(&cache->list, &space_info->block_groups[index]);
9257	up_write(&space_info->groups_sem);
9258
9259	if (first) {
9260		struct raid_kobject *rkobj;
9261		int ret;
9262
9263		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9264		if (!rkobj)
9265			goto out_err;
9266		rkobj->raid_type = index;
9267		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9268		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9269				  "%s", get_raid_name(index));
9270		if (ret) {
9271			kobject_put(&rkobj->kobj);
9272			goto out_err;
9273		}
9274		space_info->block_group_kobjs[index] = &rkobj->kobj;
9275	}
9276
9277	return;
9278out_err:
9279	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9280}
9281
9282static struct btrfs_block_group_cache *
9283btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9284{
9285	struct btrfs_block_group_cache *cache;
9286
9287	cache = kzalloc(sizeof(*cache), GFP_NOFS);
9288	if (!cache)
9289		return NULL;
9290
9291	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9292					GFP_NOFS);
9293	if (!cache->free_space_ctl) {
9294		kfree(cache);
9295		return NULL;
9296	}
9297
9298	cache->key.objectid = start;
9299	cache->key.offset = size;
9300	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9301
9302	cache->sectorsize = root->sectorsize;
9303	cache->fs_info = root->fs_info;
9304	cache->full_stripe_len = btrfs_full_stripe_len(root,
9305					       &root->fs_info->mapping_tree,
9306					       start);
9307	atomic_set(&cache->count, 1);
9308	spin_lock_init(&cache->lock);
9309	init_rwsem(&cache->data_rwsem);
9310	INIT_LIST_HEAD(&cache->list);
9311	INIT_LIST_HEAD(&cache->cluster_list);
9312	INIT_LIST_HEAD(&cache->bg_list);
9313	INIT_LIST_HEAD(&cache->ro_list);
9314	INIT_LIST_HEAD(&cache->dirty_list);
9315	INIT_LIST_HEAD(&cache->io_list);
9316	btrfs_init_free_space_ctl(cache);
9317	atomic_set(&cache->trimming, 0);
9318
9319	return cache;
9320}
9321
9322int btrfs_read_block_groups(struct btrfs_root *root)
9323{
9324	struct btrfs_path *path;
9325	int ret;
9326	struct btrfs_block_group_cache *cache;
9327	struct btrfs_fs_info *info = root->fs_info;
9328	struct btrfs_space_info *space_info;
9329	struct btrfs_key key;
9330	struct btrfs_key found_key;
9331	struct extent_buffer *leaf;
9332	int need_clear = 0;
9333	u64 cache_gen;
9334
9335	root = info->extent_root;
9336	key.objectid = 0;
9337	key.offset = 0;
9338	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9339	path = btrfs_alloc_path();
9340	if (!path)
9341		return -ENOMEM;
9342	path->reada = 1;
9343
9344	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9345	if (btrfs_test_opt(root, SPACE_CACHE) &&
9346	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9347		need_clear = 1;
9348	if (btrfs_test_opt(root, CLEAR_CACHE))
9349		need_clear = 1;
9350
9351	while (1) {
9352		ret = find_first_block_group(root, path, &key);
9353		if (ret > 0)
9354			break;
9355		if (ret != 0)
9356			goto error;
9357
9358		leaf = path->nodes[0];
9359		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9360
9361		cache = btrfs_create_block_group_cache(root, found_key.objectid,
9362						       found_key.offset);
9363		if (!cache) {
9364			ret = -ENOMEM;
9365			goto error;
9366		}
9367
9368		if (need_clear) {
9369			/*
9370			 * When we mount with old space cache, we need to
9371			 * set BTRFS_DC_CLEAR and set dirty flag.
9372			 *
9373			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9374			 *    truncate the old free space cache inode and
9375			 *    setup a new one.
9376			 * b) Setting 'dirty flag' makes sure that we flush
9377			 *    the new space cache info onto disk.
9378			 */
9379			if (btrfs_test_opt(root, SPACE_CACHE))
9380				cache->disk_cache_state = BTRFS_DC_CLEAR;
9381		}
9382
9383		read_extent_buffer(leaf, &cache->item,
9384				   btrfs_item_ptr_offset(leaf, path->slots[0]),
9385				   sizeof(cache->item));
9386		cache->flags = btrfs_block_group_flags(&cache->item);
9387
9388		key.objectid = found_key.objectid + found_key.offset;
9389		btrfs_release_path(path);
9390
9391		/*
9392		 * We need to exclude the super stripes now so that the space
9393		 * info has super bytes accounted for, otherwise we'll think
9394		 * we have more space than we actually do.
9395		 */
9396		ret = exclude_super_stripes(root, cache);
9397		if (ret) {
9398			/*
9399			 * We may have excluded something, so call this just in
9400			 * case.
9401			 */
9402			free_excluded_extents(root, cache);
9403			btrfs_put_block_group(cache);
9404			goto error;
9405		}
9406
9407		/*
9408		 * check for two cases, either we are full, and therefore
9409		 * don't need to bother with the caching work since we won't
9410		 * find any space, or we are empty, and we can just add all
9411		 * the space in and be done with it.  This saves us _alot_ of
9412		 * time, particularly in the full case.
9413		 */
9414		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9415			cache->last_byte_to_unpin = (u64)-1;
9416			cache->cached = BTRFS_CACHE_FINISHED;
9417			free_excluded_extents(root, cache);
9418		} else if (btrfs_block_group_used(&cache->item) == 0) {
9419			cache->last_byte_to_unpin = (u64)-1;
9420			cache->cached = BTRFS_CACHE_FINISHED;
9421			add_new_free_space(cache, root->fs_info,
9422					   found_key.objectid,
9423					   found_key.objectid +
9424					   found_key.offset);
9425			free_excluded_extents(root, cache);
9426		}
9427
9428		ret = btrfs_add_block_group_cache(root->fs_info, cache);
9429		if (ret) {
9430			btrfs_remove_free_space_cache(cache);
9431			btrfs_put_block_group(cache);
9432			goto error;
9433		}
9434
9435		ret = update_space_info(info, cache->flags, found_key.offset,
9436					btrfs_block_group_used(&cache->item),
9437					&space_info);
9438		if (ret) {
9439			btrfs_remove_free_space_cache(cache);
9440			spin_lock(&info->block_group_cache_lock);
9441			rb_erase(&cache->cache_node,
9442				 &info->block_group_cache_tree);
9443			RB_CLEAR_NODE(&cache->cache_node);
9444			spin_unlock(&info->block_group_cache_lock);
9445			btrfs_put_block_group(cache);
9446			goto error;
9447		}
9448
9449		cache->space_info = space_info;
9450		spin_lock(&cache->space_info->lock);
9451		cache->space_info->bytes_readonly += cache->bytes_super;
9452		spin_unlock(&cache->space_info->lock);
9453
9454		__link_block_group(space_info, cache);
9455
9456		set_avail_alloc_bits(root->fs_info, cache->flags);
9457		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9458			set_block_group_ro(cache, 1);
9459		} else if (btrfs_block_group_used(&cache->item) == 0) {
9460			spin_lock(&info->unused_bgs_lock);
9461			/* Should always be true but just in case. */
9462			if (list_empty(&cache->bg_list)) {
9463				btrfs_get_block_group(cache);
9464				list_add_tail(&cache->bg_list,
9465					      &info->unused_bgs);
9466			}
9467			spin_unlock(&info->unused_bgs_lock);
9468		}
9469	}
9470
9471	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9472		if (!(get_alloc_profile(root, space_info->flags) &
9473		      (BTRFS_BLOCK_GROUP_RAID10 |
9474		       BTRFS_BLOCK_GROUP_RAID1 |
9475		       BTRFS_BLOCK_GROUP_RAID5 |
9476		       BTRFS_BLOCK_GROUP_RAID6 |
9477		       BTRFS_BLOCK_GROUP_DUP)))
9478			continue;
9479		/*
9480		 * avoid allocating from un-mirrored block group if there are
9481		 * mirrored block groups.
9482		 */
9483		list_for_each_entry(cache,
9484				&space_info->block_groups[BTRFS_RAID_RAID0],
9485				list)
9486			set_block_group_ro(cache, 1);
9487		list_for_each_entry(cache,
9488				&space_info->block_groups[BTRFS_RAID_SINGLE],
9489				list)
9490			set_block_group_ro(cache, 1);
9491	}
9492
9493	init_global_block_rsv(info);
9494	ret = 0;
9495error:
9496	btrfs_free_path(path);
9497	return ret;
9498}
9499
9500void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9501				       struct btrfs_root *root)
9502{
9503	struct btrfs_block_group_cache *block_group, *tmp;
9504	struct btrfs_root *extent_root = root->fs_info->extent_root;
9505	struct btrfs_block_group_item item;
9506	struct btrfs_key key;
9507	int ret = 0;
9508
9509	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9510		if (ret)
9511			goto next;
9512
9513		spin_lock(&block_group->lock);
9514		memcpy(&item, &block_group->item, sizeof(item));
9515		memcpy(&key, &block_group->key, sizeof(key));
9516		spin_unlock(&block_group->lock);
9517
9518		ret = btrfs_insert_item(trans, extent_root, &key, &item,
9519					sizeof(item));
9520		if (ret)
9521			btrfs_abort_transaction(trans, extent_root, ret);
9522		ret = btrfs_finish_chunk_alloc(trans, extent_root,
9523					       key.objectid, key.offset);
9524		if (ret)
9525			btrfs_abort_transaction(trans, extent_root, ret);
9526next:
9527		list_del_init(&block_group->bg_list);
9528	}
9529}
9530
9531int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9532			   struct btrfs_root *root, u64 bytes_used,
9533			   u64 type, u64 chunk_objectid, u64 chunk_offset,
9534			   u64 size)
9535{
9536	int ret;
9537	struct btrfs_root *extent_root;
9538	struct btrfs_block_group_cache *cache;
9539
9540	extent_root = root->fs_info->extent_root;
9541
9542	btrfs_set_log_full_commit(root->fs_info, trans);
9543
9544	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9545	if (!cache)
9546		return -ENOMEM;
9547
9548	btrfs_set_block_group_used(&cache->item, bytes_used);
9549	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9550	btrfs_set_block_group_flags(&cache->item, type);
9551
9552	cache->flags = type;
9553	cache->last_byte_to_unpin = (u64)-1;
9554	cache->cached = BTRFS_CACHE_FINISHED;
9555	ret = exclude_super_stripes(root, cache);
9556	if (ret) {
9557		/*
9558		 * We may have excluded something, so call this just in
9559		 * case.
9560		 */
9561		free_excluded_extents(root, cache);
9562		btrfs_put_block_group(cache);
9563		return ret;
9564	}
9565
9566	add_new_free_space(cache, root->fs_info, chunk_offset,
9567			   chunk_offset + size);
9568
9569	free_excluded_extents(root, cache);
9570
9571	ret = btrfs_add_block_group_cache(root->fs_info, cache);
9572	if (ret) {
9573		btrfs_remove_free_space_cache(cache);
9574		btrfs_put_block_group(cache);
9575		return ret;
9576	}
9577
9578	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9579				&cache->space_info);
9580	if (ret) {
9581		btrfs_remove_free_space_cache(cache);
9582		spin_lock(&root->fs_info->block_group_cache_lock);
9583		rb_erase(&cache->cache_node,
9584			 &root->fs_info->block_group_cache_tree);
9585		RB_CLEAR_NODE(&cache->cache_node);
9586		spin_unlock(&root->fs_info->block_group_cache_lock);
9587		btrfs_put_block_group(cache);
9588		return ret;
9589	}
9590	update_global_block_rsv(root->fs_info);
9591
9592	spin_lock(&cache->space_info->lock);
9593	cache->space_info->bytes_readonly += cache->bytes_super;
9594	spin_unlock(&cache->space_info->lock);
9595
9596	__link_block_group(cache->space_info, cache);
9597
9598	list_add_tail(&cache->bg_list, &trans->new_bgs);
9599
9600	set_avail_alloc_bits(extent_root->fs_info, type);
9601
9602	return 0;
9603}
9604
9605static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9606{
9607	u64 extra_flags = chunk_to_extended(flags) &
9608				BTRFS_EXTENDED_PROFILE_MASK;
9609
9610	write_seqlock(&fs_info->profiles_lock);
9611	if (flags & BTRFS_BLOCK_GROUP_DATA)
9612		fs_info->avail_data_alloc_bits &= ~extra_flags;
9613	if (flags & BTRFS_BLOCK_GROUP_METADATA)
9614		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9615	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9616		fs_info->avail_system_alloc_bits &= ~extra_flags;
9617	write_sequnlock(&fs_info->profiles_lock);
9618}
9619
9620int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9621			     struct btrfs_root *root, u64 group_start,
9622			     struct extent_map *em)
9623{
9624	struct btrfs_path *path;
9625	struct btrfs_block_group_cache *block_group;
9626	struct btrfs_free_cluster *cluster;
9627	struct btrfs_root *tree_root = root->fs_info->tree_root;
9628	struct btrfs_key key;
9629	struct inode *inode;
9630	struct kobject *kobj = NULL;
9631	int ret;
9632	int index;
9633	int factor;
9634	struct btrfs_caching_control *caching_ctl = NULL;
9635	bool remove_em;
9636
9637	root = root->fs_info->extent_root;
9638
9639	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9640	BUG_ON(!block_group);
9641	BUG_ON(!block_group->ro);
9642
9643	/*
9644	 * Free the reserved super bytes from this block group before
9645	 * remove it.
9646	 */
9647	free_excluded_extents(root, block_group);
9648
9649	memcpy(&key, &block_group->key, sizeof(key));
9650	index = get_block_group_index(block_group);
9651	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9652				  BTRFS_BLOCK_GROUP_RAID1 |
9653				  BTRFS_BLOCK_GROUP_RAID10))
9654		factor = 2;
9655	else
9656		factor = 1;
9657
9658	/* make sure this block group isn't part of an allocation cluster */
9659	cluster = &root->fs_info->data_alloc_cluster;
9660	spin_lock(&cluster->refill_lock);
9661	btrfs_return_cluster_to_free_space(block_group, cluster);
9662	spin_unlock(&cluster->refill_lock);
9663
9664	/*
9665	 * make sure this block group isn't part of a metadata
9666	 * allocation cluster
9667	 */
9668	cluster = &root->fs_info->meta_alloc_cluster;
9669	spin_lock(&cluster->refill_lock);
9670	btrfs_return_cluster_to_free_space(block_group, cluster);
9671	spin_unlock(&cluster->refill_lock);
9672
9673	path = btrfs_alloc_path();
9674	if (!path) {
9675		ret = -ENOMEM;
9676		goto out;
9677	}
9678
9679	/*
9680	 * get the inode first so any iput calls done for the io_list
9681	 * aren't the final iput (no unlinks allowed now)
9682	 */
9683	inode = lookup_free_space_inode(tree_root, block_group, path);
9684
9685	mutex_lock(&trans->transaction->cache_write_mutex);
9686	/*
9687	 * make sure our free spache cache IO is done before remove the
9688	 * free space inode
9689	 */
9690	spin_lock(&trans->transaction->dirty_bgs_lock);
9691	if (!list_empty(&block_group->io_list)) {
9692		list_del_init(&block_group->io_list);
9693
9694		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9695
9696		spin_unlock(&trans->transaction->dirty_bgs_lock);
9697		btrfs_wait_cache_io(root, trans, block_group,
9698				    &block_group->io_ctl, path,
9699				    block_group->key.objectid);
9700		btrfs_put_block_group(block_group);
9701		spin_lock(&trans->transaction->dirty_bgs_lock);
9702	}
9703
9704	if (!list_empty(&block_group->dirty_list)) {
9705		list_del_init(&block_group->dirty_list);
9706		btrfs_put_block_group(block_group);
9707	}
9708	spin_unlock(&trans->transaction->dirty_bgs_lock);
9709	mutex_unlock(&trans->transaction->cache_write_mutex);
9710
9711	if (!IS_ERR(inode)) {
9712		ret = btrfs_orphan_add(trans, inode);
9713		if (ret) {
9714			btrfs_add_delayed_iput(inode);
9715			goto out;
9716		}
9717		clear_nlink(inode);
9718		/* One for the block groups ref */
9719		spin_lock(&block_group->lock);
9720		if (block_group->iref) {
9721			block_group->iref = 0;
9722			block_group->inode = NULL;
9723			spin_unlock(&block_group->lock);
9724			iput(inode);
9725		} else {
9726			spin_unlock(&block_group->lock);
9727		}
9728		/* One for our lookup ref */
9729		btrfs_add_delayed_iput(inode);
9730	}
9731
9732	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9733	key.offset = block_group->key.objectid;
9734	key.type = 0;
9735
9736	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9737	if (ret < 0)
9738		goto out;
9739	if (ret > 0)
9740		btrfs_release_path(path);
9741	if (ret == 0) {
9742		ret = btrfs_del_item(trans, tree_root, path);
9743		if (ret)
9744			goto out;
9745		btrfs_release_path(path);
9746	}
9747
9748	spin_lock(&root->fs_info->block_group_cache_lock);
9749	rb_erase(&block_group->cache_node,
9750		 &root->fs_info->block_group_cache_tree);
9751	RB_CLEAR_NODE(&block_group->cache_node);
9752
9753	if (root->fs_info->first_logical_byte == block_group->key.objectid)
9754		root->fs_info->first_logical_byte = (u64)-1;
9755	spin_unlock(&root->fs_info->block_group_cache_lock);
9756
9757	down_write(&block_group->space_info->groups_sem);
9758	/*
9759	 * we must use list_del_init so people can check to see if they
9760	 * are still on the list after taking the semaphore
9761	 */
9762	list_del_init(&block_group->list);
9763	if (list_empty(&block_group->space_info->block_groups[index])) {
9764		kobj = block_group->space_info->block_group_kobjs[index];
9765		block_group->space_info->block_group_kobjs[index] = NULL;
9766		clear_avail_alloc_bits(root->fs_info, block_group->flags);
9767	}
9768	up_write(&block_group->space_info->groups_sem);
9769	if (kobj) {
9770		kobject_del(kobj);
9771		kobject_put(kobj);
9772	}
9773
9774	if (block_group->has_caching_ctl)
9775		caching_ctl = get_caching_control(block_group);
9776	if (block_group->cached == BTRFS_CACHE_STARTED)
9777		wait_block_group_cache_done(block_group);
9778	if (block_group->has_caching_ctl) {
9779		down_write(&root->fs_info->commit_root_sem);
9780		if (!caching_ctl) {
9781			struct btrfs_caching_control *ctl;
9782
9783			list_for_each_entry(ctl,
9784				    &root->fs_info->caching_block_groups, list)
9785				if (ctl->block_group == block_group) {
9786					caching_ctl = ctl;
9787					atomic_inc(&caching_ctl->count);
9788					break;
9789				}
9790		}
9791		if (caching_ctl)
9792			list_del_init(&caching_ctl->list);
9793		up_write(&root->fs_info->commit_root_sem);
9794		if (caching_ctl) {
9795			/* Once for the caching bgs list and once for us. */
9796			put_caching_control(caching_ctl);
9797			put_caching_control(caching_ctl);
9798		}
9799	}
9800
9801	spin_lock(&trans->transaction->dirty_bgs_lock);
9802	if (!list_empty(&block_group->dirty_list)) {
9803		WARN_ON(1);
9804	}
9805	if (!list_empty(&block_group->io_list)) {
9806		WARN_ON(1);
9807	}
9808	spin_unlock(&trans->transaction->dirty_bgs_lock);
9809	btrfs_remove_free_space_cache(block_group);
9810
9811	spin_lock(&block_group->space_info->lock);
9812	list_del_init(&block_group->ro_list);
9813
9814	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9815		WARN_ON(block_group->space_info->total_bytes
9816			< block_group->key.offset);
9817		WARN_ON(block_group->space_info->bytes_readonly
9818			< block_group->key.offset);
9819		WARN_ON(block_group->space_info->disk_total
9820			< block_group->key.offset * factor);
9821	}
9822	block_group->space_info->total_bytes -= block_group->key.offset;
9823	block_group->space_info->bytes_readonly -= block_group->key.offset;
9824	block_group->space_info->disk_total -= block_group->key.offset * factor;
9825
9826	spin_unlock(&block_group->space_info->lock);
9827
9828	memcpy(&key, &block_group->key, sizeof(key));
9829
9830	lock_chunks(root);
9831	if (!list_empty(&em->list)) {
9832		/* We're in the transaction->pending_chunks list. */
9833		free_extent_map(em);
9834	}
9835	spin_lock(&block_group->lock);
9836	block_group->removed = 1;
9837	/*
9838	 * At this point trimming can't start on this block group, because we
9839	 * removed the block group from the tree fs_info->block_group_cache_tree
9840	 * so no one can't find it anymore and even if someone already got this
9841	 * block group before we removed it from the rbtree, they have already
9842	 * incremented block_group->trimming - if they didn't, they won't find
9843	 * any free space entries because we already removed them all when we
9844	 * called btrfs_remove_free_space_cache().
9845	 *
9846	 * And we must not remove the extent map from the fs_info->mapping_tree
9847	 * to prevent the same logical address range and physical device space
9848	 * ranges from being reused for a new block group. This is because our
9849	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9850	 * completely transactionless, so while it is trimming a range the
9851	 * currently running transaction might finish and a new one start,
9852	 * allowing for new block groups to be created that can reuse the same
9853	 * physical device locations unless we take this special care.
9854	 */
9855	remove_em = (atomic_read(&block_group->trimming) == 0);
9856	/*
9857	 * Make sure a trimmer task always sees the em in the pinned_chunks list
9858	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
9859	 * before checking block_group->removed).
9860	 */
9861	if (!remove_em) {
9862		/*
9863		 * Our em might be in trans->transaction->pending_chunks which
9864		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9865		 * and so is the fs_info->pinned_chunks list.
9866		 *
9867		 * So at this point we must be holding the chunk_mutex to avoid
9868		 * any races with chunk allocation (more specifically at
9869		 * volumes.c:contains_pending_extent()), to ensure it always
9870		 * sees the em, either in the pending_chunks list or in the
9871		 * pinned_chunks list.
9872		 */
9873		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9874	}
9875	spin_unlock(&block_group->lock);
9876
9877	if (remove_em) {
9878		struct extent_map_tree *em_tree;
9879
9880		em_tree = &root->fs_info->mapping_tree.map_tree;
9881		write_lock(&em_tree->lock);
9882		/*
9883		 * The em might be in the pending_chunks list, so make sure the
9884		 * chunk mutex is locked, since remove_extent_mapping() will
9885		 * delete us from that list.
9886		 */
9887		remove_extent_mapping(em_tree, em);
9888		write_unlock(&em_tree->lock);
9889		/* once for the tree */
9890		free_extent_map(em);
9891	}
9892
9893	unlock_chunks(root);
9894
9895	btrfs_put_block_group(block_group);
9896	btrfs_put_block_group(block_group);
9897
9898	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9899	if (ret > 0)
9900		ret = -EIO;
9901	if (ret < 0)
9902		goto out;
9903
9904	ret = btrfs_del_item(trans, root, path);
9905out:
9906	btrfs_free_path(path);
9907	return ret;
9908}
9909
9910/*
9911 * Process the unused_bgs list and remove any that don't have any allocated
9912 * space inside of them.
9913 */
9914void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9915{
9916	struct btrfs_block_group_cache *block_group;
9917	struct btrfs_space_info *space_info;
9918	struct btrfs_root *root = fs_info->extent_root;
9919	struct btrfs_trans_handle *trans;
9920	int ret = 0;
9921
9922	if (!fs_info->open)
9923		return;
9924
9925	spin_lock(&fs_info->unused_bgs_lock);
9926	while (!list_empty(&fs_info->unused_bgs)) {
9927		u64 start, end;
9928
9929		block_group = list_first_entry(&fs_info->unused_bgs,
9930					       struct btrfs_block_group_cache,
9931					       bg_list);
9932		space_info = block_group->space_info;
9933		list_del_init(&block_group->bg_list);
9934		if (ret || btrfs_mixed_space_info(space_info)) {
9935			btrfs_put_block_group(block_group);
9936			continue;
9937		}
9938		spin_unlock(&fs_info->unused_bgs_lock);
9939
9940		/* Don't want to race with allocators so take the groups_sem */
9941		down_write(&space_info->groups_sem);
9942		spin_lock(&block_group->lock);
9943		if (block_group->reserved ||
9944		    btrfs_block_group_used(&block_group->item) ||
9945		    block_group->ro) {
9946			/*
9947			 * We want to bail if we made new allocations or have
9948			 * outstanding allocations in this block group.  We do
9949			 * the ro check in case balance is currently acting on
9950			 * this block group.
9951			 */
9952			spin_unlock(&block_group->lock);
9953			up_write(&space_info->groups_sem);
9954			goto next;
9955		}
9956		spin_unlock(&block_group->lock);
9957
9958		/* We don't want to force the issue, only flip if it's ok. */
9959		ret = set_block_group_ro(block_group, 0);
9960		up_write(&space_info->groups_sem);
9961		if (ret < 0) {
9962			ret = 0;
9963			goto next;
9964		}
9965
9966		/*
9967		 * Want to do this before we do anything else so we can recover
9968		 * properly if we fail to join the transaction.
9969		 */
9970		/* 1 for btrfs_orphan_reserve_metadata() */
9971		trans = btrfs_start_transaction(root, 1);
9972		if (IS_ERR(trans)) {
9973			btrfs_set_block_group_rw(root, block_group);
9974			ret = PTR_ERR(trans);
9975			goto next;
9976		}
9977
9978		/*
9979		 * We could have pending pinned extents for this block group,
9980		 * just delete them, we don't care about them anymore.
9981		 */
9982		start = block_group->key.objectid;
9983		end = start + block_group->key.offset - 1;
9984		/*
9985		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
9986		 * btrfs_finish_extent_commit(). If we are at transaction N,
9987		 * another task might be running finish_extent_commit() for the
9988		 * previous transaction N - 1, and have seen a range belonging
9989		 * to the block group in freed_extents[] before we were able to
9990		 * clear the whole block group range from freed_extents[]. This
9991		 * means that task can lookup for the block group after we
9992		 * unpinned it from freed_extents[] and removed it, leading to
9993		 * a BUG_ON() at btrfs_unpin_extent_range().
9994		 */
9995		mutex_lock(&fs_info->unused_bg_unpin_mutex);
9996		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9997				  EXTENT_DIRTY, GFP_NOFS);
9998		if (ret) {
9999			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10000			btrfs_set_block_group_rw(root, block_group);
10001			goto end_trans;
10002		}
10003		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10004				  EXTENT_DIRTY, GFP_NOFS);
10005		if (ret) {
10006			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10007			btrfs_set_block_group_rw(root, block_group);
10008			goto end_trans;
10009		}
10010		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10011
10012		/* Reset pinned so btrfs_put_block_group doesn't complain */
10013		spin_lock(&space_info->lock);
10014		spin_lock(&block_group->lock);
10015
10016		space_info->bytes_pinned -= block_group->pinned;
10017		space_info->bytes_readonly += block_group->pinned;
10018		percpu_counter_add(&space_info->total_bytes_pinned,
10019				   -block_group->pinned);
10020		block_group->pinned = 0;
10021
10022		spin_unlock(&block_group->lock);
10023		spin_unlock(&space_info->lock);
10024
10025		/*
10026		 * Btrfs_remove_chunk will abort the transaction if things go
10027		 * horribly wrong.
10028		 */
10029		ret = btrfs_remove_chunk(trans, root,
10030					 block_group->key.objectid);
10031end_trans:
10032		btrfs_end_transaction(trans, root);
10033next:
10034		btrfs_put_block_group(block_group);
10035		spin_lock(&fs_info->unused_bgs_lock);
10036	}
10037	spin_unlock(&fs_info->unused_bgs_lock);
10038}
10039
10040int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10041{
10042	struct btrfs_space_info *space_info;
10043	struct btrfs_super_block *disk_super;
10044	u64 features;
10045	u64 flags;
10046	int mixed = 0;
10047	int ret;
10048
10049	disk_super = fs_info->super_copy;
10050	if (!btrfs_super_root(disk_super))
10051		return 1;
10052
10053	features = btrfs_super_incompat_flags(disk_super);
10054	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10055		mixed = 1;
10056
10057	flags = BTRFS_BLOCK_GROUP_SYSTEM;
10058	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10059	if (ret)
10060		goto out;
10061
10062	if (mixed) {
10063		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10064		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10065	} else {
10066		flags = BTRFS_BLOCK_GROUP_METADATA;
10067		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10068		if (ret)
10069			goto out;
10070
10071		flags = BTRFS_BLOCK_GROUP_DATA;
10072		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10073	}
10074out:
10075	return ret;
10076}
10077
10078int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10079{
10080	return unpin_extent_range(root, start, end, false);
10081}
10082
10083int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10084{
10085	struct btrfs_fs_info *fs_info = root->fs_info;
10086	struct btrfs_block_group_cache *cache = NULL;
10087	u64 group_trimmed;
10088	u64 start;
10089	u64 end;
10090	u64 trimmed = 0;
10091	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10092	int ret = 0;
10093
10094	/*
10095	 * try to trim all FS space, our block group may start from non-zero.
10096	 */
10097	if (range->len == total_bytes)
10098		cache = btrfs_lookup_first_block_group(fs_info, range->start);
10099	else
10100		cache = btrfs_lookup_block_group(fs_info, range->start);
10101
10102	while (cache) {
10103		if (cache->key.objectid >= (range->start + range->len)) {
10104			btrfs_put_block_group(cache);
10105			break;
10106		}
10107
10108		start = max(range->start, cache->key.objectid);
10109		end = min(range->start + range->len,
10110				cache->key.objectid + cache->key.offset);
10111
10112		if (end - start >= range->minlen) {
10113			if (!block_group_cache_done(cache)) {
10114				ret = cache_block_group(cache, 0);
10115				if (ret) {
10116					btrfs_put_block_group(cache);
10117					break;
10118				}
10119				ret = wait_block_group_cache_done(cache);
10120				if (ret) {
10121					btrfs_put_block_group(cache);
10122					break;
10123				}
10124			}
10125			ret = btrfs_trim_block_group(cache,
10126						     &group_trimmed,
10127						     start,
10128						     end,
10129						     range->minlen);
10130
10131			trimmed += group_trimmed;
10132			if (ret) {
10133				btrfs_put_block_group(cache);
10134				break;
10135			}
10136		}
10137
10138		cache = next_block_group(fs_info->tree_root, cache);
10139	}
10140
10141	range->len = trimmed;
10142	return ret;
10143}
10144
10145/*
10146 * btrfs_{start,end}_write_no_snapshoting() are similar to
10147 * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
10148 * data into the page cache through nocow before the subvolume is snapshoted,
10149 * but flush the data into disk after the snapshot creation, or to prevent
10150 * operations while snapshoting is ongoing and that cause the snapshot to be
10151 * inconsistent (writes followed by expanding truncates for example).
10152 */
10153void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10154{
10155	percpu_counter_dec(&root->subv_writers->counter);
10156	/*
10157	 * Make sure counter is updated before we wake up
10158	 * waiters.
10159	 */
10160	smp_mb();
10161	if (waitqueue_active(&root->subv_writers->wait))
10162		wake_up(&root->subv_writers->wait);
10163}
10164
10165int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10166{
10167	if (atomic_read(&root->will_be_snapshoted))
10168		return 0;
10169
10170	percpu_counter_inc(&root->subv_writers->counter);
10171	/*
10172	 * Make sure counter is updated before we check for snapshot creation.
10173	 */
10174	smp_mb();
10175	if (atomic_read(&root->will_be_snapshoted)) {
10176		btrfs_end_write_no_snapshoting(root);
10177		return 0;
10178	}
10179	return 1;
10180}
10181