1/*
2 * Copyright (C) 2007 Oracle.  All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/buffer_head.h>
22#include <linux/blkdev.h>
23#include <linux/random.h>
24#include <linux/iocontext.h>
25#include <linux/capability.h>
26#include <linux/ratelimit.h>
27#include <linux/kthread.h>
28#include <linux/raid/pq.h>
29#include <linux/semaphore.h>
30#include <asm/div64.h>
31#include "ctree.h"
32#include "extent_map.h"
33#include "disk-io.h"
34#include "transaction.h"
35#include "print-tree.h"
36#include "volumes.h"
37#include "raid56.h"
38#include "async-thread.h"
39#include "check-integrity.h"
40#include "rcu-string.h"
41#include "math.h"
42#include "dev-replace.h"
43#include "sysfs.h"
44
45static int init_first_rw_device(struct btrfs_trans_handle *trans,
46				struct btrfs_root *root,
47				struct btrfs_device *device);
48static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
50static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
51static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
52
53DEFINE_MUTEX(uuid_mutex);
54static LIST_HEAD(fs_uuids);
55
56static struct btrfs_fs_devices *__alloc_fs_devices(void)
57{
58	struct btrfs_fs_devices *fs_devs;
59
60	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
61	if (!fs_devs)
62		return ERR_PTR(-ENOMEM);
63
64	mutex_init(&fs_devs->device_list_mutex);
65
66	INIT_LIST_HEAD(&fs_devs->devices);
67	INIT_LIST_HEAD(&fs_devs->resized_devices);
68	INIT_LIST_HEAD(&fs_devs->alloc_list);
69	INIT_LIST_HEAD(&fs_devs->list);
70
71	return fs_devs;
72}
73
74/**
75 * alloc_fs_devices - allocate struct btrfs_fs_devices
76 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
77 *		generated.
78 *
79 * Return: a pointer to a new &struct btrfs_fs_devices on success;
80 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
81 * can be destroyed with kfree() right away.
82 */
83static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
84{
85	struct btrfs_fs_devices *fs_devs;
86
87	fs_devs = __alloc_fs_devices();
88	if (IS_ERR(fs_devs))
89		return fs_devs;
90
91	if (fsid)
92		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
93	else
94		generate_random_uuid(fs_devs->fsid);
95
96	return fs_devs;
97}
98
99static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
100{
101	struct btrfs_device *device;
102	WARN_ON(fs_devices->opened);
103	while (!list_empty(&fs_devices->devices)) {
104		device = list_entry(fs_devices->devices.next,
105				    struct btrfs_device, dev_list);
106		list_del(&device->dev_list);
107		rcu_string_free(device->name);
108		kfree(device);
109	}
110	kfree(fs_devices);
111}
112
113static void btrfs_kobject_uevent(struct block_device *bdev,
114				 enum kobject_action action)
115{
116	int ret;
117
118	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
119	if (ret)
120		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
121			action,
122			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
123			&disk_to_dev(bdev->bd_disk)->kobj);
124}
125
126void btrfs_cleanup_fs_uuids(void)
127{
128	struct btrfs_fs_devices *fs_devices;
129
130	while (!list_empty(&fs_uuids)) {
131		fs_devices = list_entry(fs_uuids.next,
132					struct btrfs_fs_devices, list);
133		list_del(&fs_devices->list);
134		free_fs_devices(fs_devices);
135	}
136}
137
138static struct btrfs_device *__alloc_device(void)
139{
140	struct btrfs_device *dev;
141
142	dev = kzalloc(sizeof(*dev), GFP_NOFS);
143	if (!dev)
144		return ERR_PTR(-ENOMEM);
145
146	INIT_LIST_HEAD(&dev->dev_list);
147	INIT_LIST_HEAD(&dev->dev_alloc_list);
148	INIT_LIST_HEAD(&dev->resized_list);
149
150	spin_lock_init(&dev->io_lock);
151
152	spin_lock_init(&dev->reada_lock);
153	atomic_set(&dev->reada_in_flight, 0);
154	atomic_set(&dev->dev_stats_ccnt, 0);
155	btrfs_device_data_ordered_init(dev);
156	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
157	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
158
159	return dev;
160}
161
162static noinline struct btrfs_device *__find_device(struct list_head *head,
163						   u64 devid, u8 *uuid)
164{
165	struct btrfs_device *dev;
166
167	list_for_each_entry(dev, head, dev_list) {
168		if (dev->devid == devid &&
169		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
170			return dev;
171		}
172	}
173	return NULL;
174}
175
176static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
177{
178	struct btrfs_fs_devices *fs_devices;
179
180	list_for_each_entry(fs_devices, &fs_uuids, list) {
181		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
182			return fs_devices;
183	}
184	return NULL;
185}
186
187static int
188btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
189		      int flush, struct block_device **bdev,
190		      struct buffer_head **bh)
191{
192	int ret;
193
194	*bdev = blkdev_get_by_path(device_path, flags, holder);
195
196	if (IS_ERR(*bdev)) {
197		ret = PTR_ERR(*bdev);
198		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
199		goto error;
200	}
201
202	if (flush)
203		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
204	ret = set_blocksize(*bdev, 4096);
205	if (ret) {
206		blkdev_put(*bdev, flags);
207		goto error;
208	}
209	invalidate_bdev(*bdev);
210	*bh = btrfs_read_dev_super(*bdev);
211	if (!*bh) {
212		ret = -EINVAL;
213		blkdev_put(*bdev, flags);
214		goto error;
215	}
216
217	return 0;
218
219error:
220	*bdev = NULL;
221	*bh = NULL;
222	return ret;
223}
224
225static void requeue_list(struct btrfs_pending_bios *pending_bios,
226			struct bio *head, struct bio *tail)
227{
228
229	struct bio *old_head;
230
231	old_head = pending_bios->head;
232	pending_bios->head = head;
233	if (pending_bios->tail)
234		tail->bi_next = old_head;
235	else
236		pending_bios->tail = tail;
237}
238
239/*
240 * we try to collect pending bios for a device so we don't get a large
241 * number of procs sending bios down to the same device.  This greatly
242 * improves the schedulers ability to collect and merge the bios.
243 *
244 * But, it also turns into a long list of bios to process and that is sure
245 * to eventually make the worker thread block.  The solution here is to
246 * make some progress and then put this work struct back at the end of
247 * the list if the block device is congested.  This way, multiple devices
248 * can make progress from a single worker thread.
249 */
250static noinline void run_scheduled_bios(struct btrfs_device *device)
251{
252	struct bio *pending;
253	struct backing_dev_info *bdi;
254	struct btrfs_fs_info *fs_info;
255	struct btrfs_pending_bios *pending_bios;
256	struct bio *tail;
257	struct bio *cur;
258	int again = 0;
259	unsigned long num_run;
260	unsigned long batch_run = 0;
261	unsigned long limit;
262	unsigned long last_waited = 0;
263	int force_reg = 0;
264	int sync_pending = 0;
265	struct blk_plug plug;
266
267	/*
268	 * this function runs all the bios we've collected for
269	 * a particular device.  We don't want to wander off to
270	 * another device without first sending all of these down.
271	 * So, setup a plug here and finish it off before we return
272	 */
273	blk_start_plug(&plug);
274
275	bdi = blk_get_backing_dev_info(device->bdev);
276	fs_info = device->dev_root->fs_info;
277	limit = btrfs_async_submit_limit(fs_info);
278	limit = limit * 2 / 3;
279
280loop:
281	spin_lock(&device->io_lock);
282
283loop_lock:
284	num_run = 0;
285
286	/* take all the bios off the list at once and process them
287	 * later on (without the lock held).  But, remember the
288	 * tail and other pointers so the bios can be properly reinserted
289	 * into the list if we hit congestion
290	 */
291	if (!force_reg && device->pending_sync_bios.head) {
292		pending_bios = &device->pending_sync_bios;
293		force_reg = 1;
294	} else {
295		pending_bios = &device->pending_bios;
296		force_reg = 0;
297	}
298
299	pending = pending_bios->head;
300	tail = pending_bios->tail;
301	WARN_ON(pending && !tail);
302
303	/*
304	 * if pending was null this time around, no bios need processing
305	 * at all and we can stop.  Otherwise it'll loop back up again
306	 * and do an additional check so no bios are missed.
307	 *
308	 * device->running_pending is used to synchronize with the
309	 * schedule_bio code.
310	 */
311	if (device->pending_sync_bios.head == NULL &&
312	    device->pending_bios.head == NULL) {
313		again = 0;
314		device->running_pending = 0;
315	} else {
316		again = 1;
317		device->running_pending = 1;
318	}
319
320	pending_bios->head = NULL;
321	pending_bios->tail = NULL;
322
323	spin_unlock(&device->io_lock);
324
325	while (pending) {
326
327		rmb();
328		/* we want to work on both lists, but do more bios on the
329		 * sync list than the regular list
330		 */
331		if ((num_run > 32 &&
332		    pending_bios != &device->pending_sync_bios &&
333		    device->pending_sync_bios.head) ||
334		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
335		    device->pending_bios.head)) {
336			spin_lock(&device->io_lock);
337			requeue_list(pending_bios, pending, tail);
338			goto loop_lock;
339		}
340
341		cur = pending;
342		pending = pending->bi_next;
343		cur->bi_next = NULL;
344
345		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
346		    waitqueue_active(&fs_info->async_submit_wait))
347			wake_up(&fs_info->async_submit_wait);
348
349		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
350
351		/*
352		 * if we're doing the sync list, record that our
353		 * plug has some sync requests on it
354		 *
355		 * If we're doing the regular list and there are
356		 * sync requests sitting around, unplug before
357		 * we add more
358		 */
359		if (pending_bios == &device->pending_sync_bios) {
360			sync_pending = 1;
361		} else if (sync_pending) {
362			blk_finish_plug(&plug);
363			blk_start_plug(&plug);
364			sync_pending = 0;
365		}
366
367		btrfsic_submit_bio(cur->bi_rw, cur);
368		num_run++;
369		batch_run++;
370
371		cond_resched();
372
373		/*
374		 * we made progress, there is more work to do and the bdi
375		 * is now congested.  Back off and let other work structs
376		 * run instead
377		 */
378		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
379		    fs_info->fs_devices->open_devices > 1) {
380			struct io_context *ioc;
381
382			ioc = current->io_context;
383
384			/*
385			 * the main goal here is that we don't want to
386			 * block if we're going to be able to submit
387			 * more requests without blocking.
388			 *
389			 * This code does two great things, it pokes into
390			 * the elevator code from a filesystem _and_
391			 * it makes assumptions about how batching works.
392			 */
393			if (ioc && ioc->nr_batch_requests > 0 &&
394			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
395			    (last_waited == 0 ||
396			     ioc->last_waited == last_waited)) {
397				/*
398				 * we want to go through our batch of
399				 * requests and stop.  So, we copy out
400				 * the ioc->last_waited time and test
401				 * against it before looping
402				 */
403				last_waited = ioc->last_waited;
404				cond_resched();
405				continue;
406			}
407			spin_lock(&device->io_lock);
408			requeue_list(pending_bios, pending, tail);
409			device->running_pending = 1;
410
411			spin_unlock(&device->io_lock);
412			btrfs_queue_work(fs_info->submit_workers,
413					 &device->work);
414			goto done;
415		}
416		/* unplug every 64 requests just for good measure */
417		if (batch_run % 64 == 0) {
418			blk_finish_plug(&plug);
419			blk_start_plug(&plug);
420			sync_pending = 0;
421		}
422	}
423
424	cond_resched();
425	if (again)
426		goto loop;
427
428	spin_lock(&device->io_lock);
429	if (device->pending_bios.head || device->pending_sync_bios.head)
430		goto loop_lock;
431	spin_unlock(&device->io_lock);
432
433done:
434	blk_finish_plug(&plug);
435}
436
437static void pending_bios_fn(struct btrfs_work *work)
438{
439	struct btrfs_device *device;
440
441	device = container_of(work, struct btrfs_device, work);
442	run_scheduled_bios(device);
443}
444
445/*
446 * Add new device to list of registered devices
447 *
448 * Returns:
449 * 1   - first time device is seen
450 * 0   - device already known
451 * < 0 - error
452 */
453static noinline int device_list_add(const char *path,
454			   struct btrfs_super_block *disk_super,
455			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
456{
457	struct btrfs_device *device;
458	struct btrfs_fs_devices *fs_devices;
459	struct rcu_string *name;
460	int ret = 0;
461	u64 found_transid = btrfs_super_generation(disk_super);
462
463	fs_devices = find_fsid(disk_super->fsid);
464	if (!fs_devices) {
465		fs_devices = alloc_fs_devices(disk_super->fsid);
466		if (IS_ERR(fs_devices))
467			return PTR_ERR(fs_devices);
468
469		list_add(&fs_devices->list, &fs_uuids);
470
471		device = NULL;
472	} else {
473		device = __find_device(&fs_devices->devices, devid,
474				       disk_super->dev_item.uuid);
475	}
476
477	if (!device) {
478		if (fs_devices->opened)
479			return -EBUSY;
480
481		device = btrfs_alloc_device(NULL, &devid,
482					    disk_super->dev_item.uuid);
483		if (IS_ERR(device)) {
484			/* we can safely leave the fs_devices entry around */
485			return PTR_ERR(device);
486		}
487
488		name = rcu_string_strdup(path, GFP_NOFS);
489		if (!name) {
490			kfree(device);
491			return -ENOMEM;
492		}
493		rcu_assign_pointer(device->name, name);
494
495		mutex_lock(&fs_devices->device_list_mutex);
496		list_add_rcu(&device->dev_list, &fs_devices->devices);
497		fs_devices->num_devices++;
498		mutex_unlock(&fs_devices->device_list_mutex);
499
500		ret = 1;
501		device->fs_devices = fs_devices;
502	} else if (!device->name || strcmp(device->name->str, path)) {
503		/*
504		 * When FS is already mounted.
505		 * 1. If you are here and if the device->name is NULL that
506		 *    means this device was missing at time of FS mount.
507		 * 2. If you are here and if the device->name is different
508		 *    from 'path' that means either
509		 *      a. The same device disappeared and reappeared with
510		 *         different name. or
511		 *      b. The missing-disk-which-was-replaced, has
512		 *         reappeared now.
513		 *
514		 * We must allow 1 and 2a above. But 2b would be a spurious
515		 * and unintentional.
516		 *
517		 * Further in case of 1 and 2a above, the disk at 'path'
518		 * would have missed some transaction when it was away and
519		 * in case of 2a the stale bdev has to be updated as well.
520		 * 2b must not be allowed at all time.
521		 */
522
523		/*
524		 * For now, we do allow update to btrfs_fs_device through the
525		 * btrfs dev scan cli after FS has been mounted.  We're still
526		 * tracking a problem where systems fail mount by subvolume id
527		 * when we reject replacement on a mounted FS.
528		 */
529		if (!fs_devices->opened && found_transid < device->generation) {
530			/*
531			 * That is if the FS is _not_ mounted and if you
532			 * are here, that means there is more than one
533			 * disk with same uuid and devid.We keep the one
534			 * with larger generation number or the last-in if
535			 * generation are equal.
536			 */
537			return -EEXIST;
538		}
539
540		name = rcu_string_strdup(path, GFP_NOFS);
541		if (!name)
542			return -ENOMEM;
543		rcu_string_free(device->name);
544		rcu_assign_pointer(device->name, name);
545		if (device->missing) {
546			fs_devices->missing_devices--;
547			device->missing = 0;
548		}
549	}
550
551	/*
552	 * Unmount does not free the btrfs_device struct but would zero
553	 * generation along with most of the other members. So just update
554	 * it back. We need it to pick the disk with largest generation
555	 * (as above).
556	 */
557	if (!fs_devices->opened)
558		device->generation = found_transid;
559
560	*fs_devices_ret = fs_devices;
561
562	return ret;
563}
564
565static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
566{
567	struct btrfs_fs_devices *fs_devices;
568	struct btrfs_device *device;
569	struct btrfs_device *orig_dev;
570
571	fs_devices = alloc_fs_devices(orig->fsid);
572	if (IS_ERR(fs_devices))
573		return fs_devices;
574
575	mutex_lock(&orig->device_list_mutex);
576	fs_devices->total_devices = orig->total_devices;
577
578	/* We have held the volume lock, it is safe to get the devices. */
579	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
580		struct rcu_string *name;
581
582		device = btrfs_alloc_device(NULL, &orig_dev->devid,
583					    orig_dev->uuid);
584		if (IS_ERR(device))
585			goto error;
586
587		/*
588		 * This is ok to do without rcu read locked because we hold the
589		 * uuid mutex so nothing we touch in here is going to disappear.
590		 */
591		if (orig_dev->name) {
592			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
593			if (!name) {
594				kfree(device);
595				goto error;
596			}
597			rcu_assign_pointer(device->name, name);
598		}
599
600		list_add(&device->dev_list, &fs_devices->devices);
601		device->fs_devices = fs_devices;
602		fs_devices->num_devices++;
603	}
604	mutex_unlock(&orig->device_list_mutex);
605	return fs_devices;
606error:
607	mutex_unlock(&orig->device_list_mutex);
608	free_fs_devices(fs_devices);
609	return ERR_PTR(-ENOMEM);
610}
611
612void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
613{
614	struct btrfs_device *device, *next;
615	struct btrfs_device *latest_dev = NULL;
616
617	mutex_lock(&uuid_mutex);
618again:
619	/* This is the initialized path, it is safe to release the devices. */
620	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
621		if (device->in_fs_metadata) {
622			if (!device->is_tgtdev_for_dev_replace &&
623			    (!latest_dev ||
624			     device->generation > latest_dev->generation)) {
625				latest_dev = device;
626			}
627			continue;
628		}
629
630		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
631			/*
632			 * In the first step, keep the device which has
633			 * the correct fsid and the devid that is used
634			 * for the dev_replace procedure.
635			 * In the second step, the dev_replace state is
636			 * read from the device tree and it is known
637			 * whether the procedure is really active or
638			 * not, which means whether this device is
639			 * used or whether it should be removed.
640			 */
641			if (step == 0 || device->is_tgtdev_for_dev_replace) {
642				continue;
643			}
644		}
645		if (device->bdev) {
646			blkdev_put(device->bdev, device->mode);
647			device->bdev = NULL;
648			fs_devices->open_devices--;
649		}
650		if (device->writeable) {
651			list_del_init(&device->dev_alloc_list);
652			device->writeable = 0;
653			if (!device->is_tgtdev_for_dev_replace)
654				fs_devices->rw_devices--;
655		}
656		list_del_init(&device->dev_list);
657		fs_devices->num_devices--;
658		rcu_string_free(device->name);
659		kfree(device);
660	}
661
662	if (fs_devices->seed) {
663		fs_devices = fs_devices->seed;
664		goto again;
665	}
666
667	fs_devices->latest_bdev = latest_dev->bdev;
668
669	mutex_unlock(&uuid_mutex);
670}
671
672static void __free_device(struct work_struct *work)
673{
674	struct btrfs_device *device;
675
676	device = container_of(work, struct btrfs_device, rcu_work);
677
678	if (device->bdev)
679		blkdev_put(device->bdev, device->mode);
680
681	rcu_string_free(device->name);
682	kfree(device);
683}
684
685static void free_device(struct rcu_head *head)
686{
687	struct btrfs_device *device;
688
689	device = container_of(head, struct btrfs_device, rcu);
690
691	INIT_WORK(&device->rcu_work, __free_device);
692	schedule_work(&device->rcu_work);
693}
694
695static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
696{
697	struct btrfs_device *device;
698
699	if (--fs_devices->opened > 0)
700		return 0;
701
702	mutex_lock(&fs_devices->device_list_mutex);
703	list_for_each_entry(device, &fs_devices->devices, dev_list) {
704		struct btrfs_device *new_device;
705		struct rcu_string *name;
706
707		if (device->bdev)
708			fs_devices->open_devices--;
709
710		if (device->writeable &&
711		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
712			list_del_init(&device->dev_alloc_list);
713			fs_devices->rw_devices--;
714		}
715
716		if (device->missing)
717			fs_devices->missing_devices--;
718
719		new_device = btrfs_alloc_device(NULL, &device->devid,
720						device->uuid);
721		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
722
723		/* Safe because we are under uuid_mutex */
724		if (device->name) {
725			name = rcu_string_strdup(device->name->str, GFP_NOFS);
726			BUG_ON(!name); /* -ENOMEM */
727			rcu_assign_pointer(new_device->name, name);
728		}
729
730		list_replace_rcu(&device->dev_list, &new_device->dev_list);
731		new_device->fs_devices = device->fs_devices;
732
733		call_rcu(&device->rcu, free_device);
734	}
735	mutex_unlock(&fs_devices->device_list_mutex);
736
737	WARN_ON(fs_devices->open_devices);
738	WARN_ON(fs_devices->rw_devices);
739	fs_devices->opened = 0;
740	fs_devices->seeding = 0;
741
742	return 0;
743}
744
745int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
746{
747	struct btrfs_fs_devices *seed_devices = NULL;
748	int ret;
749
750	mutex_lock(&uuid_mutex);
751	ret = __btrfs_close_devices(fs_devices);
752	if (!fs_devices->opened) {
753		seed_devices = fs_devices->seed;
754		fs_devices->seed = NULL;
755	}
756	mutex_unlock(&uuid_mutex);
757
758	while (seed_devices) {
759		fs_devices = seed_devices;
760		seed_devices = fs_devices->seed;
761		__btrfs_close_devices(fs_devices);
762		free_fs_devices(fs_devices);
763	}
764	/*
765	 * Wait for rcu kworkers under __btrfs_close_devices
766	 * to finish all blkdev_puts so device is really
767	 * free when umount is done.
768	 */
769	rcu_barrier();
770	return ret;
771}
772
773static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
774				fmode_t flags, void *holder)
775{
776	struct request_queue *q;
777	struct block_device *bdev;
778	struct list_head *head = &fs_devices->devices;
779	struct btrfs_device *device;
780	struct btrfs_device *latest_dev = NULL;
781	struct buffer_head *bh;
782	struct btrfs_super_block *disk_super;
783	u64 devid;
784	int seeding = 1;
785	int ret = 0;
786
787	flags |= FMODE_EXCL;
788
789	list_for_each_entry(device, head, dev_list) {
790		if (device->bdev)
791			continue;
792		if (!device->name)
793			continue;
794
795		/* Just open everything we can; ignore failures here */
796		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
797					    &bdev, &bh))
798			continue;
799
800		disk_super = (struct btrfs_super_block *)bh->b_data;
801		devid = btrfs_stack_device_id(&disk_super->dev_item);
802		if (devid != device->devid)
803			goto error_brelse;
804
805		if (memcmp(device->uuid, disk_super->dev_item.uuid,
806			   BTRFS_UUID_SIZE))
807			goto error_brelse;
808
809		device->generation = btrfs_super_generation(disk_super);
810		if (!latest_dev ||
811		    device->generation > latest_dev->generation)
812			latest_dev = device;
813
814		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
815			device->writeable = 0;
816		} else {
817			device->writeable = !bdev_read_only(bdev);
818			seeding = 0;
819		}
820
821		q = bdev_get_queue(bdev);
822		if (blk_queue_discard(q))
823			device->can_discard = 1;
824
825		device->bdev = bdev;
826		device->in_fs_metadata = 0;
827		device->mode = flags;
828
829		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
830			fs_devices->rotating = 1;
831
832		fs_devices->open_devices++;
833		if (device->writeable &&
834		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
835			fs_devices->rw_devices++;
836			list_add(&device->dev_alloc_list,
837				 &fs_devices->alloc_list);
838		}
839		brelse(bh);
840		continue;
841
842error_brelse:
843		brelse(bh);
844		blkdev_put(bdev, flags);
845		continue;
846	}
847	if (fs_devices->open_devices == 0) {
848		ret = -EINVAL;
849		goto out;
850	}
851	fs_devices->seeding = seeding;
852	fs_devices->opened = 1;
853	fs_devices->latest_bdev = latest_dev->bdev;
854	fs_devices->total_rw_bytes = 0;
855out:
856	return ret;
857}
858
859int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
860		       fmode_t flags, void *holder)
861{
862	int ret;
863
864	mutex_lock(&uuid_mutex);
865	if (fs_devices->opened) {
866		fs_devices->opened++;
867		ret = 0;
868	} else {
869		ret = __btrfs_open_devices(fs_devices, flags, holder);
870	}
871	mutex_unlock(&uuid_mutex);
872	return ret;
873}
874
875/*
876 * Look for a btrfs signature on a device. This may be called out of the mount path
877 * and we are not allowed to call set_blocksize during the scan. The superblock
878 * is read via pagecache
879 */
880int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
881			  struct btrfs_fs_devices **fs_devices_ret)
882{
883	struct btrfs_super_block *disk_super;
884	struct block_device *bdev;
885	struct page *page;
886	void *p;
887	int ret = -EINVAL;
888	u64 devid;
889	u64 transid;
890	u64 total_devices;
891	u64 bytenr;
892	pgoff_t index;
893
894	/*
895	 * we would like to check all the supers, but that would make
896	 * a btrfs mount succeed after a mkfs from a different FS.
897	 * So, we need to add a special mount option to scan for
898	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
899	 */
900	bytenr = btrfs_sb_offset(0);
901	flags |= FMODE_EXCL;
902	mutex_lock(&uuid_mutex);
903
904	bdev = blkdev_get_by_path(path, flags, holder);
905
906	if (IS_ERR(bdev)) {
907		ret = PTR_ERR(bdev);
908		goto error;
909	}
910
911	/* make sure our super fits in the device */
912	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
913		goto error_bdev_put;
914
915	/* make sure our super fits in the page */
916	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
917		goto error_bdev_put;
918
919	/* make sure our super doesn't straddle pages on disk */
920	index = bytenr >> PAGE_CACHE_SHIFT;
921	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
922		goto error_bdev_put;
923
924	/* pull in the page with our super */
925	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
926				   index, GFP_NOFS);
927
928	if (IS_ERR_OR_NULL(page))
929		goto error_bdev_put;
930
931	p = kmap(page);
932
933	/* align our pointer to the offset of the super block */
934	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
935
936	if (btrfs_super_bytenr(disk_super) != bytenr ||
937	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
938		goto error_unmap;
939
940	devid = btrfs_stack_device_id(&disk_super->dev_item);
941	transid = btrfs_super_generation(disk_super);
942	total_devices = btrfs_super_num_devices(disk_super);
943
944	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
945	if (ret > 0) {
946		if (disk_super->label[0]) {
947			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
948				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
949			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
950		} else {
951			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
952		}
953
954		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
955		ret = 0;
956	}
957	if (!ret && fs_devices_ret)
958		(*fs_devices_ret)->total_devices = total_devices;
959
960error_unmap:
961	kunmap(page);
962	page_cache_release(page);
963
964error_bdev_put:
965	blkdev_put(bdev, flags);
966error:
967	mutex_unlock(&uuid_mutex);
968	return ret;
969}
970
971/* helper to account the used device space in the range */
972int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
973				   u64 end, u64 *length)
974{
975	struct btrfs_key key;
976	struct btrfs_root *root = device->dev_root;
977	struct btrfs_dev_extent *dev_extent;
978	struct btrfs_path *path;
979	u64 extent_end;
980	int ret;
981	int slot;
982	struct extent_buffer *l;
983
984	*length = 0;
985
986	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
987		return 0;
988
989	path = btrfs_alloc_path();
990	if (!path)
991		return -ENOMEM;
992	path->reada = 2;
993
994	key.objectid = device->devid;
995	key.offset = start;
996	key.type = BTRFS_DEV_EXTENT_KEY;
997
998	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
999	if (ret < 0)
1000		goto out;
1001	if (ret > 0) {
1002		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1003		if (ret < 0)
1004			goto out;
1005	}
1006
1007	while (1) {
1008		l = path->nodes[0];
1009		slot = path->slots[0];
1010		if (slot >= btrfs_header_nritems(l)) {
1011			ret = btrfs_next_leaf(root, path);
1012			if (ret == 0)
1013				continue;
1014			if (ret < 0)
1015				goto out;
1016
1017			break;
1018		}
1019		btrfs_item_key_to_cpu(l, &key, slot);
1020
1021		if (key.objectid < device->devid)
1022			goto next;
1023
1024		if (key.objectid > device->devid)
1025			break;
1026
1027		if (key.type != BTRFS_DEV_EXTENT_KEY)
1028			goto next;
1029
1030		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1031		extent_end = key.offset + btrfs_dev_extent_length(l,
1032								  dev_extent);
1033		if (key.offset <= start && extent_end > end) {
1034			*length = end - start + 1;
1035			break;
1036		} else if (key.offset <= start && extent_end > start)
1037			*length += extent_end - start;
1038		else if (key.offset > start && extent_end <= end)
1039			*length += extent_end - key.offset;
1040		else if (key.offset > start && key.offset <= end) {
1041			*length += end - key.offset + 1;
1042			break;
1043		} else if (key.offset > end)
1044			break;
1045
1046next:
1047		path->slots[0]++;
1048	}
1049	ret = 0;
1050out:
1051	btrfs_free_path(path);
1052	return ret;
1053}
1054
1055static int contains_pending_extent(struct btrfs_trans_handle *trans,
1056				   struct btrfs_device *device,
1057				   u64 *start, u64 len)
1058{
1059	struct extent_map *em;
1060	struct list_head *search_list = &trans->transaction->pending_chunks;
1061	int ret = 0;
1062	u64 physical_start = *start;
1063
1064again:
1065	list_for_each_entry(em, search_list, list) {
1066		struct map_lookup *map;
1067		int i;
1068
1069		map = (struct map_lookup *)em->bdev;
1070		for (i = 0; i < map->num_stripes; i++) {
1071			if (map->stripes[i].dev != device)
1072				continue;
1073			if (map->stripes[i].physical >= physical_start + len ||
1074			    map->stripes[i].physical + em->orig_block_len <=
1075			    physical_start)
1076				continue;
1077			*start = map->stripes[i].physical +
1078				em->orig_block_len;
1079			ret = 1;
1080		}
1081	}
1082	if (search_list == &trans->transaction->pending_chunks) {
1083		search_list = &trans->root->fs_info->pinned_chunks;
1084		goto again;
1085	}
1086
1087	return ret;
1088}
1089
1090
1091/*
1092 * find_free_dev_extent - find free space in the specified device
1093 * @device:	the device which we search the free space in
1094 * @num_bytes:	the size of the free space that we need
1095 * @start:	store the start of the free space.
1096 * @len:	the size of the free space. that we find, or the size of the max
1097 * 		free space if we don't find suitable free space
1098 *
1099 * this uses a pretty simple search, the expectation is that it is
1100 * called very infrequently and that a given device has a small number
1101 * of extents
1102 *
1103 * @start is used to store the start of the free space if we find. But if we
1104 * don't find suitable free space, it will be used to store the start position
1105 * of the max free space.
1106 *
1107 * @len is used to store the size of the free space that we find.
1108 * But if we don't find suitable free space, it is used to store the size of
1109 * the max free space.
1110 */
1111int find_free_dev_extent(struct btrfs_trans_handle *trans,
1112			 struct btrfs_device *device, u64 num_bytes,
1113			 u64 *start, u64 *len)
1114{
1115	struct btrfs_key key;
1116	struct btrfs_root *root = device->dev_root;
1117	struct btrfs_dev_extent *dev_extent;
1118	struct btrfs_path *path;
1119	u64 hole_size;
1120	u64 max_hole_start;
1121	u64 max_hole_size;
1122	u64 extent_end;
1123	u64 search_start;
1124	u64 search_end = device->total_bytes;
1125	int ret;
1126	int slot;
1127	struct extent_buffer *l;
1128
1129	/* FIXME use last free of some kind */
1130
1131	/* we don't want to overwrite the superblock on the drive,
1132	 * so we make sure to start at an offset of at least 1MB
1133	 */
1134	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1135
1136	path = btrfs_alloc_path();
1137	if (!path)
1138		return -ENOMEM;
1139
1140	max_hole_start = search_start;
1141	max_hole_size = 0;
1142
1143again:
1144	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1145		ret = -ENOSPC;
1146		goto out;
1147	}
1148
1149	path->reada = 2;
1150	path->search_commit_root = 1;
1151	path->skip_locking = 1;
1152
1153	key.objectid = device->devid;
1154	key.offset = search_start;
1155	key.type = BTRFS_DEV_EXTENT_KEY;
1156
1157	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1158	if (ret < 0)
1159		goto out;
1160	if (ret > 0) {
1161		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1162		if (ret < 0)
1163			goto out;
1164	}
1165
1166	while (1) {
1167		l = path->nodes[0];
1168		slot = path->slots[0];
1169		if (slot >= btrfs_header_nritems(l)) {
1170			ret = btrfs_next_leaf(root, path);
1171			if (ret == 0)
1172				continue;
1173			if (ret < 0)
1174				goto out;
1175
1176			break;
1177		}
1178		btrfs_item_key_to_cpu(l, &key, slot);
1179
1180		if (key.objectid < device->devid)
1181			goto next;
1182
1183		if (key.objectid > device->devid)
1184			break;
1185
1186		if (key.type != BTRFS_DEV_EXTENT_KEY)
1187			goto next;
1188
1189		if (key.offset > search_start) {
1190			hole_size = key.offset - search_start;
1191
1192			/*
1193			 * Have to check before we set max_hole_start, otherwise
1194			 * we could end up sending back this offset anyway.
1195			 */
1196			if (contains_pending_extent(trans, device,
1197						    &search_start,
1198						    hole_size)) {
1199				if (key.offset >= search_start) {
1200					hole_size = key.offset - search_start;
1201				} else {
1202					WARN_ON_ONCE(1);
1203					hole_size = 0;
1204				}
1205			}
1206
1207			if (hole_size > max_hole_size) {
1208				max_hole_start = search_start;
1209				max_hole_size = hole_size;
1210			}
1211
1212			/*
1213			 * If this free space is greater than which we need,
1214			 * it must be the max free space that we have found
1215			 * until now, so max_hole_start must point to the start
1216			 * of this free space and the length of this free space
1217			 * is stored in max_hole_size. Thus, we return
1218			 * max_hole_start and max_hole_size and go back to the
1219			 * caller.
1220			 */
1221			if (hole_size >= num_bytes) {
1222				ret = 0;
1223				goto out;
1224			}
1225		}
1226
1227		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1228		extent_end = key.offset + btrfs_dev_extent_length(l,
1229								  dev_extent);
1230		if (extent_end > search_start)
1231			search_start = extent_end;
1232next:
1233		path->slots[0]++;
1234		cond_resched();
1235	}
1236
1237	/*
1238	 * At this point, search_start should be the end of
1239	 * allocated dev extents, and when shrinking the device,
1240	 * search_end may be smaller than search_start.
1241	 */
1242	if (search_end > search_start) {
1243		hole_size = search_end - search_start;
1244
1245		if (contains_pending_extent(trans, device, &search_start,
1246					    hole_size)) {
1247			btrfs_release_path(path);
1248			goto again;
1249		}
1250
1251		if (hole_size > max_hole_size) {
1252			max_hole_start = search_start;
1253			max_hole_size = hole_size;
1254		}
1255	}
1256
1257	/* See above. */
1258	if (max_hole_size < num_bytes)
1259		ret = -ENOSPC;
1260	else
1261		ret = 0;
1262
1263out:
1264	btrfs_free_path(path);
1265	*start = max_hole_start;
1266	if (len)
1267		*len = max_hole_size;
1268	return ret;
1269}
1270
1271static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1272			  struct btrfs_device *device,
1273			  u64 start, u64 *dev_extent_len)
1274{
1275	int ret;
1276	struct btrfs_path *path;
1277	struct btrfs_root *root = device->dev_root;
1278	struct btrfs_key key;
1279	struct btrfs_key found_key;
1280	struct extent_buffer *leaf = NULL;
1281	struct btrfs_dev_extent *extent = NULL;
1282
1283	path = btrfs_alloc_path();
1284	if (!path)
1285		return -ENOMEM;
1286
1287	key.objectid = device->devid;
1288	key.offset = start;
1289	key.type = BTRFS_DEV_EXTENT_KEY;
1290again:
1291	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1292	if (ret > 0) {
1293		ret = btrfs_previous_item(root, path, key.objectid,
1294					  BTRFS_DEV_EXTENT_KEY);
1295		if (ret)
1296			goto out;
1297		leaf = path->nodes[0];
1298		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1299		extent = btrfs_item_ptr(leaf, path->slots[0],
1300					struct btrfs_dev_extent);
1301		BUG_ON(found_key.offset > start || found_key.offset +
1302		       btrfs_dev_extent_length(leaf, extent) < start);
1303		key = found_key;
1304		btrfs_release_path(path);
1305		goto again;
1306	} else if (ret == 0) {
1307		leaf = path->nodes[0];
1308		extent = btrfs_item_ptr(leaf, path->slots[0],
1309					struct btrfs_dev_extent);
1310	} else {
1311		btrfs_error(root->fs_info, ret, "Slot search failed");
1312		goto out;
1313	}
1314
1315	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1316
1317	ret = btrfs_del_item(trans, root, path);
1318	if (ret) {
1319		btrfs_error(root->fs_info, ret,
1320			    "Failed to remove dev extent item");
1321	} else {
1322		trans->transaction->have_free_bgs = 1;
1323	}
1324out:
1325	btrfs_free_path(path);
1326	return ret;
1327}
1328
1329static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1330				  struct btrfs_device *device,
1331				  u64 chunk_tree, u64 chunk_objectid,
1332				  u64 chunk_offset, u64 start, u64 num_bytes)
1333{
1334	int ret;
1335	struct btrfs_path *path;
1336	struct btrfs_root *root = device->dev_root;
1337	struct btrfs_dev_extent *extent;
1338	struct extent_buffer *leaf;
1339	struct btrfs_key key;
1340
1341	WARN_ON(!device->in_fs_metadata);
1342	WARN_ON(device->is_tgtdev_for_dev_replace);
1343	path = btrfs_alloc_path();
1344	if (!path)
1345		return -ENOMEM;
1346
1347	key.objectid = device->devid;
1348	key.offset = start;
1349	key.type = BTRFS_DEV_EXTENT_KEY;
1350	ret = btrfs_insert_empty_item(trans, root, path, &key,
1351				      sizeof(*extent));
1352	if (ret)
1353		goto out;
1354
1355	leaf = path->nodes[0];
1356	extent = btrfs_item_ptr(leaf, path->slots[0],
1357				struct btrfs_dev_extent);
1358	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1359	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1360	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1361
1362	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1363		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1364
1365	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1366	btrfs_mark_buffer_dirty(leaf);
1367out:
1368	btrfs_free_path(path);
1369	return ret;
1370}
1371
1372static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1373{
1374	struct extent_map_tree *em_tree;
1375	struct extent_map *em;
1376	struct rb_node *n;
1377	u64 ret = 0;
1378
1379	em_tree = &fs_info->mapping_tree.map_tree;
1380	read_lock(&em_tree->lock);
1381	n = rb_last(&em_tree->map);
1382	if (n) {
1383		em = rb_entry(n, struct extent_map, rb_node);
1384		ret = em->start + em->len;
1385	}
1386	read_unlock(&em_tree->lock);
1387
1388	return ret;
1389}
1390
1391static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1392				    u64 *devid_ret)
1393{
1394	int ret;
1395	struct btrfs_key key;
1396	struct btrfs_key found_key;
1397	struct btrfs_path *path;
1398
1399	path = btrfs_alloc_path();
1400	if (!path)
1401		return -ENOMEM;
1402
1403	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1404	key.type = BTRFS_DEV_ITEM_KEY;
1405	key.offset = (u64)-1;
1406
1407	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1408	if (ret < 0)
1409		goto error;
1410
1411	BUG_ON(ret == 0); /* Corruption */
1412
1413	ret = btrfs_previous_item(fs_info->chunk_root, path,
1414				  BTRFS_DEV_ITEMS_OBJECTID,
1415				  BTRFS_DEV_ITEM_KEY);
1416	if (ret) {
1417		*devid_ret = 1;
1418	} else {
1419		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1420				      path->slots[0]);
1421		*devid_ret = found_key.offset + 1;
1422	}
1423	ret = 0;
1424error:
1425	btrfs_free_path(path);
1426	return ret;
1427}
1428
1429/*
1430 * the device information is stored in the chunk root
1431 * the btrfs_device struct should be fully filled in
1432 */
1433static int btrfs_add_device(struct btrfs_trans_handle *trans,
1434			    struct btrfs_root *root,
1435			    struct btrfs_device *device)
1436{
1437	int ret;
1438	struct btrfs_path *path;
1439	struct btrfs_dev_item *dev_item;
1440	struct extent_buffer *leaf;
1441	struct btrfs_key key;
1442	unsigned long ptr;
1443
1444	root = root->fs_info->chunk_root;
1445
1446	path = btrfs_alloc_path();
1447	if (!path)
1448		return -ENOMEM;
1449
1450	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1451	key.type = BTRFS_DEV_ITEM_KEY;
1452	key.offset = device->devid;
1453
1454	ret = btrfs_insert_empty_item(trans, root, path, &key,
1455				      sizeof(*dev_item));
1456	if (ret)
1457		goto out;
1458
1459	leaf = path->nodes[0];
1460	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1461
1462	btrfs_set_device_id(leaf, dev_item, device->devid);
1463	btrfs_set_device_generation(leaf, dev_item, 0);
1464	btrfs_set_device_type(leaf, dev_item, device->type);
1465	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1466	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1467	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1468	btrfs_set_device_total_bytes(leaf, dev_item,
1469				     btrfs_device_get_disk_total_bytes(device));
1470	btrfs_set_device_bytes_used(leaf, dev_item,
1471				    btrfs_device_get_bytes_used(device));
1472	btrfs_set_device_group(leaf, dev_item, 0);
1473	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1474	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1475	btrfs_set_device_start_offset(leaf, dev_item, 0);
1476
1477	ptr = btrfs_device_uuid(dev_item);
1478	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1479	ptr = btrfs_device_fsid(dev_item);
1480	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1481	btrfs_mark_buffer_dirty(leaf);
1482
1483	ret = 0;
1484out:
1485	btrfs_free_path(path);
1486	return ret;
1487}
1488
1489/*
1490 * Function to update ctime/mtime for a given device path.
1491 * Mainly used for ctime/mtime based probe like libblkid.
1492 */
1493static void update_dev_time(char *path_name)
1494{
1495	struct file *filp;
1496
1497	filp = filp_open(path_name, O_RDWR, 0);
1498	if (IS_ERR(filp))
1499		return;
1500	file_update_time(filp);
1501	filp_close(filp, NULL);
1502	return;
1503}
1504
1505static int btrfs_rm_dev_item(struct btrfs_root *root,
1506			     struct btrfs_device *device)
1507{
1508	int ret;
1509	struct btrfs_path *path;
1510	struct btrfs_key key;
1511	struct btrfs_trans_handle *trans;
1512
1513	root = root->fs_info->chunk_root;
1514
1515	path = btrfs_alloc_path();
1516	if (!path)
1517		return -ENOMEM;
1518
1519	trans = btrfs_start_transaction(root, 0);
1520	if (IS_ERR(trans)) {
1521		btrfs_free_path(path);
1522		return PTR_ERR(trans);
1523	}
1524	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1525	key.type = BTRFS_DEV_ITEM_KEY;
1526	key.offset = device->devid;
1527
1528	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1529	if (ret < 0)
1530		goto out;
1531
1532	if (ret > 0) {
1533		ret = -ENOENT;
1534		goto out;
1535	}
1536
1537	ret = btrfs_del_item(trans, root, path);
1538	if (ret)
1539		goto out;
1540out:
1541	btrfs_free_path(path);
1542	btrfs_commit_transaction(trans, root);
1543	return ret;
1544}
1545
1546int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1547{
1548	struct btrfs_device *device;
1549	struct btrfs_device *next_device;
1550	struct block_device *bdev;
1551	struct buffer_head *bh = NULL;
1552	struct btrfs_super_block *disk_super;
1553	struct btrfs_fs_devices *cur_devices;
1554	u64 all_avail;
1555	u64 devid;
1556	u64 num_devices;
1557	u8 *dev_uuid;
1558	unsigned seq;
1559	int ret = 0;
1560	bool clear_super = false;
1561
1562	mutex_lock(&uuid_mutex);
1563
1564	do {
1565		seq = read_seqbegin(&root->fs_info->profiles_lock);
1566
1567		all_avail = root->fs_info->avail_data_alloc_bits |
1568			    root->fs_info->avail_system_alloc_bits |
1569			    root->fs_info->avail_metadata_alloc_bits;
1570	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1571
1572	num_devices = root->fs_info->fs_devices->num_devices;
1573	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1574	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1575		WARN_ON(num_devices < 1);
1576		num_devices--;
1577	}
1578	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1579
1580	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1581		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1582		goto out;
1583	}
1584
1585	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1586		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1587		goto out;
1588	}
1589
1590	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1591	    root->fs_info->fs_devices->rw_devices <= 2) {
1592		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1593		goto out;
1594	}
1595	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1596	    root->fs_info->fs_devices->rw_devices <= 3) {
1597		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1598		goto out;
1599	}
1600
1601	if (strcmp(device_path, "missing") == 0) {
1602		struct list_head *devices;
1603		struct btrfs_device *tmp;
1604
1605		device = NULL;
1606		devices = &root->fs_info->fs_devices->devices;
1607		/*
1608		 * It is safe to read the devices since the volume_mutex
1609		 * is held.
1610		 */
1611		list_for_each_entry(tmp, devices, dev_list) {
1612			if (tmp->in_fs_metadata &&
1613			    !tmp->is_tgtdev_for_dev_replace &&
1614			    !tmp->bdev) {
1615				device = tmp;
1616				break;
1617			}
1618		}
1619		bdev = NULL;
1620		bh = NULL;
1621		disk_super = NULL;
1622		if (!device) {
1623			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1624			goto out;
1625		}
1626	} else {
1627		ret = btrfs_get_bdev_and_sb(device_path,
1628					    FMODE_WRITE | FMODE_EXCL,
1629					    root->fs_info->bdev_holder, 0,
1630					    &bdev, &bh);
1631		if (ret)
1632			goto out;
1633		disk_super = (struct btrfs_super_block *)bh->b_data;
1634		devid = btrfs_stack_device_id(&disk_super->dev_item);
1635		dev_uuid = disk_super->dev_item.uuid;
1636		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1637					   disk_super->fsid);
1638		if (!device) {
1639			ret = -ENOENT;
1640			goto error_brelse;
1641		}
1642	}
1643
1644	if (device->is_tgtdev_for_dev_replace) {
1645		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1646		goto error_brelse;
1647	}
1648
1649	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1650		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1651		goto error_brelse;
1652	}
1653
1654	if (device->writeable) {
1655		lock_chunks(root);
1656		list_del_init(&device->dev_alloc_list);
1657		device->fs_devices->rw_devices--;
1658		unlock_chunks(root);
1659		clear_super = true;
1660	}
1661
1662	mutex_unlock(&uuid_mutex);
1663	ret = btrfs_shrink_device(device, 0);
1664	mutex_lock(&uuid_mutex);
1665	if (ret)
1666		goto error_undo;
1667
1668	/*
1669	 * TODO: the superblock still includes this device in its num_devices
1670	 * counter although write_all_supers() is not locked out. This
1671	 * could give a filesystem state which requires a degraded mount.
1672	 */
1673	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1674	if (ret)
1675		goto error_undo;
1676
1677	device->in_fs_metadata = 0;
1678	btrfs_scrub_cancel_dev(root->fs_info, device);
1679
1680	/*
1681	 * the device list mutex makes sure that we don't change
1682	 * the device list while someone else is writing out all
1683	 * the device supers. Whoever is writing all supers, should
1684	 * lock the device list mutex before getting the number of
1685	 * devices in the super block (super_copy). Conversely,
1686	 * whoever updates the number of devices in the super block
1687	 * (super_copy) should hold the device list mutex.
1688	 */
1689
1690	cur_devices = device->fs_devices;
1691	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1692	list_del_rcu(&device->dev_list);
1693
1694	device->fs_devices->num_devices--;
1695	device->fs_devices->total_devices--;
1696
1697	if (device->missing)
1698		device->fs_devices->missing_devices--;
1699
1700	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1701				 struct btrfs_device, dev_list);
1702	if (device->bdev == root->fs_info->sb->s_bdev)
1703		root->fs_info->sb->s_bdev = next_device->bdev;
1704	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1705		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1706
1707	if (device->bdev) {
1708		device->fs_devices->open_devices--;
1709		/* remove sysfs entry */
1710		btrfs_kobj_rm_device(root->fs_info, device);
1711	}
1712
1713	call_rcu(&device->rcu, free_device);
1714
1715	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1716	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1717	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1718
1719	if (cur_devices->open_devices == 0) {
1720		struct btrfs_fs_devices *fs_devices;
1721		fs_devices = root->fs_info->fs_devices;
1722		while (fs_devices) {
1723			if (fs_devices->seed == cur_devices) {
1724				fs_devices->seed = cur_devices->seed;
1725				break;
1726			}
1727			fs_devices = fs_devices->seed;
1728		}
1729		cur_devices->seed = NULL;
1730		__btrfs_close_devices(cur_devices);
1731		free_fs_devices(cur_devices);
1732	}
1733
1734	root->fs_info->num_tolerated_disk_barrier_failures =
1735		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1736
1737	/*
1738	 * at this point, the device is zero sized.  We want to
1739	 * remove it from the devices list and zero out the old super
1740	 */
1741	if (clear_super && disk_super) {
1742		u64 bytenr;
1743		int i;
1744
1745		/* make sure this device isn't detected as part of
1746		 * the FS anymore
1747		 */
1748		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1749		set_buffer_dirty(bh);
1750		sync_dirty_buffer(bh);
1751
1752		/* clear the mirror copies of super block on the disk
1753		 * being removed, 0th copy is been taken care above and
1754		 * the below would take of the rest
1755		 */
1756		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1757			bytenr = btrfs_sb_offset(i);
1758			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1759					i_size_read(bdev->bd_inode))
1760				break;
1761
1762			brelse(bh);
1763			bh = __bread(bdev, bytenr / 4096,
1764					BTRFS_SUPER_INFO_SIZE);
1765			if (!bh)
1766				continue;
1767
1768			disk_super = (struct btrfs_super_block *)bh->b_data;
1769
1770			if (btrfs_super_bytenr(disk_super) != bytenr ||
1771				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1772				continue;
1773			}
1774			memset(&disk_super->magic, 0,
1775						sizeof(disk_super->magic));
1776			set_buffer_dirty(bh);
1777			sync_dirty_buffer(bh);
1778		}
1779	}
1780
1781	ret = 0;
1782
1783	if (bdev) {
1784		/* Notify udev that device has changed */
1785		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1786
1787		/* Update ctime/mtime for device path for libblkid */
1788		update_dev_time(device_path);
1789	}
1790
1791error_brelse:
1792	brelse(bh);
1793	if (bdev)
1794		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1795out:
1796	mutex_unlock(&uuid_mutex);
1797	return ret;
1798error_undo:
1799	if (device->writeable) {
1800		lock_chunks(root);
1801		list_add(&device->dev_alloc_list,
1802			 &root->fs_info->fs_devices->alloc_list);
1803		device->fs_devices->rw_devices++;
1804		unlock_chunks(root);
1805	}
1806	goto error_brelse;
1807}
1808
1809void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1810					struct btrfs_device *srcdev)
1811{
1812	struct btrfs_fs_devices *fs_devices;
1813
1814	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1815
1816	/*
1817	 * in case of fs with no seed, srcdev->fs_devices will point
1818	 * to fs_devices of fs_info. However when the dev being replaced is
1819	 * a seed dev it will point to the seed's local fs_devices. In short
1820	 * srcdev will have its correct fs_devices in both the cases.
1821	 */
1822	fs_devices = srcdev->fs_devices;
1823
1824	list_del_rcu(&srcdev->dev_list);
1825	list_del_rcu(&srcdev->dev_alloc_list);
1826	fs_devices->num_devices--;
1827	if (srcdev->missing)
1828		fs_devices->missing_devices--;
1829
1830	if (srcdev->writeable) {
1831		fs_devices->rw_devices--;
1832		/* zero out the old super if it is writable */
1833		btrfs_scratch_superblock(srcdev);
1834	}
1835
1836	if (srcdev->bdev)
1837		fs_devices->open_devices--;
1838}
1839
1840void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1841				      struct btrfs_device *srcdev)
1842{
1843	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1844
1845	call_rcu(&srcdev->rcu, free_device);
1846
1847	/*
1848	 * unless fs_devices is seed fs, num_devices shouldn't go
1849	 * zero
1850	 */
1851	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1852
1853	/* if this is no devs we rather delete the fs_devices */
1854	if (!fs_devices->num_devices) {
1855		struct btrfs_fs_devices *tmp_fs_devices;
1856
1857		tmp_fs_devices = fs_info->fs_devices;
1858		while (tmp_fs_devices) {
1859			if (tmp_fs_devices->seed == fs_devices) {
1860				tmp_fs_devices->seed = fs_devices->seed;
1861				break;
1862			}
1863			tmp_fs_devices = tmp_fs_devices->seed;
1864		}
1865		fs_devices->seed = NULL;
1866		__btrfs_close_devices(fs_devices);
1867		free_fs_devices(fs_devices);
1868	}
1869}
1870
1871void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1872				      struct btrfs_device *tgtdev)
1873{
1874	struct btrfs_device *next_device;
1875
1876	mutex_lock(&uuid_mutex);
1877	WARN_ON(!tgtdev);
1878	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1879	if (tgtdev->bdev) {
1880		btrfs_scratch_superblock(tgtdev);
1881		fs_info->fs_devices->open_devices--;
1882	}
1883	fs_info->fs_devices->num_devices--;
1884
1885	next_device = list_entry(fs_info->fs_devices->devices.next,
1886				 struct btrfs_device, dev_list);
1887	if (tgtdev->bdev == fs_info->sb->s_bdev)
1888		fs_info->sb->s_bdev = next_device->bdev;
1889	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1890		fs_info->fs_devices->latest_bdev = next_device->bdev;
1891	list_del_rcu(&tgtdev->dev_list);
1892
1893	call_rcu(&tgtdev->rcu, free_device);
1894
1895	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1896	mutex_unlock(&uuid_mutex);
1897}
1898
1899static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1900				     struct btrfs_device **device)
1901{
1902	int ret = 0;
1903	struct btrfs_super_block *disk_super;
1904	u64 devid;
1905	u8 *dev_uuid;
1906	struct block_device *bdev;
1907	struct buffer_head *bh;
1908
1909	*device = NULL;
1910	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1911				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1912	if (ret)
1913		return ret;
1914	disk_super = (struct btrfs_super_block *)bh->b_data;
1915	devid = btrfs_stack_device_id(&disk_super->dev_item);
1916	dev_uuid = disk_super->dev_item.uuid;
1917	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1918				    disk_super->fsid);
1919	brelse(bh);
1920	if (!*device)
1921		ret = -ENOENT;
1922	blkdev_put(bdev, FMODE_READ);
1923	return ret;
1924}
1925
1926int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1927					 char *device_path,
1928					 struct btrfs_device **device)
1929{
1930	*device = NULL;
1931	if (strcmp(device_path, "missing") == 0) {
1932		struct list_head *devices;
1933		struct btrfs_device *tmp;
1934
1935		devices = &root->fs_info->fs_devices->devices;
1936		/*
1937		 * It is safe to read the devices since the volume_mutex
1938		 * is held by the caller.
1939		 */
1940		list_for_each_entry(tmp, devices, dev_list) {
1941			if (tmp->in_fs_metadata && !tmp->bdev) {
1942				*device = tmp;
1943				break;
1944			}
1945		}
1946
1947		if (!*device) {
1948			btrfs_err(root->fs_info, "no missing device found");
1949			return -ENOENT;
1950		}
1951
1952		return 0;
1953	} else {
1954		return btrfs_find_device_by_path(root, device_path, device);
1955	}
1956}
1957
1958/*
1959 * does all the dirty work required for changing file system's UUID.
1960 */
1961static int btrfs_prepare_sprout(struct btrfs_root *root)
1962{
1963	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1964	struct btrfs_fs_devices *old_devices;
1965	struct btrfs_fs_devices *seed_devices;
1966	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1967	struct btrfs_device *device;
1968	u64 super_flags;
1969
1970	BUG_ON(!mutex_is_locked(&uuid_mutex));
1971	if (!fs_devices->seeding)
1972		return -EINVAL;
1973
1974	seed_devices = __alloc_fs_devices();
1975	if (IS_ERR(seed_devices))
1976		return PTR_ERR(seed_devices);
1977
1978	old_devices = clone_fs_devices(fs_devices);
1979	if (IS_ERR(old_devices)) {
1980		kfree(seed_devices);
1981		return PTR_ERR(old_devices);
1982	}
1983
1984	list_add(&old_devices->list, &fs_uuids);
1985
1986	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1987	seed_devices->opened = 1;
1988	INIT_LIST_HEAD(&seed_devices->devices);
1989	INIT_LIST_HEAD(&seed_devices->alloc_list);
1990	mutex_init(&seed_devices->device_list_mutex);
1991
1992	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1993	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1994			      synchronize_rcu);
1995	list_for_each_entry(device, &seed_devices->devices, dev_list)
1996		device->fs_devices = seed_devices;
1997
1998	lock_chunks(root);
1999	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2000	unlock_chunks(root);
2001
2002	fs_devices->seeding = 0;
2003	fs_devices->num_devices = 0;
2004	fs_devices->open_devices = 0;
2005	fs_devices->missing_devices = 0;
2006	fs_devices->rotating = 0;
2007	fs_devices->seed = seed_devices;
2008
2009	generate_random_uuid(fs_devices->fsid);
2010	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2011	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2012	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2013
2014	super_flags = btrfs_super_flags(disk_super) &
2015		      ~BTRFS_SUPER_FLAG_SEEDING;
2016	btrfs_set_super_flags(disk_super, super_flags);
2017
2018	return 0;
2019}
2020
2021/*
2022 * strore the expected generation for seed devices in device items.
2023 */
2024static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2025			       struct btrfs_root *root)
2026{
2027	struct btrfs_path *path;
2028	struct extent_buffer *leaf;
2029	struct btrfs_dev_item *dev_item;
2030	struct btrfs_device *device;
2031	struct btrfs_key key;
2032	u8 fs_uuid[BTRFS_UUID_SIZE];
2033	u8 dev_uuid[BTRFS_UUID_SIZE];
2034	u64 devid;
2035	int ret;
2036
2037	path = btrfs_alloc_path();
2038	if (!path)
2039		return -ENOMEM;
2040
2041	root = root->fs_info->chunk_root;
2042	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2043	key.offset = 0;
2044	key.type = BTRFS_DEV_ITEM_KEY;
2045
2046	while (1) {
2047		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2048		if (ret < 0)
2049			goto error;
2050
2051		leaf = path->nodes[0];
2052next_slot:
2053		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2054			ret = btrfs_next_leaf(root, path);
2055			if (ret > 0)
2056				break;
2057			if (ret < 0)
2058				goto error;
2059			leaf = path->nodes[0];
2060			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2061			btrfs_release_path(path);
2062			continue;
2063		}
2064
2065		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2066		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2067		    key.type != BTRFS_DEV_ITEM_KEY)
2068			break;
2069
2070		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2071					  struct btrfs_dev_item);
2072		devid = btrfs_device_id(leaf, dev_item);
2073		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2074				   BTRFS_UUID_SIZE);
2075		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2076				   BTRFS_UUID_SIZE);
2077		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2078					   fs_uuid);
2079		BUG_ON(!device); /* Logic error */
2080
2081		if (device->fs_devices->seeding) {
2082			btrfs_set_device_generation(leaf, dev_item,
2083						    device->generation);
2084			btrfs_mark_buffer_dirty(leaf);
2085		}
2086
2087		path->slots[0]++;
2088		goto next_slot;
2089	}
2090	ret = 0;
2091error:
2092	btrfs_free_path(path);
2093	return ret;
2094}
2095
2096int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2097{
2098	struct request_queue *q;
2099	struct btrfs_trans_handle *trans;
2100	struct btrfs_device *device;
2101	struct block_device *bdev;
2102	struct list_head *devices;
2103	struct super_block *sb = root->fs_info->sb;
2104	struct rcu_string *name;
2105	u64 tmp;
2106	int seeding_dev = 0;
2107	int ret = 0;
2108
2109	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2110		return -EROFS;
2111
2112	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2113				  root->fs_info->bdev_holder);
2114	if (IS_ERR(bdev))
2115		return PTR_ERR(bdev);
2116
2117	if (root->fs_info->fs_devices->seeding) {
2118		seeding_dev = 1;
2119		down_write(&sb->s_umount);
2120		mutex_lock(&uuid_mutex);
2121	}
2122
2123	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2124
2125	devices = &root->fs_info->fs_devices->devices;
2126
2127	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2128	list_for_each_entry(device, devices, dev_list) {
2129		if (device->bdev == bdev) {
2130			ret = -EEXIST;
2131			mutex_unlock(
2132				&root->fs_info->fs_devices->device_list_mutex);
2133			goto error;
2134		}
2135	}
2136	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2137
2138	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2139	if (IS_ERR(device)) {
2140		/* we can safely leave the fs_devices entry around */
2141		ret = PTR_ERR(device);
2142		goto error;
2143	}
2144
2145	name = rcu_string_strdup(device_path, GFP_NOFS);
2146	if (!name) {
2147		kfree(device);
2148		ret = -ENOMEM;
2149		goto error;
2150	}
2151	rcu_assign_pointer(device->name, name);
2152
2153	trans = btrfs_start_transaction(root, 0);
2154	if (IS_ERR(trans)) {
2155		rcu_string_free(device->name);
2156		kfree(device);
2157		ret = PTR_ERR(trans);
2158		goto error;
2159	}
2160
2161	q = bdev_get_queue(bdev);
2162	if (blk_queue_discard(q))
2163		device->can_discard = 1;
2164	device->writeable = 1;
2165	device->generation = trans->transid;
2166	device->io_width = root->sectorsize;
2167	device->io_align = root->sectorsize;
2168	device->sector_size = root->sectorsize;
2169	device->total_bytes = i_size_read(bdev->bd_inode);
2170	device->disk_total_bytes = device->total_bytes;
2171	device->commit_total_bytes = device->total_bytes;
2172	device->dev_root = root->fs_info->dev_root;
2173	device->bdev = bdev;
2174	device->in_fs_metadata = 1;
2175	device->is_tgtdev_for_dev_replace = 0;
2176	device->mode = FMODE_EXCL;
2177	device->dev_stats_valid = 1;
2178	set_blocksize(device->bdev, 4096);
2179
2180	if (seeding_dev) {
2181		sb->s_flags &= ~MS_RDONLY;
2182		ret = btrfs_prepare_sprout(root);
2183		BUG_ON(ret); /* -ENOMEM */
2184	}
2185
2186	device->fs_devices = root->fs_info->fs_devices;
2187
2188	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2189	lock_chunks(root);
2190	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2191	list_add(&device->dev_alloc_list,
2192		 &root->fs_info->fs_devices->alloc_list);
2193	root->fs_info->fs_devices->num_devices++;
2194	root->fs_info->fs_devices->open_devices++;
2195	root->fs_info->fs_devices->rw_devices++;
2196	root->fs_info->fs_devices->total_devices++;
2197	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2198
2199	spin_lock(&root->fs_info->free_chunk_lock);
2200	root->fs_info->free_chunk_space += device->total_bytes;
2201	spin_unlock(&root->fs_info->free_chunk_lock);
2202
2203	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2204		root->fs_info->fs_devices->rotating = 1;
2205
2206	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2207	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2208				    tmp + device->total_bytes);
2209
2210	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2211	btrfs_set_super_num_devices(root->fs_info->super_copy,
2212				    tmp + 1);
2213
2214	/* add sysfs device entry */
2215	btrfs_kobj_add_device(root->fs_info, device);
2216
2217	/*
2218	 * we've got more storage, clear any full flags on the space
2219	 * infos
2220	 */
2221	btrfs_clear_space_info_full(root->fs_info);
2222
2223	unlock_chunks(root);
2224	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2225
2226	if (seeding_dev) {
2227		lock_chunks(root);
2228		ret = init_first_rw_device(trans, root, device);
2229		unlock_chunks(root);
2230		if (ret) {
2231			btrfs_abort_transaction(trans, root, ret);
2232			goto error_trans;
2233		}
2234	}
2235
2236	ret = btrfs_add_device(trans, root, device);
2237	if (ret) {
2238		btrfs_abort_transaction(trans, root, ret);
2239		goto error_trans;
2240	}
2241
2242	if (seeding_dev) {
2243		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2244
2245		ret = btrfs_finish_sprout(trans, root);
2246		if (ret) {
2247			btrfs_abort_transaction(trans, root, ret);
2248			goto error_trans;
2249		}
2250
2251		/* Sprouting would change fsid of the mounted root,
2252		 * so rename the fsid on the sysfs
2253		 */
2254		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2255						root->fs_info->fsid);
2256		if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
2257			goto error_trans;
2258	}
2259
2260	root->fs_info->num_tolerated_disk_barrier_failures =
2261		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2262	ret = btrfs_commit_transaction(trans, root);
2263
2264	if (seeding_dev) {
2265		mutex_unlock(&uuid_mutex);
2266		up_write(&sb->s_umount);
2267
2268		if (ret) /* transaction commit */
2269			return ret;
2270
2271		ret = btrfs_relocate_sys_chunks(root);
2272		if (ret < 0)
2273			btrfs_error(root->fs_info, ret,
2274				    "Failed to relocate sys chunks after "
2275				    "device initialization. This can be fixed "
2276				    "using the \"btrfs balance\" command.");
2277		trans = btrfs_attach_transaction(root);
2278		if (IS_ERR(trans)) {
2279			if (PTR_ERR(trans) == -ENOENT)
2280				return 0;
2281			return PTR_ERR(trans);
2282		}
2283		ret = btrfs_commit_transaction(trans, root);
2284	}
2285
2286	/* Update ctime/mtime for libblkid */
2287	update_dev_time(device_path);
2288	return ret;
2289
2290error_trans:
2291	btrfs_end_transaction(trans, root);
2292	rcu_string_free(device->name);
2293	btrfs_kobj_rm_device(root->fs_info, device);
2294	kfree(device);
2295error:
2296	blkdev_put(bdev, FMODE_EXCL);
2297	if (seeding_dev) {
2298		mutex_unlock(&uuid_mutex);
2299		up_write(&sb->s_umount);
2300	}
2301	return ret;
2302}
2303
2304int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2305				  struct btrfs_device *srcdev,
2306				  struct btrfs_device **device_out)
2307{
2308	struct request_queue *q;
2309	struct btrfs_device *device;
2310	struct block_device *bdev;
2311	struct btrfs_fs_info *fs_info = root->fs_info;
2312	struct list_head *devices;
2313	struct rcu_string *name;
2314	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2315	int ret = 0;
2316
2317	*device_out = NULL;
2318	if (fs_info->fs_devices->seeding) {
2319		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2320		return -EINVAL;
2321	}
2322
2323	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2324				  fs_info->bdev_holder);
2325	if (IS_ERR(bdev)) {
2326		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2327		return PTR_ERR(bdev);
2328	}
2329
2330	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2331
2332	devices = &fs_info->fs_devices->devices;
2333	list_for_each_entry(device, devices, dev_list) {
2334		if (device->bdev == bdev) {
2335			btrfs_err(fs_info, "target device is in the filesystem!");
2336			ret = -EEXIST;
2337			goto error;
2338		}
2339	}
2340
2341
2342	if (i_size_read(bdev->bd_inode) <
2343	    btrfs_device_get_total_bytes(srcdev)) {
2344		btrfs_err(fs_info, "target device is smaller than source device!");
2345		ret = -EINVAL;
2346		goto error;
2347	}
2348
2349
2350	device = btrfs_alloc_device(NULL, &devid, NULL);
2351	if (IS_ERR(device)) {
2352		ret = PTR_ERR(device);
2353		goto error;
2354	}
2355
2356	name = rcu_string_strdup(device_path, GFP_NOFS);
2357	if (!name) {
2358		kfree(device);
2359		ret = -ENOMEM;
2360		goto error;
2361	}
2362	rcu_assign_pointer(device->name, name);
2363
2364	q = bdev_get_queue(bdev);
2365	if (blk_queue_discard(q))
2366		device->can_discard = 1;
2367	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2368	device->writeable = 1;
2369	device->generation = 0;
2370	device->io_width = root->sectorsize;
2371	device->io_align = root->sectorsize;
2372	device->sector_size = root->sectorsize;
2373	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2374	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2375	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2376	ASSERT(list_empty(&srcdev->resized_list));
2377	device->commit_total_bytes = srcdev->commit_total_bytes;
2378	device->commit_bytes_used = device->bytes_used;
2379	device->dev_root = fs_info->dev_root;
2380	device->bdev = bdev;
2381	device->in_fs_metadata = 1;
2382	device->is_tgtdev_for_dev_replace = 1;
2383	device->mode = FMODE_EXCL;
2384	device->dev_stats_valid = 1;
2385	set_blocksize(device->bdev, 4096);
2386	device->fs_devices = fs_info->fs_devices;
2387	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2388	fs_info->fs_devices->num_devices++;
2389	fs_info->fs_devices->open_devices++;
2390	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2391
2392	*device_out = device;
2393	return ret;
2394
2395error:
2396	blkdev_put(bdev, FMODE_EXCL);
2397	return ret;
2398}
2399
2400void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2401					      struct btrfs_device *tgtdev)
2402{
2403	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2404	tgtdev->io_width = fs_info->dev_root->sectorsize;
2405	tgtdev->io_align = fs_info->dev_root->sectorsize;
2406	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2407	tgtdev->dev_root = fs_info->dev_root;
2408	tgtdev->in_fs_metadata = 1;
2409}
2410
2411static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2412					struct btrfs_device *device)
2413{
2414	int ret;
2415	struct btrfs_path *path;
2416	struct btrfs_root *root;
2417	struct btrfs_dev_item *dev_item;
2418	struct extent_buffer *leaf;
2419	struct btrfs_key key;
2420
2421	root = device->dev_root->fs_info->chunk_root;
2422
2423	path = btrfs_alloc_path();
2424	if (!path)
2425		return -ENOMEM;
2426
2427	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2428	key.type = BTRFS_DEV_ITEM_KEY;
2429	key.offset = device->devid;
2430
2431	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2432	if (ret < 0)
2433		goto out;
2434
2435	if (ret > 0) {
2436		ret = -ENOENT;
2437		goto out;
2438	}
2439
2440	leaf = path->nodes[0];
2441	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2442
2443	btrfs_set_device_id(leaf, dev_item, device->devid);
2444	btrfs_set_device_type(leaf, dev_item, device->type);
2445	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2446	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2447	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2448	btrfs_set_device_total_bytes(leaf, dev_item,
2449				     btrfs_device_get_disk_total_bytes(device));
2450	btrfs_set_device_bytes_used(leaf, dev_item,
2451				    btrfs_device_get_bytes_used(device));
2452	btrfs_mark_buffer_dirty(leaf);
2453
2454out:
2455	btrfs_free_path(path);
2456	return ret;
2457}
2458
2459int btrfs_grow_device(struct btrfs_trans_handle *trans,
2460		      struct btrfs_device *device, u64 new_size)
2461{
2462	struct btrfs_super_block *super_copy =
2463		device->dev_root->fs_info->super_copy;
2464	struct btrfs_fs_devices *fs_devices;
2465	u64 old_total;
2466	u64 diff;
2467
2468	if (!device->writeable)
2469		return -EACCES;
2470
2471	lock_chunks(device->dev_root);
2472	old_total = btrfs_super_total_bytes(super_copy);
2473	diff = new_size - device->total_bytes;
2474
2475	if (new_size <= device->total_bytes ||
2476	    device->is_tgtdev_for_dev_replace) {
2477		unlock_chunks(device->dev_root);
2478		return -EINVAL;
2479	}
2480
2481	fs_devices = device->dev_root->fs_info->fs_devices;
2482
2483	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2484	device->fs_devices->total_rw_bytes += diff;
2485
2486	btrfs_device_set_total_bytes(device, new_size);
2487	btrfs_device_set_disk_total_bytes(device, new_size);
2488	btrfs_clear_space_info_full(device->dev_root->fs_info);
2489	if (list_empty(&device->resized_list))
2490		list_add_tail(&device->resized_list,
2491			      &fs_devices->resized_devices);
2492	unlock_chunks(device->dev_root);
2493
2494	return btrfs_update_device(trans, device);
2495}
2496
2497static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2498			    struct btrfs_root *root, u64 chunk_objectid,
2499			    u64 chunk_offset)
2500{
2501	int ret;
2502	struct btrfs_path *path;
2503	struct btrfs_key key;
2504
2505	root = root->fs_info->chunk_root;
2506	path = btrfs_alloc_path();
2507	if (!path)
2508		return -ENOMEM;
2509
2510	key.objectid = chunk_objectid;
2511	key.offset = chunk_offset;
2512	key.type = BTRFS_CHUNK_ITEM_KEY;
2513
2514	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2515	if (ret < 0)
2516		goto out;
2517	else if (ret > 0) { /* Logic error or corruption */
2518		btrfs_error(root->fs_info, -ENOENT,
2519			    "Failed lookup while freeing chunk.");
2520		ret = -ENOENT;
2521		goto out;
2522	}
2523
2524	ret = btrfs_del_item(trans, root, path);
2525	if (ret < 0)
2526		btrfs_error(root->fs_info, ret,
2527			    "Failed to delete chunk item.");
2528out:
2529	btrfs_free_path(path);
2530	return ret;
2531}
2532
2533static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2534			chunk_offset)
2535{
2536	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2537	struct btrfs_disk_key *disk_key;
2538	struct btrfs_chunk *chunk;
2539	u8 *ptr;
2540	int ret = 0;
2541	u32 num_stripes;
2542	u32 array_size;
2543	u32 len = 0;
2544	u32 cur;
2545	struct btrfs_key key;
2546
2547	lock_chunks(root);
2548	array_size = btrfs_super_sys_array_size(super_copy);
2549
2550	ptr = super_copy->sys_chunk_array;
2551	cur = 0;
2552
2553	while (cur < array_size) {
2554		disk_key = (struct btrfs_disk_key *)ptr;
2555		btrfs_disk_key_to_cpu(&key, disk_key);
2556
2557		len = sizeof(*disk_key);
2558
2559		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2560			chunk = (struct btrfs_chunk *)(ptr + len);
2561			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2562			len += btrfs_chunk_item_size(num_stripes);
2563		} else {
2564			ret = -EIO;
2565			break;
2566		}
2567		if (key.objectid == chunk_objectid &&
2568		    key.offset == chunk_offset) {
2569			memmove(ptr, ptr + len, array_size - (cur + len));
2570			array_size -= len;
2571			btrfs_set_super_sys_array_size(super_copy, array_size);
2572		} else {
2573			ptr += len;
2574			cur += len;
2575		}
2576	}
2577	unlock_chunks(root);
2578	return ret;
2579}
2580
2581int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2582		       struct btrfs_root *root, u64 chunk_offset)
2583{
2584	struct extent_map_tree *em_tree;
2585	struct extent_map *em;
2586	struct btrfs_root *extent_root = root->fs_info->extent_root;
2587	struct map_lookup *map;
2588	u64 dev_extent_len = 0;
2589	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2590	int i, ret = 0;
2591
2592	/* Just in case */
2593	root = root->fs_info->chunk_root;
2594	em_tree = &root->fs_info->mapping_tree.map_tree;
2595
2596	read_lock(&em_tree->lock);
2597	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2598	read_unlock(&em_tree->lock);
2599
2600	if (!em || em->start > chunk_offset ||
2601	    em->start + em->len < chunk_offset) {
2602		/*
2603		 * This is a logic error, but we don't want to just rely on the
2604		 * user having built with ASSERT enabled, so if ASSERT doens't
2605		 * do anything we still error out.
2606		 */
2607		ASSERT(0);
2608		if (em)
2609			free_extent_map(em);
2610		return -EINVAL;
2611	}
2612	map = (struct map_lookup *)em->bdev;
2613
2614	for (i = 0; i < map->num_stripes; i++) {
2615		struct btrfs_device *device = map->stripes[i].dev;
2616		ret = btrfs_free_dev_extent(trans, device,
2617					    map->stripes[i].physical,
2618					    &dev_extent_len);
2619		if (ret) {
2620			btrfs_abort_transaction(trans, root, ret);
2621			goto out;
2622		}
2623
2624		if (device->bytes_used > 0) {
2625			lock_chunks(root);
2626			btrfs_device_set_bytes_used(device,
2627					device->bytes_used - dev_extent_len);
2628			spin_lock(&root->fs_info->free_chunk_lock);
2629			root->fs_info->free_chunk_space += dev_extent_len;
2630			spin_unlock(&root->fs_info->free_chunk_lock);
2631			btrfs_clear_space_info_full(root->fs_info);
2632			unlock_chunks(root);
2633		}
2634
2635		if (map->stripes[i].dev) {
2636			ret = btrfs_update_device(trans, map->stripes[i].dev);
2637			if (ret) {
2638				btrfs_abort_transaction(trans, root, ret);
2639				goto out;
2640			}
2641		}
2642	}
2643	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2644	if (ret) {
2645		btrfs_abort_transaction(trans, root, ret);
2646		goto out;
2647	}
2648
2649	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2650
2651	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2652		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2653		if (ret) {
2654			btrfs_abort_transaction(trans, root, ret);
2655			goto out;
2656		}
2657	}
2658
2659	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2660	if (ret) {
2661		btrfs_abort_transaction(trans, extent_root, ret);
2662		goto out;
2663	}
2664
2665out:
2666	/* once for us */
2667	free_extent_map(em);
2668	return ret;
2669}
2670
2671static int btrfs_relocate_chunk(struct btrfs_root *root,
2672				u64 chunk_objectid,
2673				u64 chunk_offset)
2674{
2675	struct btrfs_root *extent_root;
2676	struct btrfs_trans_handle *trans;
2677	int ret;
2678
2679	root = root->fs_info->chunk_root;
2680	extent_root = root->fs_info->extent_root;
2681
2682	ret = btrfs_can_relocate(extent_root, chunk_offset);
2683	if (ret)
2684		return -ENOSPC;
2685
2686	/* step one, relocate all the extents inside this chunk */
2687	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2688	if (ret)
2689		return ret;
2690
2691	trans = btrfs_start_transaction(root, 0);
2692	if (IS_ERR(trans)) {
2693		ret = PTR_ERR(trans);
2694		btrfs_std_error(root->fs_info, ret);
2695		return ret;
2696	}
2697
2698	/*
2699	 * step two, delete the device extents and the
2700	 * chunk tree entries
2701	 */
2702	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2703	btrfs_end_transaction(trans, root);
2704	return ret;
2705}
2706
2707static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2708{
2709	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2710	struct btrfs_path *path;
2711	struct extent_buffer *leaf;
2712	struct btrfs_chunk *chunk;
2713	struct btrfs_key key;
2714	struct btrfs_key found_key;
2715	u64 chunk_type;
2716	bool retried = false;
2717	int failed = 0;
2718	int ret;
2719
2720	path = btrfs_alloc_path();
2721	if (!path)
2722		return -ENOMEM;
2723
2724again:
2725	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2726	key.offset = (u64)-1;
2727	key.type = BTRFS_CHUNK_ITEM_KEY;
2728
2729	while (1) {
2730		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2731		if (ret < 0)
2732			goto error;
2733		BUG_ON(ret == 0); /* Corruption */
2734
2735		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2736					  key.type);
2737		if (ret < 0)
2738			goto error;
2739		if (ret > 0)
2740			break;
2741
2742		leaf = path->nodes[0];
2743		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2744
2745		chunk = btrfs_item_ptr(leaf, path->slots[0],
2746				       struct btrfs_chunk);
2747		chunk_type = btrfs_chunk_type(leaf, chunk);
2748		btrfs_release_path(path);
2749
2750		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2751			ret = btrfs_relocate_chunk(chunk_root,
2752						   found_key.objectid,
2753						   found_key.offset);
2754			if (ret == -ENOSPC)
2755				failed++;
2756			else
2757				BUG_ON(ret);
2758		}
2759
2760		if (found_key.offset == 0)
2761			break;
2762		key.offset = found_key.offset - 1;
2763	}
2764	ret = 0;
2765	if (failed && !retried) {
2766		failed = 0;
2767		retried = true;
2768		goto again;
2769	} else if (WARN_ON(failed && retried)) {
2770		ret = -ENOSPC;
2771	}
2772error:
2773	btrfs_free_path(path);
2774	return ret;
2775}
2776
2777static int insert_balance_item(struct btrfs_root *root,
2778			       struct btrfs_balance_control *bctl)
2779{
2780	struct btrfs_trans_handle *trans;
2781	struct btrfs_balance_item *item;
2782	struct btrfs_disk_balance_args disk_bargs;
2783	struct btrfs_path *path;
2784	struct extent_buffer *leaf;
2785	struct btrfs_key key;
2786	int ret, err;
2787
2788	path = btrfs_alloc_path();
2789	if (!path)
2790		return -ENOMEM;
2791
2792	trans = btrfs_start_transaction(root, 0);
2793	if (IS_ERR(trans)) {
2794		btrfs_free_path(path);
2795		return PTR_ERR(trans);
2796	}
2797
2798	key.objectid = BTRFS_BALANCE_OBJECTID;
2799	key.type = BTRFS_BALANCE_ITEM_KEY;
2800	key.offset = 0;
2801
2802	ret = btrfs_insert_empty_item(trans, root, path, &key,
2803				      sizeof(*item));
2804	if (ret)
2805		goto out;
2806
2807	leaf = path->nodes[0];
2808	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2809
2810	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2811
2812	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2813	btrfs_set_balance_data(leaf, item, &disk_bargs);
2814	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2815	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2816	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2817	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2818
2819	btrfs_set_balance_flags(leaf, item, bctl->flags);
2820
2821	btrfs_mark_buffer_dirty(leaf);
2822out:
2823	btrfs_free_path(path);
2824	err = btrfs_commit_transaction(trans, root);
2825	if (err && !ret)
2826		ret = err;
2827	return ret;
2828}
2829
2830static int del_balance_item(struct btrfs_root *root)
2831{
2832	struct btrfs_trans_handle *trans;
2833	struct btrfs_path *path;
2834	struct btrfs_key key;
2835	int ret, err;
2836
2837	path = btrfs_alloc_path();
2838	if (!path)
2839		return -ENOMEM;
2840
2841	trans = btrfs_start_transaction(root, 0);
2842	if (IS_ERR(trans)) {
2843		btrfs_free_path(path);
2844		return PTR_ERR(trans);
2845	}
2846
2847	key.objectid = BTRFS_BALANCE_OBJECTID;
2848	key.type = BTRFS_BALANCE_ITEM_KEY;
2849	key.offset = 0;
2850
2851	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2852	if (ret < 0)
2853		goto out;
2854	if (ret > 0) {
2855		ret = -ENOENT;
2856		goto out;
2857	}
2858
2859	ret = btrfs_del_item(trans, root, path);
2860out:
2861	btrfs_free_path(path);
2862	err = btrfs_commit_transaction(trans, root);
2863	if (err && !ret)
2864		ret = err;
2865	return ret;
2866}
2867
2868/*
2869 * This is a heuristic used to reduce the number of chunks balanced on
2870 * resume after balance was interrupted.
2871 */
2872static void update_balance_args(struct btrfs_balance_control *bctl)
2873{
2874	/*
2875	 * Turn on soft mode for chunk types that were being converted.
2876	 */
2877	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2878		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2879	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2880		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2881	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2882		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2883
2884	/*
2885	 * Turn on usage filter if is not already used.  The idea is
2886	 * that chunks that we have already balanced should be
2887	 * reasonably full.  Don't do it for chunks that are being
2888	 * converted - that will keep us from relocating unconverted
2889	 * (albeit full) chunks.
2890	 */
2891	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2892	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2893		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2894		bctl->data.usage = 90;
2895	}
2896	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2897	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2898		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2899		bctl->sys.usage = 90;
2900	}
2901	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2902	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2903		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2904		bctl->meta.usage = 90;
2905	}
2906}
2907
2908/*
2909 * Should be called with both balance and volume mutexes held to
2910 * serialize other volume operations (add_dev/rm_dev/resize) with
2911 * restriper.  Same goes for unset_balance_control.
2912 */
2913static void set_balance_control(struct btrfs_balance_control *bctl)
2914{
2915	struct btrfs_fs_info *fs_info = bctl->fs_info;
2916
2917	BUG_ON(fs_info->balance_ctl);
2918
2919	spin_lock(&fs_info->balance_lock);
2920	fs_info->balance_ctl = bctl;
2921	spin_unlock(&fs_info->balance_lock);
2922}
2923
2924static void unset_balance_control(struct btrfs_fs_info *fs_info)
2925{
2926	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2927
2928	BUG_ON(!fs_info->balance_ctl);
2929
2930	spin_lock(&fs_info->balance_lock);
2931	fs_info->balance_ctl = NULL;
2932	spin_unlock(&fs_info->balance_lock);
2933
2934	kfree(bctl);
2935}
2936
2937/*
2938 * Balance filters.  Return 1 if chunk should be filtered out
2939 * (should not be balanced).
2940 */
2941static int chunk_profiles_filter(u64 chunk_type,
2942				 struct btrfs_balance_args *bargs)
2943{
2944	chunk_type = chunk_to_extended(chunk_type) &
2945				BTRFS_EXTENDED_PROFILE_MASK;
2946
2947	if (bargs->profiles & chunk_type)
2948		return 0;
2949
2950	return 1;
2951}
2952
2953static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2954			      struct btrfs_balance_args *bargs)
2955{
2956	struct btrfs_block_group_cache *cache;
2957	u64 chunk_used, user_thresh;
2958	int ret = 1;
2959
2960	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2961	chunk_used = btrfs_block_group_used(&cache->item);
2962
2963	if (bargs->usage == 0)
2964		user_thresh = 1;
2965	else if (bargs->usage > 100)
2966		user_thresh = cache->key.offset;
2967	else
2968		user_thresh = div_factor_fine(cache->key.offset,
2969					      bargs->usage);
2970
2971	if (chunk_used < user_thresh)
2972		ret = 0;
2973
2974	btrfs_put_block_group(cache);
2975	return ret;
2976}
2977
2978static int chunk_devid_filter(struct extent_buffer *leaf,
2979			      struct btrfs_chunk *chunk,
2980			      struct btrfs_balance_args *bargs)
2981{
2982	struct btrfs_stripe *stripe;
2983	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2984	int i;
2985
2986	for (i = 0; i < num_stripes; i++) {
2987		stripe = btrfs_stripe_nr(chunk, i);
2988		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2989			return 0;
2990	}
2991
2992	return 1;
2993}
2994
2995/* [pstart, pend) */
2996static int chunk_drange_filter(struct extent_buffer *leaf,
2997			       struct btrfs_chunk *chunk,
2998			       u64 chunk_offset,
2999			       struct btrfs_balance_args *bargs)
3000{
3001	struct btrfs_stripe *stripe;
3002	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3003	u64 stripe_offset;
3004	u64 stripe_length;
3005	int factor;
3006	int i;
3007
3008	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3009		return 0;
3010
3011	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3012	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3013		factor = num_stripes / 2;
3014	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3015		factor = num_stripes - 1;
3016	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3017		factor = num_stripes - 2;
3018	} else {
3019		factor = num_stripes;
3020	}
3021
3022	for (i = 0; i < num_stripes; i++) {
3023		stripe = btrfs_stripe_nr(chunk, i);
3024		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3025			continue;
3026
3027		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3028		stripe_length = btrfs_chunk_length(leaf, chunk);
3029		stripe_length = div_u64(stripe_length, factor);
3030
3031		if (stripe_offset < bargs->pend &&
3032		    stripe_offset + stripe_length > bargs->pstart)
3033			return 0;
3034	}
3035
3036	return 1;
3037}
3038
3039/* [vstart, vend) */
3040static int chunk_vrange_filter(struct extent_buffer *leaf,
3041			       struct btrfs_chunk *chunk,
3042			       u64 chunk_offset,
3043			       struct btrfs_balance_args *bargs)
3044{
3045	if (chunk_offset < bargs->vend &&
3046	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3047		/* at least part of the chunk is inside this vrange */
3048		return 0;
3049
3050	return 1;
3051}
3052
3053static int chunk_soft_convert_filter(u64 chunk_type,
3054				     struct btrfs_balance_args *bargs)
3055{
3056	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3057		return 0;
3058
3059	chunk_type = chunk_to_extended(chunk_type) &
3060				BTRFS_EXTENDED_PROFILE_MASK;
3061
3062	if (bargs->target == chunk_type)
3063		return 1;
3064
3065	return 0;
3066}
3067
3068static int should_balance_chunk(struct btrfs_root *root,
3069				struct extent_buffer *leaf,
3070				struct btrfs_chunk *chunk, u64 chunk_offset)
3071{
3072	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3073	struct btrfs_balance_args *bargs = NULL;
3074	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3075
3076	/* type filter */
3077	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3078	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3079		return 0;
3080	}
3081
3082	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3083		bargs = &bctl->data;
3084	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3085		bargs = &bctl->sys;
3086	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3087		bargs = &bctl->meta;
3088
3089	/* profiles filter */
3090	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3091	    chunk_profiles_filter(chunk_type, bargs)) {
3092		return 0;
3093	}
3094
3095	/* usage filter */
3096	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3097	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3098		return 0;
3099	}
3100
3101	/* devid filter */
3102	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3103	    chunk_devid_filter(leaf, chunk, bargs)) {
3104		return 0;
3105	}
3106
3107	/* drange filter, makes sense only with devid filter */
3108	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3109	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3110		return 0;
3111	}
3112
3113	/* vrange filter */
3114	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3115	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3116		return 0;
3117	}
3118
3119	/* soft profile changing mode */
3120	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3121	    chunk_soft_convert_filter(chunk_type, bargs)) {
3122		return 0;
3123	}
3124
3125	/*
3126	 * limited by count, must be the last filter
3127	 */
3128	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3129		if (bargs->limit == 0)
3130			return 0;
3131		else
3132			bargs->limit--;
3133	}
3134
3135	return 1;
3136}
3137
3138static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3139{
3140	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3141	struct btrfs_root *chunk_root = fs_info->chunk_root;
3142	struct btrfs_root *dev_root = fs_info->dev_root;
3143	struct list_head *devices;
3144	struct btrfs_device *device;
3145	u64 old_size;
3146	u64 size_to_free;
3147	struct btrfs_chunk *chunk;
3148	struct btrfs_path *path;
3149	struct btrfs_key key;
3150	struct btrfs_key found_key;
3151	struct btrfs_trans_handle *trans;
3152	struct extent_buffer *leaf;
3153	int slot;
3154	int ret;
3155	int enospc_errors = 0;
3156	bool counting = true;
3157	u64 limit_data = bctl->data.limit;
3158	u64 limit_meta = bctl->meta.limit;
3159	u64 limit_sys = bctl->sys.limit;
3160
3161	/* step one make some room on all the devices */
3162	devices = &fs_info->fs_devices->devices;
3163	list_for_each_entry(device, devices, dev_list) {
3164		old_size = btrfs_device_get_total_bytes(device);
3165		size_to_free = div_factor(old_size, 1);
3166		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
3167		if (!device->writeable ||
3168		    btrfs_device_get_total_bytes(device) -
3169		    btrfs_device_get_bytes_used(device) > size_to_free ||
3170		    device->is_tgtdev_for_dev_replace)
3171			continue;
3172
3173		ret = btrfs_shrink_device(device, old_size - size_to_free);
3174		if (ret == -ENOSPC)
3175			break;
3176		BUG_ON(ret);
3177
3178		trans = btrfs_start_transaction(dev_root, 0);
3179		BUG_ON(IS_ERR(trans));
3180
3181		ret = btrfs_grow_device(trans, device, old_size);
3182		BUG_ON(ret);
3183
3184		btrfs_end_transaction(trans, dev_root);
3185	}
3186
3187	/* step two, relocate all the chunks */
3188	path = btrfs_alloc_path();
3189	if (!path) {
3190		ret = -ENOMEM;
3191		goto error;
3192	}
3193
3194	/* zero out stat counters */
3195	spin_lock(&fs_info->balance_lock);
3196	memset(&bctl->stat, 0, sizeof(bctl->stat));
3197	spin_unlock(&fs_info->balance_lock);
3198again:
3199	if (!counting) {
3200		bctl->data.limit = limit_data;
3201		bctl->meta.limit = limit_meta;
3202		bctl->sys.limit = limit_sys;
3203	}
3204	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3205	key.offset = (u64)-1;
3206	key.type = BTRFS_CHUNK_ITEM_KEY;
3207
3208	while (1) {
3209		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3210		    atomic_read(&fs_info->balance_cancel_req)) {
3211			ret = -ECANCELED;
3212			goto error;
3213		}
3214
3215		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3216		if (ret < 0)
3217			goto error;
3218
3219		/*
3220		 * this shouldn't happen, it means the last relocate
3221		 * failed
3222		 */
3223		if (ret == 0)
3224			BUG(); /* FIXME break ? */
3225
3226		ret = btrfs_previous_item(chunk_root, path, 0,
3227					  BTRFS_CHUNK_ITEM_KEY);
3228		if (ret) {
3229			ret = 0;
3230			break;
3231		}
3232
3233		leaf = path->nodes[0];
3234		slot = path->slots[0];
3235		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3236
3237		if (found_key.objectid != key.objectid)
3238			break;
3239
3240		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3241
3242		if (!counting) {
3243			spin_lock(&fs_info->balance_lock);
3244			bctl->stat.considered++;
3245			spin_unlock(&fs_info->balance_lock);
3246		}
3247
3248		ret = should_balance_chunk(chunk_root, leaf, chunk,
3249					   found_key.offset);
3250		btrfs_release_path(path);
3251		if (!ret)
3252			goto loop;
3253
3254		if (counting) {
3255			spin_lock(&fs_info->balance_lock);
3256			bctl->stat.expected++;
3257			spin_unlock(&fs_info->balance_lock);
3258			goto loop;
3259		}
3260
3261		ret = btrfs_relocate_chunk(chunk_root,
3262					   found_key.objectid,
3263					   found_key.offset);
3264		if (ret && ret != -ENOSPC)
3265			goto error;
3266		if (ret == -ENOSPC) {
3267			enospc_errors++;
3268		} else {
3269			spin_lock(&fs_info->balance_lock);
3270			bctl->stat.completed++;
3271			spin_unlock(&fs_info->balance_lock);
3272		}
3273loop:
3274		if (found_key.offset == 0)
3275			break;
3276		key.offset = found_key.offset - 1;
3277	}
3278
3279	if (counting) {
3280		btrfs_release_path(path);
3281		counting = false;
3282		goto again;
3283	}
3284error:
3285	btrfs_free_path(path);
3286	if (enospc_errors) {
3287		btrfs_info(fs_info, "%d enospc errors during balance",
3288		       enospc_errors);
3289		if (!ret)
3290			ret = -ENOSPC;
3291	}
3292
3293	return ret;
3294}
3295
3296/**
3297 * alloc_profile_is_valid - see if a given profile is valid and reduced
3298 * @flags: profile to validate
3299 * @extended: if true @flags is treated as an extended profile
3300 */
3301static int alloc_profile_is_valid(u64 flags, int extended)
3302{
3303	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3304			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3305
3306	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3307
3308	/* 1) check that all other bits are zeroed */
3309	if (flags & ~mask)
3310		return 0;
3311
3312	/* 2) see if profile is reduced */
3313	if (flags == 0)
3314		return !extended; /* "0" is valid for usual profiles */
3315
3316	/* true if exactly one bit set */
3317	return (flags & (flags - 1)) == 0;
3318}
3319
3320static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3321{
3322	/* cancel requested || normal exit path */
3323	return atomic_read(&fs_info->balance_cancel_req) ||
3324		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3325		 atomic_read(&fs_info->balance_cancel_req) == 0);
3326}
3327
3328static void __cancel_balance(struct btrfs_fs_info *fs_info)
3329{
3330	int ret;
3331
3332	unset_balance_control(fs_info);
3333	ret = del_balance_item(fs_info->tree_root);
3334	if (ret)
3335		btrfs_std_error(fs_info, ret);
3336
3337	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3338}
3339
3340/*
3341 * Should be called with both balance and volume mutexes held
3342 */
3343int btrfs_balance(struct btrfs_balance_control *bctl,
3344		  struct btrfs_ioctl_balance_args *bargs)
3345{
3346	struct btrfs_fs_info *fs_info = bctl->fs_info;
3347	u64 allowed;
3348	int mixed = 0;
3349	int ret;
3350	u64 num_devices;
3351	unsigned seq;
3352
3353	if (btrfs_fs_closing(fs_info) ||
3354	    atomic_read(&fs_info->balance_pause_req) ||
3355	    atomic_read(&fs_info->balance_cancel_req)) {
3356		ret = -EINVAL;
3357		goto out;
3358	}
3359
3360	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3361	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3362		mixed = 1;
3363
3364	/*
3365	 * In case of mixed groups both data and meta should be picked,
3366	 * and identical options should be given for both of them.
3367	 */
3368	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3369	if (mixed && (bctl->flags & allowed)) {
3370		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3371		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3372		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3373			btrfs_err(fs_info, "with mixed groups data and "
3374				   "metadata balance options must be the same");
3375			ret = -EINVAL;
3376			goto out;
3377		}
3378	}
3379
3380	num_devices = fs_info->fs_devices->num_devices;
3381	btrfs_dev_replace_lock(&fs_info->dev_replace);
3382	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3383		BUG_ON(num_devices < 1);
3384		num_devices--;
3385	}
3386	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3387	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3388	if (num_devices == 1)
3389		allowed |= BTRFS_BLOCK_GROUP_DUP;
3390	else if (num_devices > 1)
3391		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3392	if (num_devices > 2)
3393		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3394	if (num_devices > 3)
3395		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3396			    BTRFS_BLOCK_GROUP_RAID6);
3397	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3398	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3399	     (bctl->data.target & ~allowed))) {
3400		btrfs_err(fs_info, "unable to start balance with target "
3401			   "data profile %llu",
3402		       bctl->data.target);
3403		ret = -EINVAL;
3404		goto out;
3405	}
3406	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3407	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3408	     (bctl->meta.target & ~allowed))) {
3409		btrfs_err(fs_info,
3410			   "unable to start balance with target metadata profile %llu",
3411		       bctl->meta.target);
3412		ret = -EINVAL;
3413		goto out;
3414	}
3415	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3416	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3417	     (bctl->sys.target & ~allowed))) {
3418		btrfs_err(fs_info,
3419			   "unable to start balance with target system profile %llu",
3420		       bctl->sys.target);
3421		ret = -EINVAL;
3422		goto out;
3423	}
3424
3425	/* allow dup'ed data chunks only in mixed mode */
3426	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3427	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3428		btrfs_err(fs_info, "dup for data is not allowed");
3429		ret = -EINVAL;
3430		goto out;
3431	}
3432
3433	/* allow to reduce meta or sys integrity only if force set */
3434	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3435			BTRFS_BLOCK_GROUP_RAID10 |
3436			BTRFS_BLOCK_GROUP_RAID5 |
3437			BTRFS_BLOCK_GROUP_RAID6;
3438	do {
3439		seq = read_seqbegin(&fs_info->profiles_lock);
3440
3441		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3442		     (fs_info->avail_system_alloc_bits & allowed) &&
3443		     !(bctl->sys.target & allowed)) ||
3444		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3445		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3446		     !(bctl->meta.target & allowed))) {
3447			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3448				btrfs_info(fs_info, "force reducing metadata integrity");
3449			} else {
3450				btrfs_err(fs_info, "balance will reduce metadata "
3451					   "integrity, use force if you want this");
3452				ret = -EINVAL;
3453				goto out;
3454			}
3455		}
3456	} while (read_seqretry(&fs_info->profiles_lock, seq));
3457
3458	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3459		int num_tolerated_disk_barrier_failures;
3460		u64 target = bctl->sys.target;
3461
3462		num_tolerated_disk_barrier_failures =
3463			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3464		if (num_tolerated_disk_barrier_failures > 0 &&
3465		    (target &
3466		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3467		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3468			num_tolerated_disk_barrier_failures = 0;
3469		else if (num_tolerated_disk_barrier_failures > 1 &&
3470			 (target &
3471			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3472			num_tolerated_disk_barrier_failures = 1;
3473
3474		fs_info->num_tolerated_disk_barrier_failures =
3475			num_tolerated_disk_barrier_failures;
3476	}
3477
3478	ret = insert_balance_item(fs_info->tree_root, bctl);
3479	if (ret && ret != -EEXIST)
3480		goto out;
3481
3482	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3483		BUG_ON(ret == -EEXIST);
3484		set_balance_control(bctl);
3485	} else {
3486		BUG_ON(ret != -EEXIST);
3487		spin_lock(&fs_info->balance_lock);
3488		update_balance_args(bctl);
3489		spin_unlock(&fs_info->balance_lock);
3490	}
3491
3492	atomic_inc(&fs_info->balance_running);
3493	mutex_unlock(&fs_info->balance_mutex);
3494
3495	ret = __btrfs_balance(fs_info);
3496
3497	mutex_lock(&fs_info->balance_mutex);
3498	atomic_dec(&fs_info->balance_running);
3499
3500	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3501		fs_info->num_tolerated_disk_barrier_failures =
3502			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3503	}
3504
3505	if (bargs) {
3506		memset(bargs, 0, sizeof(*bargs));
3507		update_ioctl_balance_args(fs_info, 0, bargs);
3508	}
3509
3510	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3511	    balance_need_close(fs_info)) {
3512		__cancel_balance(fs_info);
3513	}
3514
3515	wake_up(&fs_info->balance_wait_q);
3516
3517	return ret;
3518out:
3519	if (bctl->flags & BTRFS_BALANCE_RESUME)
3520		__cancel_balance(fs_info);
3521	else {
3522		kfree(bctl);
3523		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3524	}
3525	return ret;
3526}
3527
3528static int balance_kthread(void *data)
3529{
3530	struct btrfs_fs_info *fs_info = data;
3531	int ret = 0;
3532
3533	mutex_lock(&fs_info->volume_mutex);
3534	mutex_lock(&fs_info->balance_mutex);
3535
3536	if (fs_info->balance_ctl) {
3537		btrfs_info(fs_info, "continuing balance");
3538		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3539	}
3540
3541	mutex_unlock(&fs_info->balance_mutex);
3542	mutex_unlock(&fs_info->volume_mutex);
3543
3544	return ret;
3545}
3546
3547int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3548{
3549	struct task_struct *tsk;
3550
3551	spin_lock(&fs_info->balance_lock);
3552	if (!fs_info->balance_ctl) {
3553		spin_unlock(&fs_info->balance_lock);
3554		return 0;
3555	}
3556	spin_unlock(&fs_info->balance_lock);
3557
3558	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3559		btrfs_info(fs_info, "force skipping balance");
3560		return 0;
3561	}
3562
3563	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3564	return PTR_ERR_OR_ZERO(tsk);
3565}
3566
3567int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3568{
3569	struct btrfs_balance_control *bctl;
3570	struct btrfs_balance_item *item;
3571	struct btrfs_disk_balance_args disk_bargs;
3572	struct btrfs_path *path;
3573	struct extent_buffer *leaf;
3574	struct btrfs_key key;
3575	int ret;
3576
3577	path = btrfs_alloc_path();
3578	if (!path)
3579		return -ENOMEM;
3580
3581	key.objectid = BTRFS_BALANCE_OBJECTID;
3582	key.type = BTRFS_BALANCE_ITEM_KEY;
3583	key.offset = 0;
3584
3585	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3586	if (ret < 0)
3587		goto out;
3588	if (ret > 0) { /* ret = -ENOENT; */
3589		ret = 0;
3590		goto out;
3591	}
3592
3593	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3594	if (!bctl) {
3595		ret = -ENOMEM;
3596		goto out;
3597	}
3598
3599	leaf = path->nodes[0];
3600	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3601
3602	bctl->fs_info = fs_info;
3603	bctl->flags = btrfs_balance_flags(leaf, item);
3604	bctl->flags |= BTRFS_BALANCE_RESUME;
3605
3606	btrfs_balance_data(leaf, item, &disk_bargs);
3607	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3608	btrfs_balance_meta(leaf, item, &disk_bargs);
3609	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3610	btrfs_balance_sys(leaf, item, &disk_bargs);
3611	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3612
3613	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3614
3615	mutex_lock(&fs_info->volume_mutex);
3616	mutex_lock(&fs_info->balance_mutex);
3617
3618	set_balance_control(bctl);
3619
3620	mutex_unlock(&fs_info->balance_mutex);
3621	mutex_unlock(&fs_info->volume_mutex);
3622out:
3623	btrfs_free_path(path);
3624	return ret;
3625}
3626
3627int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3628{
3629	int ret = 0;
3630
3631	mutex_lock(&fs_info->balance_mutex);
3632	if (!fs_info->balance_ctl) {
3633		mutex_unlock(&fs_info->balance_mutex);
3634		return -ENOTCONN;
3635	}
3636
3637	if (atomic_read(&fs_info->balance_running)) {
3638		atomic_inc(&fs_info->balance_pause_req);
3639		mutex_unlock(&fs_info->balance_mutex);
3640
3641		wait_event(fs_info->balance_wait_q,
3642			   atomic_read(&fs_info->balance_running) == 0);
3643
3644		mutex_lock(&fs_info->balance_mutex);
3645		/* we are good with balance_ctl ripped off from under us */
3646		BUG_ON(atomic_read(&fs_info->balance_running));
3647		atomic_dec(&fs_info->balance_pause_req);
3648	} else {
3649		ret = -ENOTCONN;
3650	}
3651
3652	mutex_unlock(&fs_info->balance_mutex);
3653	return ret;
3654}
3655
3656int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3657{
3658	if (fs_info->sb->s_flags & MS_RDONLY)
3659		return -EROFS;
3660
3661	mutex_lock(&fs_info->balance_mutex);
3662	if (!fs_info->balance_ctl) {
3663		mutex_unlock(&fs_info->balance_mutex);
3664		return -ENOTCONN;
3665	}
3666
3667	atomic_inc(&fs_info->balance_cancel_req);
3668	/*
3669	 * if we are running just wait and return, balance item is
3670	 * deleted in btrfs_balance in this case
3671	 */
3672	if (atomic_read(&fs_info->balance_running)) {
3673		mutex_unlock(&fs_info->balance_mutex);
3674		wait_event(fs_info->balance_wait_q,
3675			   atomic_read(&fs_info->balance_running) == 0);
3676		mutex_lock(&fs_info->balance_mutex);
3677	} else {
3678		/* __cancel_balance needs volume_mutex */
3679		mutex_unlock(&fs_info->balance_mutex);
3680		mutex_lock(&fs_info->volume_mutex);
3681		mutex_lock(&fs_info->balance_mutex);
3682
3683		if (fs_info->balance_ctl)
3684			__cancel_balance(fs_info);
3685
3686		mutex_unlock(&fs_info->volume_mutex);
3687	}
3688
3689	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3690	atomic_dec(&fs_info->balance_cancel_req);
3691	mutex_unlock(&fs_info->balance_mutex);
3692	return 0;
3693}
3694
3695static int btrfs_uuid_scan_kthread(void *data)
3696{
3697	struct btrfs_fs_info *fs_info = data;
3698	struct btrfs_root *root = fs_info->tree_root;
3699	struct btrfs_key key;
3700	struct btrfs_key max_key;
3701	struct btrfs_path *path = NULL;
3702	int ret = 0;
3703	struct extent_buffer *eb;
3704	int slot;
3705	struct btrfs_root_item root_item;
3706	u32 item_size;
3707	struct btrfs_trans_handle *trans = NULL;
3708
3709	path = btrfs_alloc_path();
3710	if (!path) {
3711		ret = -ENOMEM;
3712		goto out;
3713	}
3714
3715	key.objectid = 0;
3716	key.type = BTRFS_ROOT_ITEM_KEY;
3717	key.offset = 0;
3718
3719	max_key.objectid = (u64)-1;
3720	max_key.type = BTRFS_ROOT_ITEM_KEY;
3721	max_key.offset = (u64)-1;
3722
3723	while (1) {
3724		ret = btrfs_search_forward(root, &key, path, 0);
3725		if (ret) {
3726			if (ret > 0)
3727				ret = 0;
3728			break;
3729		}
3730
3731		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3732		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3733		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3734		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3735			goto skip;
3736
3737		eb = path->nodes[0];
3738		slot = path->slots[0];
3739		item_size = btrfs_item_size_nr(eb, slot);
3740		if (item_size < sizeof(root_item))
3741			goto skip;
3742
3743		read_extent_buffer(eb, &root_item,
3744				   btrfs_item_ptr_offset(eb, slot),
3745				   (int)sizeof(root_item));
3746		if (btrfs_root_refs(&root_item) == 0)
3747			goto skip;
3748
3749		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3750		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3751			if (trans)
3752				goto update_tree;
3753
3754			btrfs_release_path(path);
3755			/*
3756			 * 1 - subvol uuid item
3757			 * 1 - received_subvol uuid item
3758			 */
3759			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3760			if (IS_ERR(trans)) {
3761				ret = PTR_ERR(trans);
3762				break;
3763			}
3764			continue;
3765		} else {
3766			goto skip;
3767		}
3768update_tree:
3769		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3770			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3771						  root_item.uuid,
3772						  BTRFS_UUID_KEY_SUBVOL,
3773						  key.objectid);
3774			if (ret < 0) {
3775				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3776					ret);
3777				break;
3778			}
3779		}
3780
3781		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3782			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3783						  root_item.received_uuid,
3784						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3785						  key.objectid);
3786			if (ret < 0) {
3787				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3788					ret);
3789				break;
3790			}
3791		}
3792
3793skip:
3794		if (trans) {
3795			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3796			trans = NULL;
3797			if (ret)
3798				break;
3799		}
3800
3801		btrfs_release_path(path);
3802		if (key.offset < (u64)-1) {
3803			key.offset++;
3804		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3805			key.offset = 0;
3806			key.type = BTRFS_ROOT_ITEM_KEY;
3807		} else if (key.objectid < (u64)-1) {
3808			key.offset = 0;
3809			key.type = BTRFS_ROOT_ITEM_KEY;
3810			key.objectid++;
3811		} else {
3812			break;
3813		}
3814		cond_resched();
3815	}
3816
3817out:
3818	btrfs_free_path(path);
3819	if (trans && !IS_ERR(trans))
3820		btrfs_end_transaction(trans, fs_info->uuid_root);
3821	if (ret)
3822		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
3823	else
3824		fs_info->update_uuid_tree_gen = 1;
3825	up(&fs_info->uuid_tree_rescan_sem);
3826	return 0;
3827}
3828
3829/*
3830 * Callback for btrfs_uuid_tree_iterate().
3831 * returns:
3832 * 0	check succeeded, the entry is not outdated.
3833 * < 0	if an error occured.
3834 * > 0	if the check failed, which means the caller shall remove the entry.
3835 */
3836static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3837				       u8 *uuid, u8 type, u64 subid)
3838{
3839	struct btrfs_key key;
3840	int ret = 0;
3841	struct btrfs_root *subvol_root;
3842
3843	if (type != BTRFS_UUID_KEY_SUBVOL &&
3844	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3845		goto out;
3846
3847	key.objectid = subid;
3848	key.type = BTRFS_ROOT_ITEM_KEY;
3849	key.offset = (u64)-1;
3850	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3851	if (IS_ERR(subvol_root)) {
3852		ret = PTR_ERR(subvol_root);
3853		if (ret == -ENOENT)
3854			ret = 1;
3855		goto out;
3856	}
3857
3858	switch (type) {
3859	case BTRFS_UUID_KEY_SUBVOL:
3860		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3861			ret = 1;
3862		break;
3863	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3864		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3865			   BTRFS_UUID_SIZE))
3866			ret = 1;
3867		break;
3868	}
3869
3870out:
3871	return ret;
3872}
3873
3874static int btrfs_uuid_rescan_kthread(void *data)
3875{
3876	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3877	int ret;
3878
3879	/*
3880	 * 1st step is to iterate through the existing UUID tree and
3881	 * to delete all entries that contain outdated data.
3882	 * 2nd step is to add all missing entries to the UUID tree.
3883	 */
3884	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3885	if (ret < 0) {
3886		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
3887		up(&fs_info->uuid_tree_rescan_sem);
3888		return ret;
3889	}
3890	return btrfs_uuid_scan_kthread(data);
3891}
3892
3893int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3894{
3895	struct btrfs_trans_handle *trans;
3896	struct btrfs_root *tree_root = fs_info->tree_root;
3897	struct btrfs_root *uuid_root;
3898	struct task_struct *task;
3899	int ret;
3900
3901	/*
3902	 * 1 - root node
3903	 * 1 - root item
3904	 */
3905	trans = btrfs_start_transaction(tree_root, 2);
3906	if (IS_ERR(trans))
3907		return PTR_ERR(trans);
3908
3909	uuid_root = btrfs_create_tree(trans, fs_info,
3910				      BTRFS_UUID_TREE_OBJECTID);
3911	if (IS_ERR(uuid_root)) {
3912		btrfs_abort_transaction(trans, tree_root,
3913					PTR_ERR(uuid_root));
3914		return PTR_ERR(uuid_root);
3915	}
3916
3917	fs_info->uuid_root = uuid_root;
3918
3919	ret = btrfs_commit_transaction(trans, tree_root);
3920	if (ret)
3921		return ret;
3922
3923	down(&fs_info->uuid_tree_rescan_sem);
3924	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3925	if (IS_ERR(task)) {
3926		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
3927		btrfs_warn(fs_info, "failed to start uuid_scan task");
3928		up(&fs_info->uuid_tree_rescan_sem);
3929		return PTR_ERR(task);
3930	}
3931
3932	return 0;
3933}
3934
3935int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3936{
3937	struct task_struct *task;
3938
3939	down(&fs_info->uuid_tree_rescan_sem);
3940	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3941	if (IS_ERR(task)) {
3942		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
3943		btrfs_warn(fs_info, "failed to start uuid_rescan task");
3944		up(&fs_info->uuid_tree_rescan_sem);
3945		return PTR_ERR(task);
3946	}
3947
3948	return 0;
3949}
3950
3951/*
3952 * shrinking a device means finding all of the device extents past
3953 * the new size, and then following the back refs to the chunks.
3954 * The chunk relocation code actually frees the device extent
3955 */
3956int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3957{
3958	struct btrfs_trans_handle *trans;
3959	struct btrfs_root *root = device->dev_root;
3960	struct btrfs_dev_extent *dev_extent = NULL;
3961	struct btrfs_path *path;
3962	u64 length;
3963	u64 chunk_objectid;
3964	u64 chunk_offset;
3965	int ret;
3966	int slot;
3967	int failed = 0;
3968	bool retried = false;
3969	struct extent_buffer *l;
3970	struct btrfs_key key;
3971	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3972	u64 old_total = btrfs_super_total_bytes(super_copy);
3973	u64 old_size = btrfs_device_get_total_bytes(device);
3974	u64 diff = old_size - new_size;
3975
3976	if (device->is_tgtdev_for_dev_replace)
3977		return -EINVAL;
3978
3979	path = btrfs_alloc_path();
3980	if (!path)
3981		return -ENOMEM;
3982
3983	path->reada = 2;
3984
3985	lock_chunks(root);
3986
3987	btrfs_device_set_total_bytes(device, new_size);
3988	if (device->writeable) {
3989		device->fs_devices->total_rw_bytes -= diff;
3990		spin_lock(&root->fs_info->free_chunk_lock);
3991		root->fs_info->free_chunk_space -= diff;
3992		spin_unlock(&root->fs_info->free_chunk_lock);
3993	}
3994	unlock_chunks(root);
3995
3996again:
3997	key.objectid = device->devid;
3998	key.offset = (u64)-1;
3999	key.type = BTRFS_DEV_EXTENT_KEY;
4000
4001	do {
4002		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4003		if (ret < 0)
4004			goto done;
4005
4006		ret = btrfs_previous_item(root, path, 0, key.type);
4007		if (ret < 0)
4008			goto done;
4009		if (ret) {
4010			ret = 0;
4011			btrfs_release_path(path);
4012			break;
4013		}
4014
4015		l = path->nodes[0];
4016		slot = path->slots[0];
4017		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4018
4019		if (key.objectid != device->devid) {
4020			btrfs_release_path(path);
4021			break;
4022		}
4023
4024		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4025		length = btrfs_dev_extent_length(l, dev_extent);
4026
4027		if (key.offset + length <= new_size) {
4028			btrfs_release_path(path);
4029			break;
4030		}
4031
4032		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
4033		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4034		btrfs_release_path(path);
4035
4036		ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
4037		if (ret && ret != -ENOSPC)
4038			goto done;
4039		if (ret == -ENOSPC)
4040			failed++;
4041	} while (key.offset-- > 0);
4042
4043	if (failed && !retried) {
4044		failed = 0;
4045		retried = true;
4046		goto again;
4047	} else if (failed && retried) {
4048		ret = -ENOSPC;
4049		lock_chunks(root);
4050
4051		btrfs_device_set_total_bytes(device, old_size);
4052		if (device->writeable)
4053			device->fs_devices->total_rw_bytes += diff;
4054		spin_lock(&root->fs_info->free_chunk_lock);
4055		root->fs_info->free_chunk_space += diff;
4056		spin_unlock(&root->fs_info->free_chunk_lock);
4057		unlock_chunks(root);
4058		goto done;
4059	}
4060
4061	/* Shrinking succeeded, else we would be at "done". */
4062	trans = btrfs_start_transaction(root, 0);
4063	if (IS_ERR(trans)) {
4064		ret = PTR_ERR(trans);
4065		goto done;
4066	}
4067
4068	lock_chunks(root);
4069	btrfs_device_set_disk_total_bytes(device, new_size);
4070	if (list_empty(&device->resized_list))
4071		list_add_tail(&device->resized_list,
4072			      &root->fs_info->fs_devices->resized_devices);
4073
4074	WARN_ON(diff > old_total);
4075	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4076	unlock_chunks(root);
4077
4078	/* Now btrfs_update_device() will change the on-disk size. */
4079	ret = btrfs_update_device(trans, device);
4080	btrfs_end_transaction(trans, root);
4081done:
4082	btrfs_free_path(path);
4083	return ret;
4084}
4085
4086static int btrfs_add_system_chunk(struct btrfs_root *root,
4087			   struct btrfs_key *key,
4088			   struct btrfs_chunk *chunk, int item_size)
4089{
4090	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4091	struct btrfs_disk_key disk_key;
4092	u32 array_size;
4093	u8 *ptr;
4094
4095	lock_chunks(root);
4096	array_size = btrfs_super_sys_array_size(super_copy);
4097	if (array_size + item_size + sizeof(disk_key)
4098			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4099		unlock_chunks(root);
4100		return -EFBIG;
4101	}
4102
4103	ptr = super_copy->sys_chunk_array + array_size;
4104	btrfs_cpu_key_to_disk(&disk_key, key);
4105	memcpy(ptr, &disk_key, sizeof(disk_key));
4106	ptr += sizeof(disk_key);
4107	memcpy(ptr, chunk, item_size);
4108	item_size += sizeof(disk_key);
4109	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4110	unlock_chunks(root);
4111
4112	return 0;
4113}
4114
4115/*
4116 * sort the devices in descending order by max_avail, total_avail
4117 */
4118static int btrfs_cmp_device_info(const void *a, const void *b)
4119{
4120	const struct btrfs_device_info *di_a = a;
4121	const struct btrfs_device_info *di_b = b;
4122
4123	if (di_a->max_avail > di_b->max_avail)
4124		return -1;
4125	if (di_a->max_avail < di_b->max_avail)
4126		return 1;
4127	if (di_a->total_avail > di_b->total_avail)
4128		return -1;
4129	if (di_a->total_avail < di_b->total_avail)
4130		return 1;
4131	return 0;
4132}
4133
4134static const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
4135	[BTRFS_RAID_RAID10] = {
4136		.sub_stripes	= 2,
4137		.dev_stripes	= 1,
4138		.devs_max	= 0,	/* 0 == as many as possible */
4139		.devs_min	= 4,
4140		.devs_increment	= 2,
4141		.ncopies	= 2,
4142	},
4143	[BTRFS_RAID_RAID1] = {
4144		.sub_stripes	= 1,
4145		.dev_stripes	= 1,
4146		.devs_max	= 2,
4147		.devs_min	= 2,
4148		.devs_increment	= 2,
4149		.ncopies	= 2,
4150	},
4151	[BTRFS_RAID_DUP] = {
4152		.sub_stripes	= 1,
4153		.dev_stripes	= 2,
4154		.devs_max	= 1,
4155		.devs_min	= 1,
4156		.devs_increment	= 1,
4157		.ncopies	= 2,
4158	},
4159	[BTRFS_RAID_RAID0] = {
4160		.sub_stripes	= 1,
4161		.dev_stripes	= 1,
4162		.devs_max	= 0,
4163		.devs_min	= 2,
4164		.devs_increment	= 1,
4165		.ncopies	= 1,
4166	},
4167	[BTRFS_RAID_SINGLE] = {
4168		.sub_stripes	= 1,
4169		.dev_stripes	= 1,
4170		.devs_max	= 1,
4171		.devs_min	= 1,
4172		.devs_increment	= 1,
4173		.ncopies	= 1,
4174	},
4175	[BTRFS_RAID_RAID5] = {
4176		.sub_stripes	= 1,
4177		.dev_stripes	= 1,
4178		.devs_max	= 0,
4179		.devs_min	= 2,
4180		.devs_increment	= 1,
4181		.ncopies	= 2,
4182	},
4183	[BTRFS_RAID_RAID6] = {
4184		.sub_stripes	= 1,
4185		.dev_stripes	= 1,
4186		.devs_max	= 0,
4187		.devs_min	= 3,
4188		.devs_increment	= 1,
4189		.ncopies	= 3,
4190	},
4191};
4192
4193static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4194{
4195	/* TODO allow them to set a preferred stripe size */
4196	return 64 * 1024;
4197}
4198
4199static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4200{
4201	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4202		return;
4203
4204	btrfs_set_fs_incompat(info, RAID56);
4205}
4206
4207#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4208			- sizeof(struct btrfs_item)		\
4209			- sizeof(struct btrfs_chunk))		\
4210			/ sizeof(struct btrfs_stripe) + 1)
4211
4212#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4213				- 2 * sizeof(struct btrfs_disk_key)	\
4214				- 2 * sizeof(struct btrfs_chunk))	\
4215				/ sizeof(struct btrfs_stripe) + 1)
4216
4217static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4218			       struct btrfs_root *extent_root, u64 start,
4219			       u64 type)
4220{
4221	struct btrfs_fs_info *info = extent_root->fs_info;
4222	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4223	struct list_head *cur;
4224	struct map_lookup *map = NULL;
4225	struct extent_map_tree *em_tree;
4226	struct extent_map *em;
4227	struct btrfs_device_info *devices_info = NULL;
4228	u64 total_avail;
4229	int num_stripes;	/* total number of stripes to allocate */
4230	int data_stripes;	/* number of stripes that count for
4231				   block group size */
4232	int sub_stripes;	/* sub_stripes info for map */
4233	int dev_stripes;	/* stripes per dev */
4234	int devs_max;		/* max devs to use */
4235	int devs_min;		/* min devs needed */
4236	int devs_increment;	/* ndevs has to be a multiple of this */
4237	int ncopies;		/* how many copies to data has */
4238	int ret;
4239	u64 max_stripe_size;
4240	u64 max_chunk_size;
4241	u64 stripe_size;
4242	u64 num_bytes;
4243	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4244	int ndevs;
4245	int i;
4246	int j;
4247	int index;
4248
4249	BUG_ON(!alloc_profile_is_valid(type, 0));
4250
4251	if (list_empty(&fs_devices->alloc_list))
4252		return -ENOSPC;
4253
4254	index = __get_raid_index(type);
4255
4256	sub_stripes = btrfs_raid_array[index].sub_stripes;
4257	dev_stripes = btrfs_raid_array[index].dev_stripes;
4258	devs_max = btrfs_raid_array[index].devs_max;
4259	devs_min = btrfs_raid_array[index].devs_min;
4260	devs_increment = btrfs_raid_array[index].devs_increment;
4261	ncopies = btrfs_raid_array[index].ncopies;
4262
4263	if (type & BTRFS_BLOCK_GROUP_DATA) {
4264		max_stripe_size = 1024 * 1024 * 1024;
4265		max_chunk_size = 10 * max_stripe_size;
4266		if (!devs_max)
4267			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4268	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4269		/* for larger filesystems, use larger metadata chunks */
4270		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4271			max_stripe_size = 1024 * 1024 * 1024;
4272		else
4273			max_stripe_size = 256 * 1024 * 1024;
4274		max_chunk_size = max_stripe_size;
4275		if (!devs_max)
4276			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4277	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4278		max_stripe_size = 32 * 1024 * 1024;
4279		max_chunk_size = 2 * max_stripe_size;
4280		if (!devs_max)
4281			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4282	} else {
4283		btrfs_err(info, "invalid chunk type 0x%llx requested",
4284		       type);
4285		BUG_ON(1);
4286	}
4287
4288	/* we don't want a chunk larger than 10% of writeable space */
4289	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4290			     max_chunk_size);
4291
4292	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4293			       GFP_NOFS);
4294	if (!devices_info)
4295		return -ENOMEM;
4296
4297	cur = fs_devices->alloc_list.next;
4298
4299	/*
4300	 * in the first pass through the devices list, we gather information
4301	 * about the available holes on each device.
4302	 */
4303	ndevs = 0;
4304	while (cur != &fs_devices->alloc_list) {
4305		struct btrfs_device *device;
4306		u64 max_avail;
4307		u64 dev_offset;
4308
4309		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4310
4311		cur = cur->next;
4312
4313		if (!device->writeable) {
4314			WARN(1, KERN_ERR
4315			       "BTRFS: read-only device in alloc_list\n");
4316			continue;
4317		}
4318
4319		if (!device->in_fs_metadata ||
4320		    device->is_tgtdev_for_dev_replace)
4321			continue;
4322
4323		if (device->total_bytes > device->bytes_used)
4324			total_avail = device->total_bytes - device->bytes_used;
4325		else
4326			total_avail = 0;
4327
4328		/* If there is no space on this device, skip it. */
4329		if (total_avail == 0)
4330			continue;
4331
4332		ret = find_free_dev_extent(trans, device,
4333					   max_stripe_size * dev_stripes,
4334					   &dev_offset, &max_avail);
4335		if (ret && ret != -ENOSPC)
4336			goto error;
4337
4338		if (ret == 0)
4339			max_avail = max_stripe_size * dev_stripes;
4340
4341		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4342			continue;
4343
4344		if (ndevs == fs_devices->rw_devices) {
4345			WARN(1, "%s: found more than %llu devices\n",
4346			     __func__, fs_devices->rw_devices);
4347			break;
4348		}
4349		devices_info[ndevs].dev_offset = dev_offset;
4350		devices_info[ndevs].max_avail = max_avail;
4351		devices_info[ndevs].total_avail = total_avail;
4352		devices_info[ndevs].dev = device;
4353		++ndevs;
4354	}
4355
4356	/*
4357	 * now sort the devices by hole size / available space
4358	 */
4359	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4360	     btrfs_cmp_device_info, NULL);
4361
4362	/* round down to number of usable stripes */
4363	ndevs -= ndevs % devs_increment;
4364
4365	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4366		ret = -ENOSPC;
4367		goto error;
4368	}
4369
4370	if (devs_max && ndevs > devs_max)
4371		ndevs = devs_max;
4372	/*
4373	 * the primary goal is to maximize the number of stripes, so use as many
4374	 * devices as possible, even if the stripes are not maximum sized.
4375	 */
4376	stripe_size = devices_info[ndevs-1].max_avail;
4377	num_stripes = ndevs * dev_stripes;
4378
4379	/*
4380	 * this will have to be fixed for RAID1 and RAID10 over
4381	 * more drives
4382	 */
4383	data_stripes = num_stripes / ncopies;
4384
4385	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4386		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4387				 btrfs_super_stripesize(info->super_copy));
4388		data_stripes = num_stripes - 1;
4389	}
4390	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4391		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4392				 btrfs_super_stripesize(info->super_copy));
4393		data_stripes = num_stripes - 2;
4394	}
4395
4396	/*
4397	 * Use the number of data stripes to figure out how big this chunk
4398	 * is really going to be in terms of logical address space,
4399	 * and compare that answer with the max chunk size
4400	 */
4401	if (stripe_size * data_stripes > max_chunk_size) {
4402		u64 mask = (1ULL << 24) - 1;
4403
4404		stripe_size = div_u64(max_chunk_size, data_stripes);
4405
4406		/* bump the answer up to a 16MB boundary */
4407		stripe_size = (stripe_size + mask) & ~mask;
4408
4409		/* but don't go higher than the limits we found
4410		 * while searching for free extents
4411		 */
4412		if (stripe_size > devices_info[ndevs-1].max_avail)
4413			stripe_size = devices_info[ndevs-1].max_avail;
4414	}
4415
4416	stripe_size = div_u64(stripe_size, dev_stripes);
4417
4418	/* align to BTRFS_STRIPE_LEN */
4419	stripe_size = div_u64(stripe_size, raid_stripe_len);
4420	stripe_size *= raid_stripe_len;
4421
4422	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4423	if (!map) {
4424		ret = -ENOMEM;
4425		goto error;
4426	}
4427	map->num_stripes = num_stripes;
4428
4429	for (i = 0; i < ndevs; ++i) {
4430		for (j = 0; j < dev_stripes; ++j) {
4431			int s = i * dev_stripes + j;
4432			map->stripes[s].dev = devices_info[i].dev;
4433			map->stripes[s].physical = devices_info[i].dev_offset +
4434						   j * stripe_size;
4435		}
4436	}
4437	map->sector_size = extent_root->sectorsize;
4438	map->stripe_len = raid_stripe_len;
4439	map->io_align = raid_stripe_len;
4440	map->io_width = raid_stripe_len;
4441	map->type = type;
4442	map->sub_stripes = sub_stripes;
4443
4444	num_bytes = stripe_size * data_stripes;
4445
4446	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4447
4448	em = alloc_extent_map();
4449	if (!em) {
4450		kfree(map);
4451		ret = -ENOMEM;
4452		goto error;
4453	}
4454	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4455	em->bdev = (struct block_device *)map;
4456	em->start = start;
4457	em->len = num_bytes;
4458	em->block_start = 0;
4459	em->block_len = em->len;
4460	em->orig_block_len = stripe_size;
4461
4462	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4463	write_lock(&em_tree->lock);
4464	ret = add_extent_mapping(em_tree, em, 0);
4465	if (!ret) {
4466		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4467		atomic_inc(&em->refs);
4468	}
4469	write_unlock(&em_tree->lock);
4470	if (ret) {
4471		free_extent_map(em);
4472		goto error;
4473	}
4474
4475	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4476				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4477				     start, num_bytes);
4478	if (ret)
4479		goto error_del_extent;
4480
4481	for (i = 0; i < map->num_stripes; i++) {
4482		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4483		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4484	}
4485
4486	spin_lock(&extent_root->fs_info->free_chunk_lock);
4487	extent_root->fs_info->free_chunk_space -= (stripe_size *
4488						   map->num_stripes);
4489	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4490
4491	free_extent_map(em);
4492	check_raid56_incompat_flag(extent_root->fs_info, type);
4493
4494	kfree(devices_info);
4495	return 0;
4496
4497error_del_extent:
4498	write_lock(&em_tree->lock);
4499	remove_extent_mapping(em_tree, em);
4500	write_unlock(&em_tree->lock);
4501
4502	/* One for our allocation */
4503	free_extent_map(em);
4504	/* One for the tree reference */
4505	free_extent_map(em);
4506	/* One for the pending_chunks list reference */
4507	free_extent_map(em);
4508error:
4509	kfree(devices_info);
4510	return ret;
4511}
4512
4513int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4514				struct btrfs_root *extent_root,
4515				u64 chunk_offset, u64 chunk_size)
4516{
4517	struct btrfs_key key;
4518	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4519	struct btrfs_device *device;
4520	struct btrfs_chunk *chunk;
4521	struct btrfs_stripe *stripe;
4522	struct extent_map_tree *em_tree;
4523	struct extent_map *em;
4524	struct map_lookup *map;
4525	size_t item_size;
4526	u64 dev_offset;
4527	u64 stripe_size;
4528	int i = 0;
4529	int ret;
4530
4531	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4532	read_lock(&em_tree->lock);
4533	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4534	read_unlock(&em_tree->lock);
4535
4536	if (!em) {
4537		btrfs_crit(extent_root->fs_info, "unable to find logical "
4538			   "%Lu len %Lu", chunk_offset, chunk_size);
4539		return -EINVAL;
4540	}
4541
4542	if (em->start != chunk_offset || em->len != chunk_size) {
4543		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4544			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4545			  chunk_size, em->start, em->len);
4546		free_extent_map(em);
4547		return -EINVAL;
4548	}
4549
4550	map = (struct map_lookup *)em->bdev;
4551	item_size = btrfs_chunk_item_size(map->num_stripes);
4552	stripe_size = em->orig_block_len;
4553
4554	chunk = kzalloc(item_size, GFP_NOFS);
4555	if (!chunk) {
4556		ret = -ENOMEM;
4557		goto out;
4558	}
4559
4560	for (i = 0; i < map->num_stripes; i++) {
4561		device = map->stripes[i].dev;
4562		dev_offset = map->stripes[i].physical;
4563
4564		ret = btrfs_update_device(trans, device);
4565		if (ret)
4566			goto out;
4567		ret = btrfs_alloc_dev_extent(trans, device,
4568					     chunk_root->root_key.objectid,
4569					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4570					     chunk_offset, dev_offset,
4571					     stripe_size);
4572		if (ret)
4573			goto out;
4574	}
4575
4576	stripe = &chunk->stripe;
4577	for (i = 0; i < map->num_stripes; i++) {
4578		device = map->stripes[i].dev;
4579		dev_offset = map->stripes[i].physical;
4580
4581		btrfs_set_stack_stripe_devid(stripe, device->devid);
4582		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4583		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4584		stripe++;
4585	}
4586
4587	btrfs_set_stack_chunk_length(chunk, chunk_size);
4588	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4589	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4590	btrfs_set_stack_chunk_type(chunk, map->type);
4591	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4592	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4593	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4594	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4595	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4596
4597	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4598	key.type = BTRFS_CHUNK_ITEM_KEY;
4599	key.offset = chunk_offset;
4600
4601	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4602	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4603		/*
4604		 * TODO: Cleanup of inserted chunk root in case of
4605		 * failure.
4606		 */
4607		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4608					     item_size);
4609	}
4610
4611out:
4612	kfree(chunk);
4613	free_extent_map(em);
4614	return ret;
4615}
4616
4617/*
4618 * Chunk allocation falls into two parts. The first part does works
4619 * that make the new allocated chunk useable, but not do any operation
4620 * that modifies the chunk tree. The second part does the works that
4621 * require modifying the chunk tree. This division is important for the
4622 * bootstrap process of adding storage to a seed btrfs.
4623 */
4624int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4625		      struct btrfs_root *extent_root, u64 type)
4626{
4627	u64 chunk_offset;
4628
4629	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4630	chunk_offset = find_next_chunk(extent_root->fs_info);
4631	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4632}
4633
4634static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4635					 struct btrfs_root *root,
4636					 struct btrfs_device *device)
4637{
4638	u64 chunk_offset;
4639	u64 sys_chunk_offset;
4640	u64 alloc_profile;
4641	struct btrfs_fs_info *fs_info = root->fs_info;
4642	struct btrfs_root *extent_root = fs_info->extent_root;
4643	int ret;
4644
4645	chunk_offset = find_next_chunk(fs_info);
4646	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4647	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4648				  alloc_profile);
4649	if (ret)
4650		return ret;
4651
4652	sys_chunk_offset = find_next_chunk(root->fs_info);
4653	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4654	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4655				  alloc_profile);
4656	return ret;
4657}
4658
4659static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4660{
4661	int max_errors;
4662
4663	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4664			 BTRFS_BLOCK_GROUP_RAID10 |
4665			 BTRFS_BLOCK_GROUP_RAID5 |
4666			 BTRFS_BLOCK_GROUP_DUP)) {
4667		max_errors = 1;
4668	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4669		max_errors = 2;
4670	} else {
4671		max_errors = 0;
4672	}
4673
4674	return max_errors;
4675}
4676
4677int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4678{
4679	struct extent_map *em;
4680	struct map_lookup *map;
4681	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4682	int readonly = 0;
4683	int miss_ndevs = 0;
4684	int i;
4685
4686	read_lock(&map_tree->map_tree.lock);
4687	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4688	read_unlock(&map_tree->map_tree.lock);
4689	if (!em)
4690		return 1;
4691
4692	map = (struct map_lookup *)em->bdev;
4693	for (i = 0; i < map->num_stripes; i++) {
4694		if (map->stripes[i].dev->missing) {
4695			miss_ndevs++;
4696			continue;
4697		}
4698
4699		if (!map->stripes[i].dev->writeable) {
4700			readonly = 1;
4701			goto end;
4702		}
4703	}
4704
4705	/*
4706	 * If the number of missing devices is larger than max errors,
4707	 * we can not write the data into that chunk successfully, so
4708	 * set it readonly.
4709	 */
4710	if (miss_ndevs > btrfs_chunk_max_errors(map))
4711		readonly = 1;
4712end:
4713	free_extent_map(em);
4714	return readonly;
4715}
4716
4717void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4718{
4719	extent_map_tree_init(&tree->map_tree);
4720}
4721
4722void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4723{
4724	struct extent_map *em;
4725
4726	while (1) {
4727		write_lock(&tree->map_tree.lock);
4728		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4729		if (em)
4730			remove_extent_mapping(&tree->map_tree, em);
4731		write_unlock(&tree->map_tree.lock);
4732		if (!em)
4733			break;
4734		/* once for us */
4735		free_extent_map(em);
4736		/* once for the tree */
4737		free_extent_map(em);
4738	}
4739}
4740
4741int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4742{
4743	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4744	struct extent_map *em;
4745	struct map_lookup *map;
4746	struct extent_map_tree *em_tree = &map_tree->map_tree;
4747	int ret;
4748
4749	read_lock(&em_tree->lock);
4750	em = lookup_extent_mapping(em_tree, logical, len);
4751	read_unlock(&em_tree->lock);
4752
4753	/*
4754	 * We could return errors for these cases, but that could get ugly and
4755	 * we'd probably do the same thing which is just not do anything else
4756	 * and exit, so return 1 so the callers don't try to use other copies.
4757	 */
4758	if (!em) {
4759		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4760			    logical+len);
4761		return 1;
4762	}
4763
4764	if (em->start > logical || em->start + em->len < logical) {
4765		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4766			    "%Lu-%Lu", logical, logical+len, em->start,
4767			    em->start + em->len);
4768		free_extent_map(em);
4769		return 1;
4770	}
4771
4772	map = (struct map_lookup *)em->bdev;
4773	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4774		ret = map->num_stripes;
4775	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4776		ret = map->sub_stripes;
4777	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4778		ret = 2;
4779	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4780		ret = 3;
4781	else
4782		ret = 1;
4783	free_extent_map(em);
4784
4785	btrfs_dev_replace_lock(&fs_info->dev_replace);
4786	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4787		ret++;
4788	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4789
4790	return ret;
4791}
4792
4793unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4794				    struct btrfs_mapping_tree *map_tree,
4795				    u64 logical)
4796{
4797	struct extent_map *em;
4798	struct map_lookup *map;
4799	struct extent_map_tree *em_tree = &map_tree->map_tree;
4800	unsigned long len = root->sectorsize;
4801
4802	read_lock(&em_tree->lock);
4803	em = lookup_extent_mapping(em_tree, logical, len);
4804	read_unlock(&em_tree->lock);
4805	BUG_ON(!em);
4806
4807	BUG_ON(em->start > logical || em->start + em->len < logical);
4808	map = (struct map_lookup *)em->bdev;
4809	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
4810		len = map->stripe_len * nr_data_stripes(map);
4811	free_extent_map(em);
4812	return len;
4813}
4814
4815int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4816			   u64 logical, u64 len, int mirror_num)
4817{
4818	struct extent_map *em;
4819	struct map_lookup *map;
4820	struct extent_map_tree *em_tree = &map_tree->map_tree;
4821	int ret = 0;
4822
4823	read_lock(&em_tree->lock);
4824	em = lookup_extent_mapping(em_tree, logical, len);
4825	read_unlock(&em_tree->lock);
4826	BUG_ON(!em);
4827
4828	BUG_ON(em->start > logical || em->start + em->len < logical);
4829	map = (struct map_lookup *)em->bdev;
4830	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
4831		ret = 1;
4832	free_extent_map(em);
4833	return ret;
4834}
4835
4836static int find_live_mirror(struct btrfs_fs_info *fs_info,
4837			    struct map_lookup *map, int first, int num,
4838			    int optimal, int dev_replace_is_ongoing)
4839{
4840	int i;
4841	int tolerance;
4842	struct btrfs_device *srcdev;
4843
4844	if (dev_replace_is_ongoing &&
4845	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4846	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4847		srcdev = fs_info->dev_replace.srcdev;
4848	else
4849		srcdev = NULL;
4850
4851	/*
4852	 * try to avoid the drive that is the source drive for a
4853	 * dev-replace procedure, only choose it if no other non-missing
4854	 * mirror is available
4855	 */
4856	for (tolerance = 0; tolerance < 2; tolerance++) {
4857		if (map->stripes[optimal].dev->bdev &&
4858		    (tolerance || map->stripes[optimal].dev != srcdev))
4859			return optimal;
4860		for (i = first; i < first + num; i++) {
4861			if (map->stripes[i].dev->bdev &&
4862			    (tolerance || map->stripes[i].dev != srcdev))
4863				return i;
4864		}
4865	}
4866
4867	/* we couldn't find one that doesn't fail.  Just return something
4868	 * and the io error handling code will clean up eventually
4869	 */
4870	return optimal;
4871}
4872
4873static inline int parity_smaller(u64 a, u64 b)
4874{
4875	return a > b;
4876}
4877
4878/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4879static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
4880{
4881	struct btrfs_bio_stripe s;
4882	int i;
4883	u64 l;
4884	int again = 1;
4885
4886	while (again) {
4887		again = 0;
4888		for (i = 0; i < num_stripes - 1; i++) {
4889			if (parity_smaller(bbio->raid_map[i],
4890					   bbio->raid_map[i+1])) {
4891				s = bbio->stripes[i];
4892				l = bbio->raid_map[i];
4893				bbio->stripes[i] = bbio->stripes[i+1];
4894				bbio->raid_map[i] = bbio->raid_map[i+1];
4895				bbio->stripes[i+1] = s;
4896				bbio->raid_map[i+1] = l;
4897
4898				again = 1;
4899			}
4900		}
4901	}
4902}
4903
4904static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
4905{
4906	struct btrfs_bio *bbio = kzalloc(
4907		 /* the size of the btrfs_bio */
4908		sizeof(struct btrfs_bio) +
4909		/* plus the variable array for the stripes */
4910		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
4911		/* plus the variable array for the tgt dev */
4912		sizeof(int) * (real_stripes) +
4913		/*
4914		 * plus the raid_map, which includes both the tgt dev
4915		 * and the stripes
4916		 */
4917		sizeof(u64) * (total_stripes),
4918		GFP_NOFS);
4919	if (!bbio)
4920		return NULL;
4921
4922	atomic_set(&bbio->error, 0);
4923	atomic_set(&bbio->refs, 1);
4924
4925	return bbio;
4926}
4927
4928void btrfs_get_bbio(struct btrfs_bio *bbio)
4929{
4930	WARN_ON(!atomic_read(&bbio->refs));
4931	atomic_inc(&bbio->refs);
4932}
4933
4934void btrfs_put_bbio(struct btrfs_bio *bbio)
4935{
4936	if (!bbio)
4937		return;
4938	if (atomic_dec_and_test(&bbio->refs))
4939		kfree(bbio);
4940}
4941
4942static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4943			     u64 logical, u64 *length,
4944			     struct btrfs_bio **bbio_ret,
4945			     int mirror_num, int need_raid_map)
4946{
4947	struct extent_map *em;
4948	struct map_lookup *map;
4949	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4950	struct extent_map_tree *em_tree = &map_tree->map_tree;
4951	u64 offset;
4952	u64 stripe_offset;
4953	u64 stripe_end_offset;
4954	u64 stripe_nr;
4955	u64 stripe_nr_orig;
4956	u64 stripe_nr_end;
4957	u64 stripe_len;
4958	u32 stripe_index;
4959	int i;
4960	int ret = 0;
4961	int num_stripes;
4962	int max_errors = 0;
4963	int tgtdev_indexes = 0;
4964	struct btrfs_bio *bbio = NULL;
4965	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4966	int dev_replace_is_ongoing = 0;
4967	int num_alloc_stripes;
4968	int patch_the_first_stripe_for_dev_replace = 0;
4969	u64 physical_to_patch_in_first_stripe = 0;
4970	u64 raid56_full_stripe_start = (u64)-1;
4971
4972	read_lock(&em_tree->lock);
4973	em = lookup_extent_mapping(em_tree, logical, *length);
4974	read_unlock(&em_tree->lock);
4975
4976	if (!em) {
4977		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4978			logical, *length);
4979		return -EINVAL;
4980	}
4981
4982	if (em->start > logical || em->start + em->len < logical) {
4983		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4984			   "found %Lu-%Lu", logical, em->start,
4985			   em->start + em->len);
4986		free_extent_map(em);
4987		return -EINVAL;
4988	}
4989
4990	map = (struct map_lookup *)em->bdev;
4991	offset = logical - em->start;
4992
4993	stripe_len = map->stripe_len;
4994	stripe_nr = offset;
4995	/*
4996	 * stripe_nr counts the total number of stripes we have to stride
4997	 * to get to this block
4998	 */
4999	stripe_nr = div64_u64(stripe_nr, stripe_len);
5000
5001	stripe_offset = stripe_nr * stripe_len;
5002	BUG_ON(offset < stripe_offset);
5003
5004	/* stripe_offset is the offset of this block in its stripe*/
5005	stripe_offset = offset - stripe_offset;
5006
5007	/* if we're here for raid56, we need to know the stripe aligned start */
5008	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5009		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5010		raid56_full_stripe_start = offset;
5011
5012		/* allow a write of a full stripe, but make sure we don't
5013		 * allow straddling of stripes
5014		 */
5015		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5016				full_stripe_len);
5017		raid56_full_stripe_start *= full_stripe_len;
5018	}
5019
5020	if (rw & REQ_DISCARD) {
5021		/* we don't discard raid56 yet */
5022		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5023			ret = -EOPNOTSUPP;
5024			goto out;
5025		}
5026		*length = min_t(u64, em->len - offset, *length);
5027	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5028		u64 max_len;
5029		/* For writes to RAID[56], allow a full stripeset across all disks.
5030		   For other RAID types and for RAID[56] reads, just allow a single
5031		   stripe (on a single disk). */
5032		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5033		    (rw & REQ_WRITE)) {
5034			max_len = stripe_len * nr_data_stripes(map) -
5035				(offset - raid56_full_stripe_start);
5036		} else {
5037			/* we limit the length of each bio to what fits in a stripe */
5038			max_len = stripe_len - stripe_offset;
5039		}
5040		*length = min_t(u64, em->len - offset, max_len);
5041	} else {
5042		*length = em->len - offset;
5043	}
5044
5045	/* This is for when we're called from btrfs_merge_bio_hook() and all
5046	   it cares about is the length */
5047	if (!bbio_ret)
5048		goto out;
5049
5050	btrfs_dev_replace_lock(dev_replace);
5051	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5052	if (!dev_replace_is_ongoing)
5053		btrfs_dev_replace_unlock(dev_replace);
5054
5055	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5056	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5057	    dev_replace->tgtdev != NULL) {
5058		/*
5059		 * in dev-replace case, for repair case (that's the only
5060		 * case where the mirror is selected explicitly when
5061		 * calling btrfs_map_block), blocks left of the left cursor
5062		 * can also be read from the target drive.
5063		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5064		 * the last one to the array of stripes. For READ, it also
5065		 * needs to be supported using the same mirror number.
5066		 * If the requested block is not left of the left cursor,
5067		 * EIO is returned. This can happen because btrfs_num_copies()
5068		 * returns one more in the dev-replace case.
5069		 */
5070		u64 tmp_length = *length;
5071		struct btrfs_bio *tmp_bbio = NULL;
5072		int tmp_num_stripes;
5073		u64 srcdev_devid = dev_replace->srcdev->devid;
5074		int index_srcdev = 0;
5075		int found = 0;
5076		u64 physical_of_found = 0;
5077
5078		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5079			     logical, &tmp_length, &tmp_bbio, 0, 0);
5080		if (ret) {
5081			WARN_ON(tmp_bbio != NULL);
5082			goto out;
5083		}
5084
5085		tmp_num_stripes = tmp_bbio->num_stripes;
5086		if (mirror_num > tmp_num_stripes) {
5087			/*
5088			 * REQ_GET_READ_MIRRORS does not contain this
5089			 * mirror, that means that the requested area
5090			 * is not left of the left cursor
5091			 */
5092			ret = -EIO;
5093			btrfs_put_bbio(tmp_bbio);
5094			goto out;
5095		}
5096
5097		/*
5098		 * process the rest of the function using the mirror_num
5099		 * of the source drive. Therefore look it up first.
5100		 * At the end, patch the device pointer to the one of the
5101		 * target drive.
5102		 */
5103		for (i = 0; i < tmp_num_stripes; i++) {
5104			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
5105				/*
5106				 * In case of DUP, in order to keep it
5107				 * simple, only add the mirror with the
5108				 * lowest physical address
5109				 */
5110				if (found &&
5111				    physical_of_found <=
5112				     tmp_bbio->stripes[i].physical)
5113					continue;
5114				index_srcdev = i;
5115				found = 1;
5116				physical_of_found =
5117					tmp_bbio->stripes[i].physical;
5118			}
5119		}
5120
5121		if (found) {
5122			mirror_num = index_srcdev + 1;
5123			patch_the_first_stripe_for_dev_replace = 1;
5124			physical_to_patch_in_first_stripe = physical_of_found;
5125		} else {
5126			WARN_ON(1);
5127			ret = -EIO;
5128			btrfs_put_bbio(tmp_bbio);
5129			goto out;
5130		}
5131
5132		btrfs_put_bbio(tmp_bbio);
5133	} else if (mirror_num > map->num_stripes) {
5134		mirror_num = 0;
5135	}
5136
5137	num_stripes = 1;
5138	stripe_index = 0;
5139	stripe_nr_orig = stripe_nr;
5140	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5141	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5142	stripe_end_offset = stripe_nr_end * map->stripe_len -
5143			    (offset + *length);
5144
5145	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5146		if (rw & REQ_DISCARD)
5147			num_stripes = min_t(u64, map->num_stripes,
5148					    stripe_nr_end - stripe_nr_orig);
5149		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5150				&stripe_index);
5151		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5152			mirror_num = 1;
5153	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5154		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5155			num_stripes = map->num_stripes;
5156		else if (mirror_num)
5157			stripe_index = mirror_num - 1;
5158		else {
5159			stripe_index = find_live_mirror(fs_info, map, 0,
5160					    map->num_stripes,
5161					    current->pid % map->num_stripes,
5162					    dev_replace_is_ongoing);
5163			mirror_num = stripe_index + 1;
5164		}
5165
5166	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5167		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5168			num_stripes = map->num_stripes;
5169		} else if (mirror_num) {
5170			stripe_index = mirror_num - 1;
5171		} else {
5172			mirror_num = 1;
5173		}
5174
5175	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5176		u32 factor = map->num_stripes / map->sub_stripes;
5177
5178		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5179		stripe_index *= map->sub_stripes;
5180
5181		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5182			num_stripes = map->sub_stripes;
5183		else if (rw & REQ_DISCARD)
5184			num_stripes = min_t(u64, map->sub_stripes *
5185					    (stripe_nr_end - stripe_nr_orig),
5186					    map->num_stripes);
5187		else if (mirror_num)
5188			stripe_index += mirror_num - 1;
5189		else {
5190			int old_stripe_index = stripe_index;
5191			stripe_index = find_live_mirror(fs_info, map,
5192					      stripe_index,
5193					      map->sub_stripes, stripe_index +
5194					      current->pid % map->sub_stripes,
5195					      dev_replace_is_ongoing);
5196			mirror_num = stripe_index - old_stripe_index + 1;
5197		}
5198
5199	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5200		if (need_raid_map &&
5201		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5202		     mirror_num > 1)) {
5203			/* push stripe_nr back to the start of the full stripe */
5204			stripe_nr = div_u64(raid56_full_stripe_start,
5205					stripe_len * nr_data_stripes(map));
5206
5207			/* RAID[56] write or recovery. Return all stripes */
5208			num_stripes = map->num_stripes;
5209			max_errors = nr_parity_stripes(map);
5210
5211			*length = map->stripe_len;
5212			stripe_index = 0;
5213			stripe_offset = 0;
5214		} else {
5215			/*
5216			 * Mirror #0 or #1 means the original data block.
5217			 * Mirror #2 is RAID5 parity block.
5218			 * Mirror #3 is RAID6 Q block.
5219			 */
5220			stripe_nr = div_u64_rem(stripe_nr,
5221					nr_data_stripes(map), &stripe_index);
5222			if (mirror_num > 1)
5223				stripe_index = nr_data_stripes(map) +
5224						mirror_num - 2;
5225
5226			/* We distribute the parity blocks across stripes */
5227			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5228					&stripe_index);
5229			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5230				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5231				mirror_num = 1;
5232		}
5233	} else {
5234		/*
5235		 * after this, stripe_nr is the number of stripes on this
5236		 * device we have to walk to find the data, and stripe_index is
5237		 * the number of our device in the stripe array
5238		 */
5239		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5240				&stripe_index);
5241		mirror_num = stripe_index + 1;
5242	}
5243	BUG_ON(stripe_index >= map->num_stripes);
5244
5245	num_alloc_stripes = num_stripes;
5246	if (dev_replace_is_ongoing) {
5247		if (rw & (REQ_WRITE | REQ_DISCARD))
5248			num_alloc_stripes <<= 1;
5249		if (rw & REQ_GET_READ_MIRRORS)
5250			num_alloc_stripes++;
5251		tgtdev_indexes = num_stripes;
5252	}
5253
5254	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5255	if (!bbio) {
5256		ret = -ENOMEM;
5257		goto out;
5258	}
5259	if (dev_replace_is_ongoing)
5260		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5261
5262	/* build raid_map */
5263	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5264	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5265	    mirror_num > 1)) {
5266		u64 tmp;
5267		unsigned rot;
5268
5269		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5270				 sizeof(struct btrfs_bio_stripe) *
5271				 num_alloc_stripes +
5272				 sizeof(int) * tgtdev_indexes);
5273
5274		/* Work out the disk rotation on this stripe-set */
5275		div_u64_rem(stripe_nr, num_stripes, &rot);
5276
5277		/* Fill in the logical address of each stripe */
5278		tmp = stripe_nr * nr_data_stripes(map);
5279		for (i = 0; i < nr_data_stripes(map); i++)
5280			bbio->raid_map[(i+rot) % num_stripes] =
5281				em->start + (tmp + i) * map->stripe_len;
5282
5283		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5284		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5285			bbio->raid_map[(i+rot+1) % num_stripes] =
5286				RAID6_Q_STRIPE;
5287	}
5288
5289	if (rw & REQ_DISCARD) {
5290		u32 factor = 0;
5291		u32 sub_stripes = 0;
5292		u64 stripes_per_dev = 0;
5293		u32 remaining_stripes = 0;
5294		u32 last_stripe = 0;
5295
5296		if (map->type &
5297		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5298			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5299				sub_stripes = 1;
5300			else
5301				sub_stripes = map->sub_stripes;
5302
5303			factor = map->num_stripes / sub_stripes;
5304			stripes_per_dev = div_u64_rem(stripe_nr_end -
5305						      stripe_nr_orig,
5306						      factor,
5307						      &remaining_stripes);
5308			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5309			last_stripe *= sub_stripes;
5310		}
5311
5312		for (i = 0; i < num_stripes; i++) {
5313			bbio->stripes[i].physical =
5314				map->stripes[stripe_index].physical +
5315				stripe_offset + stripe_nr * map->stripe_len;
5316			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5317
5318			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5319					 BTRFS_BLOCK_GROUP_RAID10)) {
5320				bbio->stripes[i].length = stripes_per_dev *
5321							  map->stripe_len;
5322
5323				if (i / sub_stripes < remaining_stripes)
5324					bbio->stripes[i].length +=
5325						map->stripe_len;
5326
5327				/*
5328				 * Special for the first stripe and
5329				 * the last stripe:
5330				 *
5331				 * |-------|...|-------|
5332				 *     |----------|
5333				 *    off     end_off
5334				 */
5335				if (i < sub_stripes)
5336					bbio->stripes[i].length -=
5337						stripe_offset;
5338
5339				if (stripe_index >= last_stripe &&
5340				    stripe_index <= (last_stripe +
5341						     sub_stripes - 1))
5342					bbio->stripes[i].length -=
5343						stripe_end_offset;
5344
5345				if (i == sub_stripes - 1)
5346					stripe_offset = 0;
5347			} else
5348				bbio->stripes[i].length = *length;
5349
5350			stripe_index++;
5351			if (stripe_index == map->num_stripes) {
5352				/* This could only happen for RAID0/10 */
5353				stripe_index = 0;
5354				stripe_nr++;
5355			}
5356		}
5357	} else {
5358		for (i = 0; i < num_stripes; i++) {
5359			bbio->stripes[i].physical =
5360				map->stripes[stripe_index].physical +
5361				stripe_offset +
5362				stripe_nr * map->stripe_len;
5363			bbio->stripes[i].dev =
5364				map->stripes[stripe_index].dev;
5365			stripe_index++;
5366		}
5367	}
5368
5369	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5370		max_errors = btrfs_chunk_max_errors(map);
5371
5372	if (bbio->raid_map)
5373		sort_parity_stripes(bbio, num_stripes);
5374
5375	tgtdev_indexes = 0;
5376	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5377	    dev_replace->tgtdev != NULL) {
5378		int index_where_to_add;
5379		u64 srcdev_devid = dev_replace->srcdev->devid;
5380
5381		/*
5382		 * duplicate the write operations while the dev replace
5383		 * procedure is running. Since the copying of the old disk
5384		 * to the new disk takes place at run time while the
5385		 * filesystem is mounted writable, the regular write
5386		 * operations to the old disk have to be duplicated to go
5387		 * to the new disk as well.
5388		 * Note that device->missing is handled by the caller, and
5389		 * that the write to the old disk is already set up in the
5390		 * stripes array.
5391		 */
5392		index_where_to_add = num_stripes;
5393		for (i = 0; i < num_stripes; i++) {
5394			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5395				/* write to new disk, too */
5396				struct btrfs_bio_stripe *new =
5397					bbio->stripes + index_where_to_add;
5398				struct btrfs_bio_stripe *old =
5399					bbio->stripes + i;
5400
5401				new->physical = old->physical;
5402				new->length = old->length;
5403				new->dev = dev_replace->tgtdev;
5404				bbio->tgtdev_map[i] = index_where_to_add;
5405				index_where_to_add++;
5406				max_errors++;
5407				tgtdev_indexes++;
5408			}
5409		}
5410		num_stripes = index_where_to_add;
5411	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5412		   dev_replace->tgtdev != NULL) {
5413		u64 srcdev_devid = dev_replace->srcdev->devid;
5414		int index_srcdev = 0;
5415		int found = 0;
5416		u64 physical_of_found = 0;
5417
5418		/*
5419		 * During the dev-replace procedure, the target drive can
5420		 * also be used to read data in case it is needed to repair
5421		 * a corrupt block elsewhere. This is possible if the
5422		 * requested area is left of the left cursor. In this area,
5423		 * the target drive is a full copy of the source drive.
5424		 */
5425		for (i = 0; i < num_stripes; i++) {
5426			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5427				/*
5428				 * In case of DUP, in order to keep it
5429				 * simple, only add the mirror with the
5430				 * lowest physical address
5431				 */
5432				if (found &&
5433				    physical_of_found <=
5434				     bbio->stripes[i].physical)
5435					continue;
5436				index_srcdev = i;
5437				found = 1;
5438				physical_of_found = bbio->stripes[i].physical;
5439			}
5440		}
5441		if (found) {
5442			if (physical_of_found + map->stripe_len <=
5443			    dev_replace->cursor_left) {
5444				struct btrfs_bio_stripe *tgtdev_stripe =
5445					bbio->stripes + num_stripes;
5446
5447				tgtdev_stripe->physical = physical_of_found;
5448				tgtdev_stripe->length =
5449					bbio->stripes[index_srcdev].length;
5450				tgtdev_stripe->dev = dev_replace->tgtdev;
5451				bbio->tgtdev_map[index_srcdev] = num_stripes;
5452
5453				tgtdev_indexes++;
5454				num_stripes++;
5455			}
5456		}
5457	}
5458
5459	*bbio_ret = bbio;
5460	bbio->map_type = map->type;
5461	bbio->num_stripes = num_stripes;
5462	bbio->max_errors = max_errors;
5463	bbio->mirror_num = mirror_num;
5464	bbio->num_tgtdevs = tgtdev_indexes;
5465
5466	/*
5467	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5468	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5469	 * available as a mirror
5470	 */
5471	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5472		WARN_ON(num_stripes > 1);
5473		bbio->stripes[0].dev = dev_replace->tgtdev;
5474		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5475		bbio->mirror_num = map->num_stripes + 1;
5476	}
5477out:
5478	if (dev_replace_is_ongoing)
5479		btrfs_dev_replace_unlock(dev_replace);
5480	free_extent_map(em);
5481	return ret;
5482}
5483
5484int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5485		      u64 logical, u64 *length,
5486		      struct btrfs_bio **bbio_ret, int mirror_num)
5487{
5488	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5489				 mirror_num, 0);
5490}
5491
5492/* For Scrub/replace */
5493int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5494		     u64 logical, u64 *length,
5495		     struct btrfs_bio **bbio_ret, int mirror_num,
5496		     int need_raid_map)
5497{
5498	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5499				 mirror_num, need_raid_map);
5500}
5501
5502int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5503		     u64 chunk_start, u64 physical, u64 devid,
5504		     u64 **logical, int *naddrs, int *stripe_len)
5505{
5506	struct extent_map_tree *em_tree = &map_tree->map_tree;
5507	struct extent_map *em;
5508	struct map_lookup *map;
5509	u64 *buf;
5510	u64 bytenr;
5511	u64 length;
5512	u64 stripe_nr;
5513	u64 rmap_len;
5514	int i, j, nr = 0;
5515
5516	read_lock(&em_tree->lock);
5517	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5518	read_unlock(&em_tree->lock);
5519
5520	if (!em) {
5521		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5522		       chunk_start);
5523		return -EIO;
5524	}
5525
5526	if (em->start != chunk_start) {
5527		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5528		       em->start, chunk_start);
5529		free_extent_map(em);
5530		return -EIO;
5531	}
5532	map = (struct map_lookup *)em->bdev;
5533
5534	length = em->len;
5535	rmap_len = map->stripe_len;
5536
5537	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5538		length = div_u64(length, map->num_stripes / map->sub_stripes);
5539	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5540		length = div_u64(length, map->num_stripes);
5541	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5542		length = div_u64(length, nr_data_stripes(map));
5543		rmap_len = map->stripe_len * nr_data_stripes(map);
5544	}
5545
5546	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5547	BUG_ON(!buf); /* -ENOMEM */
5548
5549	for (i = 0; i < map->num_stripes; i++) {
5550		if (devid && map->stripes[i].dev->devid != devid)
5551			continue;
5552		if (map->stripes[i].physical > physical ||
5553		    map->stripes[i].physical + length <= physical)
5554			continue;
5555
5556		stripe_nr = physical - map->stripes[i].physical;
5557		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5558
5559		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5560			stripe_nr = stripe_nr * map->num_stripes + i;
5561			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5562		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5563			stripe_nr = stripe_nr * map->num_stripes + i;
5564		} /* else if RAID[56], multiply by nr_data_stripes().
5565		   * Alternatively, just use rmap_len below instead of
5566		   * map->stripe_len */
5567
5568		bytenr = chunk_start + stripe_nr * rmap_len;
5569		WARN_ON(nr >= map->num_stripes);
5570		for (j = 0; j < nr; j++) {
5571			if (buf[j] == bytenr)
5572				break;
5573		}
5574		if (j == nr) {
5575			WARN_ON(nr >= map->num_stripes);
5576			buf[nr++] = bytenr;
5577		}
5578	}
5579
5580	*logical = buf;
5581	*naddrs = nr;
5582	*stripe_len = rmap_len;
5583
5584	free_extent_map(em);
5585	return 0;
5586}
5587
5588static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
5589{
5590	if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
5591		bio_endio_nodec(bio, err);
5592	else
5593		bio_endio(bio, err);
5594	btrfs_put_bbio(bbio);
5595}
5596
5597static void btrfs_end_bio(struct bio *bio, int err)
5598{
5599	struct btrfs_bio *bbio = bio->bi_private;
5600	struct btrfs_device *dev = bbio->stripes[0].dev;
5601	int is_orig_bio = 0;
5602
5603	if (err) {
5604		atomic_inc(&bbio->error);
5605		if (err == -EIO || err == -EREMOTEIO) {
5606			unsigned int stripe_index =
5607				btrfs_io_bio(bio)->stripe_index;
5608
5609			BUG_ON(stripe_index >= bbio->num_stripes);
5610			dev = bbio->stripes[stripe_index].dev;
5611			if (dev->bdev) {
5612				if (bio->bi_rw & WRITE)
5613					btrfs_dev_stat_inc(dev,
5614						BTRFS_DEV_STAT_WRITE_ERRS);
5615				else
5616					btrfs_dev_stat_inc(dev,
5617						BTRFS_DEV_STAT_READ_ERRS);
5618				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5619					btrfs_dev_stat_inc(dev,
5620						BTRFS_DEV_STAT_FLUSH_ERRS);
5621				btrfs_dev_stat_print_on_error(dev);
5622			}
5623		}
5624	}
5625
5626	if (bio == bbio->orig_bio)
5627		is_orig_bio = 1;
5628
5629	btrfs_bio_counter_dec(bbio->fs_info);
5630
5631	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5632		if (!is_orig_bio) {
5633			bio_put(bio);
5634			bio = bbio->orig_bio;
5635		}
5636
5637		bio->bi_private = bbio->private;
5638		bio->bi_end_io = bbio->end_io;
5639		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5640		/* only send an error to the higher layers if it is
5641		 * beyond the tolerance of the btrfs bio
5642		 */
5643		if (atomic_read(&bbio->error) > bbio->max_errors) {
5644			err = -EIO;
5645		} else {
5646			/*
5647			 * this bio is actually up to date, we didn't
5648			 * go over the max number of errors
5649			 */
5650			set_bit(BIO_UPTODATE, &bio->bi_flags);
5651			err = 0;
5652		}
5653
5654		btrfs_end_bbio(bbio, bio, err);
5655	} else if (!is_orig_bio) {
5656		bio_put(bio);
5657	}
5658}
5659
5660/*
5661 * see run_scheduled_bios for a description of why bios are collected for
5662 * async submit.
5663 *
5664 * This will add one bio to the pending list for a device and make sure
5665 * the work struct is scheduled.
5666 */
5667static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5668					struct btrfs_device *device,
5669					int rw, struct bio *bio)
5670{
5671	int should_queue = 1;
5672	struct btrfs_pending_bios *pending_bios;
5673
5674	if (device->missing || !device->bdev) {
5675		bio_endio(bio, -EIO);
5676		return;
5677	}
5678
5679	/* don't bother with additional async steps for reads, right now */
5680	if (!(rw & REQ_WRITE)) {
5681		bio_get(bio);
5682		btrfsic_submit_bio(rw, bio);
5683		bio_put(bio);
5684		return;
5685	}
5686
5687	/*
5688	 * nr_async_bios allows us to reliably return congestion to the
5689	 * higher layers.  Otherwise, the async bio makes it appear we have
5690	 * made progress against dirty pages when we've really just put it
5691	 * on a queue for later
5692	 */
5693	atomic_inc(&root->fs_info->nr_async_bios);
5694	WARN_ON(bio->bi_next);
5695	bio->bi_next = NULL;
5696	bio->bi_rw |= rw;
5697
5698	spin_lock(&device->io_lock);
5699	if (bio->bi_rw & REQ_SYNC)
5700		pending_bios = &device->pending_sync_bios;
5701	else
5702		pending_bios = &device->pending_bios;
5703
5704	if (pending_bios->tail)
5705		pending_bios->tail->bi_next = bio;
5706
5707	pending_bios->tail = bio;
5708	if (!pending_bios->head)
5709		pending_bios->head = bio;
5710	if (device->running_pending)
5711		should_queue = 0;
5712
5713	spin_unlock(&device->io_lock);
5714
5715	if (should_queue)
5716		btrfs_queue_work(root->fs_info->submit_workers,
5717				 &device->work);
5718}
5719
5720static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5721		       sector_t sector)
5722{
5723	struct bio_vec *prev;
5724	struct request_queue *q = bdev_get_queue(bdev);
5725	unsigned int max_sectors = queue_max_sectors(q);
5726	struct bvec_merge_data bvm = {
5727		.bi_bdev = bdev,
5728		.bi_sector = sector,
5729		.bi_rw = bio->bi_rw,
5730	};
5731
5732	if (WARN_ON(bio->bi_vcnt == 0))
5733		return 1;
5734
5735	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5736	if (bio_sectors(bio) > max_sectors)
5737		return 0;
5738
5739	if (!q->merge_bvec_fn)
5740		return 1;
5741
5742	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
5743	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5744		return 0;
5745	return 1;
5746}
5747
5748static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5749			      struct bio *bio, u64 physical, int dev_nr,
5750			      int rw, int async)
5751{
5752	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5753
5754	bio->bi_private = bbio;
5755	btrfs_io_bio(bio)->stripe_index = dev_nr;
5756	bio->bi_end_io = btrfs_end_bio;
5757	bio->bi_iter.bi_sector = physical >> 9;
5758#ifdef DEBUG
5759	{
5760		struct rcu_string *name;
5761
5762		rcu_read_lock();
5763		name = rcu_dereference(dev->name);
5764		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5765			 "(%s id %llu), size=%u\n", rw,
5766			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
5767			 name->str, dev->devid, bio->bi_iter.bi_size);
5768		rcu_read_unlock();
5769	}
5770#endif
5771	bio->bi_bdev = dev->bdev;
5772
5773	btrfs_bio_counter_inc_noblocked(root->fs_info);
5774
5775	if (async)
5776		btrfs_schedule_bio(root, dev, rw, bio);
5777	else
5778		btrfsic_submit_bio(rw, bio);
5779}
5780
5781static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5782			      struct bio *first_bio, struct btrfs_device *dev,
5783			      int dev_nr, int rw, int async)
5784{
5785	struct bio_vec *bvec = first_bio->bi_io_vec;
5786	struct bio *bio;
5787	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5788	u64 physical = bbio->stripes[dev_nr].physical;
5789
5790again:
5791	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5792	if (!bio)
5793		return -ENOMEM;
5794
5795	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5796		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5797				 bvec->bv_offset) < bvec->bv_len) {
5798			u64 len = bio->bi_iter.bi_size;
5799
5800			atomic_inc(&bbio->stripes_pending);
5801			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5802					  rw, async);
5803			physical += len;
5804			goto again;
5805		}
5806		bvec++;
5807	}
5808
5809	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5810	return 0;
5811}
5812
5813static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5814{
5815	atomic_inc(&bbio->error);
5816	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5817		/* Shoud be the original bio. */
5818		WARN_ON(bio != bbio->orig_bio);
5819
5820		bio->bi_private = bbio->private;
5821		bio->bi_end_io = bbio->end_io;
5822		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5823		bio->bi_iter.bi_sector = logical >> 9;
5824
5825		btrfs_end_bbio(bbio, bio, -EIO);
5826	}
5827}
5828
5829int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5830		  int mirror_num, int async_submit)
5831{
5832	struct btrfs_device *dev;
5833	struct bio *first_bio = bio;
5834	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5835	u64 length = 0;
5836	u64 map_length;
5837	int ret;
5838	int dev_nr;
5839	int total_devs;
5840	struct btrfs_bio *bbio = NULL;
5841
5842	length = bio->bi_iter.bi_size;
5843	map_length = length;
5844
5845	btrfs_bio_counter_inc_blocked(root->fs_info);
5846	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5847			      mirror_num, 1);
5848	if (ret) {
5849		btrfs_bio_counter_dec(root->fs_info);
5850		return ret;
5851	}
5852
5853	total_devs = bbio->num_stripes;
5854	bbio->orig_bio = first_bio;
5855	bbio->private = first_bio->bi_private;
5856	bbio->end_io = first_bio->bi_end_io;
5857	bbio->fs_info = root->fs_info;
5858	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5859
5860	if (bbio->raid_map) {
5861		/* In this case, map_length has been set to the length of
5862		   a single stripe; not the whole write */
5863		if (rw & WRITE) {
5864			ret = raid56_parity_write(root, bio, bbio, map_length);
5865		} else {
5866			ret = raid56_parity_recover(root, bio, bbio, map_length,
5867						    mirror_num, 1);
5868		}
5869
5870		btrfs_bio_counter_dec(root->fs_info);
5871		return ret;
5872	}
5873
5874	if (map_length < length) {
5875		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5876			logical, length, map_length);
5877		BUG();
5878	}
5879
5880	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
5881		dev = bbio->stripes[dev_nr].dev;
5882		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5883			bbio_error(bbio, first_bio, logical);
5884			continue;
5885		}
5886
5887		/*
5888		 * Check and see if we're ok with this bio based on it's size
5889		 * and offset with the given device.
5890		 */
5891		if (!bio_size_ok(dev->bdev, first_bio,
5892				 bbio->stripes[dev_nr].physical >> 9)) {
5893			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5894						 dev_nr, rw, async_submit);
5895			BUG_ON(ret);
5896			continue;
5897		}
5898
5899		if (dev_nr < total_devs - 1) {
5900			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5901			BUG_ON(!bio); /* -ENOMEM */
5902		} else {
5903			bio = first_bio;
5904			bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
5905		}
5906
5907		submit_stripe_bio(root, bbio, bio,
5908				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5909				  async_submit);
5910	}
5911	btrfs_bio_counter_dec(root->fs_info);
5912	return 0;
5913}
5914
5915struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5916				       u8 *uuid, u8 *fsid)
5917{
5918	struct btrfs_device *device;
5919	struct btrfs_fs_devices *cur_devices;
5920
5921	cur_devices = fs_info->fs_devices;
5922	while (cur_devices) {
5923		if (!fsid ||
5924		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5925			device = __find_device(&cur_devices->devices,
5926					       devid, uuid);
5927			if (device)
5928				return device;
5929		}
5930		cur_devices = cur_devices->seed;
5931	}
5932	return NULL;
5933}
5934
5935static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5936					    struct btrfs_fs_devices *fs_devices,
5937					    u64 devid, u8 *dev_uuid)
5938{
5939	struct btrfs_device *device;
5940
5941	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
5942	if (IS_ERR(device))
5943		return NULL;
5944
5945	list_add(&device->dev_list, &fs_devices->devices);
5946	device->fs_devices = fs_devices;
5947	fs_devices->num_devices++;
5948
5949	device->missing = 1;
5950	fs_devices->missing_devices++;
5951
5952	return device;
5953}
5954
5955/**
5956 * btrfs_alloc_device - allocate struct btrfs_device
5957 * @fs_info:	used only for generating a new devid, can be NULL if
5958 *		devid is provided (i.e. @devid != NULL).
5959 * @devid:	a pointer to devid for this device.  If NULL a new devid
5960 *		is generated.
5961 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
5962 *		is generated.
5963 *
5964 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
5965 * on error.  Returned struct is not linked onto any lists and can be
5966 * destroyed with kfree() right away.
5967 */
5968struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5969					const u64 *devid,
5970					const u8 *uuid)
5971{
5972	struct btrfs_device *dev;
5973	u64 tmp;
5974
5975	if (WARN_ON(!devid && !fs_info))
5976		return ERR_PTR(-EINVAL);
5977
5978	dev = __alloc_device();
5979	if (IS_ERR(dev))
5980		return dev;
5981
5982	if (devid)
5983		tmp = *devid;
5984	else {
5985		int ret;
5986
5987		ret = find_next_devid(fs_info, &tmp);
5988		if (ret) {
5989			kfree(dev);
5990			return ERR_PTR(ret);
5991		}
5992	}
5993	dev->devid = tmp;
5994
5995	if (uuid)
5996		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
5997	else
5998		generate_random_uuid(dev->uuid);
5999
6000	btrfs_init_work(&dev->work, btrfs_submit_helper,
6001			pending_bios_fn, NULL, NULL);
6002
6003	return dev;
6004}
6005
6006static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6007			  struct extent_buffer *leaf,
6008			  struct btrfs_chunk *chunk)
6009{
6010	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6011	struct map_lookup *map;
6012	struct extent_map *em;
6013	u64 logical;
6014	u64 length;
6015	u64 devid;
6016	u8 uuid[BTRFS_UUID_SIZE];
6017	int num_stripes;
6018	int ret;
6019	int i;
6020
6021	logical = key->offset;
6022	length = btrfs_chunk_length(leaf, chunk);
6023
6024	read_lock(&map_tree->map_tree.lock);
6025	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6026	read_unlock(&map_tree->map_tree.lock);
6027
6028	/* already mapped? */
6029	if (em && em->start <= logical && em->start + em->len > logical) {
6030		free_extent_map(em);
6031		return 0;
6032	} else if (em) {
6033		free_extent_map(em);
6034	}
6035
6036	em = alloc_extent_map();
6037	if (!em)
6038		return -ENOMEM;
6039	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6040	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6041	if (!map) {
6042		free_extent_map(em);
6043		return -ENOMEM;
6044	}
6045
6046	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6047	em->bdev = (struct block_device *)map;
6048	em->start = logical;
6049	em->len = length;
6050	em->orig_start = 0;
6051	em->block_start = 0;
6052	em->block_len = em->len;
6053
6054	map->num_stripes = num_stripes;
6055	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6056	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6057	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6058	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6059	map->type = btrfs_chunk_type(leaf, chunk);
6060	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6061	for (i = 0; i < num_stripes; i++) {
6062		map->stripes[i].physical =
6063			btrfs_stripe_offset_nr(leaf, chunk, i);
6064		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6065		read_extent_buffer(leaf, uuid, (unsigned long)
6066				   btrfs_stripe_dev_uuid_nr(chunk, i),
6067				   BTRFS_UUID_SIZE);
6068		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6069							uuid, NULL);
6070		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6071			free_extent_map(em);
6072			return -EIO;
6073		}
6074		if (!map->stripes[i].dev) {
6075			map->stripes[i].dev =
6076				add_missing_dev(root, root->fs_info->fs_devices,
6077						devid, uuid);
6078			if (!map->stripes[i].dev) {
6079				free_extent_map(em);
6080				return -EIO;
6081			}
6082		}
6083		map->stripes[i].dev->in_fs_metadata = 1;
6084	}
6085
6086	write_lock(&map_tree->map_tree.lock);
6087	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6088	write_unlock(&map_tree->map_tree.lock);
6089	BUG_ON(ret); /* Tree corruption */
6090	free_extent_map(em);
6091
6092	return 0;
6093}
6094
6095static void fill_device_from_item(struct extent_buffer *leaf,
6096				 struct btrfs_dev_item *dev_item,
6097				 struct btrfs_device *device)
6098{
6099	unsigned long ptr;
6100
6101	device->devid = btrfs_device_id(leaf, dev_item);
6102	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6103	device->total_bytes = device->disk_total_bytes;
6104	device->commit_total_bytes = device->disk_total_bytes;
6105	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6106	device->commit_bytes_used = device->bytes_used;
6107	device->type = btrfs_device_type(leaf, dev_item);
6108	device->io_align = btrfs_device_io_align(leaf, dev_item);
6109	device->io_width = btrfs_device_io_width(leaf, dev_item);
6110	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6111	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6112	device->is_tgtdev_for_dev_replace = 0;
6113
6114	ptr = btrfs_device_uuid(dev_item);
6115	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6116}
6117
6118static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6119						  u8 *fsid)
6120{
6121	struct btrfs_fs_devices *fs_devices;
6122	int ret;
6123
6124	BUG_ON(!mutex_is_locked(&uuid_mutex));
6125
6126	fs_devices = root->fs_info->fs_devices->seed;
6127	while (fs_devices) {
6128		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6129			return fs_devices;
6130
6131		fs_devices = fs_devices->seed;
6132	}
6133
6134	fs_devices = find_fsid(fsid);
6135	if (!fs_devices) {
6136		if (!btrfs_test_opt(root, DEGRADED))
6137			return ERR_PTR(-ENOENT);
6138
6139		fs_devices = alloc_fs_devices(fsid);
6140		if (IS_ERR(fs_devices))
6141			return fs_devices;
6142
6143		fs_devices->seeding = 1;
6144		fs_devices->opened = 1;
6145		return fs_devices;
6146	}
6147
6148	fs_devices = clone_fs_devices(fs_devices);
6149	if (IS_ERR(fs_devices))
6150		return fs_devices;
6151
6152	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6153				   root->fs_info->bdev_holder);
6154	if (ret) {
6155		free_fs_devices(fs_devices);
6156		fs_devices = ERR_PTR(ret);
6157		goto out;
6158	}
6159
6160	if (!fs_devices->seeding) {
6161		__btrfs_close_devices(fs_devices);
6162		free_fs_devices(fs_devices);
6163		fs_devices = ERR_PTR(-EINVAL);
6164		goto out;
6165	}
6166
6167	fs_devices->seed = root->fs_info->fs_devices->seed;
6168	root->fs_info->fs_devices->seed = fs_devices;
6169out:
6170	return fs_devices;
6171}
6172
6173static int read_one_dev(struct btrfs_root *root,
6174			struct extent_buffer *leaf,
6175			struct btrfs_dev_item *dev_item)
6176{
6177	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6178	struct btrfs_device *device;
6179	u64 devid;
6180	int ret;
6181	u8 fs_uuid[BTRFS_UUID_SIZE];
6182	u8 dev_uuid[BTRFS_UUID_SIZE];
6183
6184	devid = btrfs_device_id(leaf, dev_item);
6185	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6186			   BTRFS_UUID_SIZE);
6187	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6188			   BTRFS_UUID_SIZE);
6189
6190	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6191		fs_devices = open_seed_devices(root, fs_uuid);
6192		if (IS_ERR(fs_devices))
6193			return PTR_ERR(fs_devices);
6194	}
6195
6196	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6197	if (!device) {
6198		if (!btrfs_test_opt(root, DEGRADED))
6199			return -EIO;
6200
6201		btrfs_warn(root->fs_info, "devid %llu missing", devid);
6202		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6203		if (!device)
6204			return -ENOMEM;
6205	} else {
6206		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6207			return -EIO;
6208
6209		if(!device->bdev && !device->missing) {
6210			/*
6211			 * this happens when a device that was properly setup
6212			 * in the device info lists suddenly goes bad.
6213			 * device->bdev is NULL, and so we have to set
6214			 * device->missing to one here
6215			 */
6216			device->fs_devices->missing_devices++;
6217			device->missing = 1;
6218		}
6219
6220		/* Move the device to its own fs_devices */
6221		if (device->fs_devices != fs_devices) {
6222			ASSERT(device->missing);
6223
6224			list_move(&device->dev_list, &fs_devices->devices);
6225			device->fs_devices->num_devices--;
6226			fs_devices->num_devices++;
6227
6228			device->fs_devices->missing_devices--;
6229			fs_devices->missing_devices++;
6230
6231			device->fs_devices = fs_devices;
6232		}
6233	}
6234
6235	if (device->fs_devices != root->fs_info->fs_devices) {
6236		BUG_ON(device->writeable);
6237		if (device->generation !=
6238		    btrfs_device_generation(leaf, dev_item))
6239			return -EINVAL;
6240	}
6241
6242	fill_device_from_item(leaf, dev_item, device);
6243	device->in_fs_metadata = 1;
6244	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6245		device->fs_devices->total_rw_bytes += device->total_bytes;
6246		spin_lock(&root->fs_info->free_chunk_lock);
6247		root->fs_info->free_chunk_space += device->total_bytes -
6248			device->bytes_used;
6249		spin_unlock(&root->fs_info->free_chunk_lock);
6250	}
6251	ret = 0;
6252	return ret;
6253}
6254
6255int btrfs_read_sys_array(struct btrfs_root *root)
6256{
6257	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6258	struct extent_buffer *sb;
6259	struct btrfs_disk_key *disk_key;
6260	struct btrfs_chunk *chunk;
6261	u8 *array_ptr;
6262	unsigned long sb_array_offset;
6263	int ret = 0;
6264	u32 num_stripes;
6265	u32 array_size;
6266	u32 len = 0;
6267	u32 cur_offset;
6268	struct btrfs_key key;
6269
6270	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6271	/*
6272	 * This will create extent buffer of nodesize, superblock size is
6273	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6274	 * overallocate but we can keep it as-is, only the first page is used.
6275	 */
6276	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6277	if (!sb)
6278		return -ENOMEM;
6279	btrfs_set_buffer_uptodate(sb);
6280	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6281	/*
6282	 * The sb extent buffer is artifical and just used to read the system array.
6283	 * btrfs_set_buffer_uptodate() call does not properly mark all it's
6284	 * pages up-to-date when the page is larger: extent does not cover the
6285	 * whole page and consequently check_page_uptodate does not find all
6286	 * the page's extents up-to-date (the hole beyond sb),
6287	 * write_extent_buffer then triggers a WARN_ON.
6288	 *
6289	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6290	 * but sb spans only this function. Add an explicit SetPageUptodate call
6291	 * to silence the warning eg. on PowerPC 64.
6292	 */
6293	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
6294		SetPageUptodate(sb->pages[0]);
6295
6296	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6297	array_size = btrfs_super_sys_array_size(super_copy);
6298
6299	array_ptr = super_copy->sys_chunk_array;
6300	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6301	cur_offset = 0;
6302
6303	while (cur_offset < array_size) {
6304		disk_key = (struct btrfs_disk_key *)array_ptr;
6305		len = sizeof(*disk_key);
6306		if (cur_offset + len > array_size)
6307			goto out_short_read;
6308
6309		btrfs_disk_key_to_cpu(&key, disk_key);
6310
6311		array_ptr += len;
6312		sb_array_offset += len;
6313		cur_offset += len;
6314
6315		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6316			chunk = (struct btrfs_chunk *)sb_array_offset;
6317			/*
6318			 * At least one btrfs_chunk with one stripe must be
6319			 * present, exact stripe count check comes afterwards
6320			 */
6321			len = btrfs_chunk_item_size(1);
6322			if (cur_offset + len > array_size)
6323				goto out_short_read;
6324
6325			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6326			if (!num_stripes) {
6327				printk(KERN_ERR
6328	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6329					num_stripes, cur_offset);
6330				ret = -EIO;
6331				break;
6332			}
6333
6334			len = btrfs_chunk_item_size(num_stripes);
6335			if (cur_offset + len > array_size)
6336				goto out_short_read;
6337
6338			ret = read_one_chunk(root, &key, sb, chunk);
6339			if (ret)
6340				break;
6341		} else {
6342			ret = -EIO;
6343			break;
6344		}
6345		array_ptr += len;
6346		sb_array_offset += len;
6347		cur_offset += len;
6348	}
6349	free_extent_buffer(sb);
6350	return ret;
6351
6352out_short_read:
6353	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6354			len, cur_offset);
6355	free_extent_buffer(sb);
6356	return -EIO;
6357}
6358
6359int btrfs_read_chunk_tree(struct btrfs_root *root)
6360{
6361	struct btrfs_path *path;
6362	struct extent_buffer *leaf;
6363	struct btrfs_key key;
6364	struct btrfs_key found_key;
6365	int ret;
6366	int slot;
6367
6368	root = root->fs_info->chunk_root;
6369
6370	path = btrfs_alloc_path();
6371	if (!path)
6372		return -ENOMEM;
6373
6374	mutex_lock(&uuid_mutex);
6375	lock_chunks(root);
6376
6377	/*
6378	 * Read all device items, and then all the chunk items. All
6379	 * device items are found before any chunk item (their object id
6380	 * is smaller than the lowest possible object id for a chunk
6381	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6382	 */
6383	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6384	key.offset = 0;
6385	key.type = 0;
6386	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6387	if (ret < 0)
6388		goto error;
6389	while (1) {
6390		leaf = path->nodes[0];
6391		slot = path->slots[0];
6392		if (slot >= btrfs_header_nritems(leaf)) {
6393			ret = btrfs_next_leaf(root, path);
6394			if (ret == 0)
6395				continue;
6396			if (ret < 0)
6397				goto error;
6398			break;
6399		}
6400		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6401		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6402			struct btrfs_dev_item *dev_item;
6403			dev_item = btrfs_item_ptr(leaf, slot,
6404						  struct btrfs_dev_item);
6405			ret = read_one_dev(root, leaf, dev_item);
6406			if (ret)
6407				goto error;
6408		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6409			struct btrfs_chunk *chunk;
6410			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6411			ret = read_one_chunk(root, &found_key, leaf, chunk);
6412			if (ret)
6413				goto error;
6414		}
6415		path->slots[0]++;
6416	}
6417	ret = 0;
6418error:
6419	unlock_chunks(root);
6420	mutex_unlock(&uuid_mutex);
6421
6422	btrfs_free_path(path);
6423	return ret;
6424}
6425
6426void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6427{
6428	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6429	struct btrfs_device *device;
6430
6431	while (fs_devices) {
6432		mutex_lock(&fs_devices->device_list_mutex);
6433		list_for_each_entry(device, &fs_devices->devices, dev_list)
6434			device->dev_root = fs_info->dev_root;
6435		mutex_unlock(&fs_devices->device_list_mutex);
6436
6437		fs_devices = fs_devices->seed;
6438	}
6439}
6440
6441static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6442{
6443	int i;
6444
6445	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6446		btrfs_dev_stat_reset(dev, i);
6447}
6448
6449int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6450{
6451	struct btrfs_key key;
6452	struct btrfs_key found_key;
6453	struct btrfs_root *dev_root = fs_info->dev_root;
6454	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6455	struct extent_buffer *eb;
6456	int slot;
6457	int ret = 0;
6458	struct btrfs_device *device;
6459	struct btrfs_path *path = NULL;
6460	int i;
6461
6462	path = btrfs_alloc_path();
6463	if (!path) {
6464		ret = -ENOMEM;
6465		goto out;
6466	}
6467
6468	mutex_lock(&fs_devices->device_list_mutex);
6469	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6470		int item_size;
6471		struct btrfs_dev_stats_item *ptr;
6472
6473		key.objectid = 0;
6474		key.type = BTRFS_DEV_STATS_KEY;
6475		key.offset = device->devid;
6476		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6477		if (ret) {
6478			__btrfs_reset_dev_stats(device);
6479			device->dev_stats_valid = 1;
6480			btrfs_release_path(path);
6481			continue;
6482		}
6483		slot = path->slots[0];
6484		eb = path->nodes[0];
6485		btrfs_item_key_to_cpu(eb, &found_key, slot);
6486		item_size = btrfs_item_size_nr(eb, slot);
6487
6488		ptr = btrfs_item_ptr(eb, slot,
6489				     struct btrfs_dev_stats_item);
6490
6491		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6492			if (item_size >= (1 + i) * sizeof(__le64))
6493				btrfs_dev_stat_set(device, i,
6494					btrfs_dev_stats_value(eb, ptr, i));
6495			else
6496				btrfs_dev_stat_reset(device, i);
6497		}
6498
6499		device->dev_stats_valid = 1;
6500		btrfs_dev_stat_print_on_load(device);
6501		btrfs_release_path(path);
6502	}
6503	mutex_unlock(&fs_devices->device_list_mutex);
6504
6505out:
6506	btrfs_free_path(path);
6507	return ret < 0 ? ret : 0;
6508}
6509
6510static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6511				struct btrfs_root *dev_root,
6512				struct btrfs_device *device)
6513{
6514	struct btrfs_path *path;
6515	struct btrfs_key key;
6516	struct extent_buffer *eb;
6517	struct btrfs_dev_stats_item *ptr;
6518	int ret;
6519	int i;
6520
6521	key.objectid = 0;
6522	key.type = BTRFS_DEV_STATS_KEY;
6523	key.offset = device->devid;
6524
6525	path = btrfs_alloc_path();
6526	BUG_ON(!path);
6527	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6528	if (ret < 0) {
6529		printk_in_rcu(KERN_WARNING "BTRFS: "
6530			"error %d while searching for dev_stats item for device %s!\n",
6531			      ret, rcu_str_deref(device->name));
6532		goto out;
6533	}
6534
6535	if (ret == 0 &&
6536	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6537		/* need to delete old one and insert a new one */
6538		ret = btrfs_del_item(trans, dev_root, path);
6539		if (ret != 0) {
6540			printk_in_rcu(KERN_WARNING "BTRFS: "
6541				"delete too small dev_stats item for device %s failed %d!\n",
6542				      rcu_str_deref(device->name), ret);
6543			goto out;
6544		}
6545		ret = 1;
6546	}
6547
6548	if (ret == 1) {
6549		/* need to insert a new item */
6550		btrfs_release_path(path);
6551		ret = btrfs_insert_empty_item(trans, dev_root, path,
6552					      &key, sizeof(*ptr));
6553		if (ret < 0) {
6554			printk_in_rcu(KERN_WARNING "BTRFS: "
6555					  "insert dev_stats item for device %s failed %d!\n",
6556				      rcu_str_deref(device->name), ret);
6557			goto out;
6558		}
6559	}
6560
6561	eb = path->nodes[0];
6562	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6563	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6564		btrfs_set_dev_stats_value(eb, ptr, i,
6565					  btrfs_dev_stat_read(device, i));
6566	btrfs_mark_buffer_dirty(eb);
6567
6568out:
6569	btrfs_free_path(path);
6570	return ret;
6571}
6572
6573/*
6574 * called from commit_transaction. Writes all changed device stats to disk.
6575 */
6576int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6577			struct btrfs_fs_info *fs_info)
6578{
6579	struct btrfs_root *dev_root = fs_info->dev_root;
6580	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6581	struct btrfs_device *device;
6582	int stats_cnt;
6583	int ret = 0;
6584
6585	mutex_lock(&fs_devices->device_list_mutex);
6586	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6587		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6588			continue;
6589
6590		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6591		ret = update_dev_stat_item(trans, dev_root, device);
6592		if (!ret)
6593			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6594	}
6595	mutex_unlock(&fs_devices->device_list_mutex);
6596
6597	return ret;
6598}
6599
6600void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6601{
6602	btrfs_dev_stat_inc(dev, index);
6603	btrfs_dev_stat_print_on_error(dev);
6604}
6605
6606static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6607{
6608	if (!dev->dev_stats_valid)
6609		return;
6610	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
6611			   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6612			   rcu_str_deref(dev->name),
6613			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6614			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6615			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6616			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6617			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6618}
6619
6620static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6621{
6622	int i;
6623
6624	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6625		if (btrfs_dev_stat_read(dev, i) != 0)
6626			break;
6627	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6628		return; /* all values == 0, suppress message */
6629
6630	printk_in_rcu(KERN_INFO "BTRFS: "
6631		   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6632	       rcu_str_deref(dev->name),
6633	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6634	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6635	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6636	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6637	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6638}
6639
6640int btrfs_get_dev_stats(struct btrfs_root *root,
6641			struct btrfs_ioctl_get_dev_stats *stats)
6642{
6643	struct btrfs_device *dev;
6644	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6645	int i;
6646
6647	mutex_lock(&fs_devices->device_list_mutex);
6648	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6649	mutex_unlock(&fs_devices->device_list_mutex);
6650
6651	if (!dev) {
6652		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6653		return -ENODEV;
6654	} else if (!dev->dev_stats_valid) {
6655		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6656		return -ENODEV;
6657	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6658		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6659			if (stats->nr_items > i)
6660				stats->values[i] =
6661					btrfs_dev_stat_read_and_reset(dev, i);
6662			else
6663				btrfs_dev_stat_reset(dev, i);
6664		}
6665	} else {
6666		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6667			if (stats->nr_items > i)
6668				stats->values[i] = btrfs_dev_stat_read(dev, i);
6669	}
6670	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6671		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6672	return 0;
6673}
6674
6675int btrfs_scratch_superblock(struct btrfs_device *device)
6676{
6677	struct buffer_head *bh;
6678	struct btrfs_super_block *disk_super;
6679
6680	bh = btrfs_read_dev_super(device->bdev);
6681	if (!bh)
6682		return -EINVAL;
6683	disk_super = (struct btrfs_super_block *)bh->b_data;
6684
6685	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6686	set_buffer_dirty(bh);
6687	sync_dirty_buffer(bh);
6688	brelse(bh);
6689
6690	return 0;
6691}
6692
6693/*
6694 * Update the size of all devices, which is used for writing out the
6695 * super blocks.
6696 */
6697void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6698{
6699	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6700	struct btrfs_device *curr, *next;
6701
6702	if (list_empty(&fs_devices->resized_devices))
6703		return;
6704
6705	mutex_lock(&fs_devices->device_list_mutex);
6706	lock_chunks(fs_info->dev_root);
6707	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6708				 resized_list) {
6709		list_del_init(&curr->resized_list);
6710		curr->commit_total_bytes = curr->disk_total_bytes;
6711	}
6712	unlock_chunks(fs_info->dev_root);
6713	mutex_unlock(&fs_devices->device_list_mutex);
6714}
6715
6716/* Must be invoked during the transaction commit */
6717void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6718					struct btrfs_transaction *transaction)
6719{
6720	struct extent_map *em;
6721	struct map_lookup *map;
6722	struct btrfs_device *dev;
6723	int i;
6724
6725	if (list_empty(&transaction->pending_chunks))
6726		return;
6727
6728	/* In order to kick the device replace finish process */
6729	lock_chunks(root);
6730	list_for_each_entry(em, &transaction->pending_chunks, list) {
6731		map = (struct map_lookup *)em->bdev;
6732
6733		for (i = 0; i < map->num_stripes; i++) {
6734			dev = map->stripes[i].dev;
6735			dev->commit_bytes_used = dev->bytes_used;
6736		}
6737	}
6738	unlock_chunks(root);
6739}
6740