/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation of the generic readahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a readahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The readahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
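
/*
 * A minimal usage sketch (hypothetical caller; the scrub code uses the
 * interface in a similar way): start a readahead over a whole tree and
 * wait for it to finish. The all-inclusive key range below is an
 * illustrative assumption, not taken from this file:
 *
 *	struct reada_control *rc;
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *
 *	rc = btrfs_reada_add(root, &start, &end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * Alternatively, btrfs_reada_detach(rc) lets the readahead finish in the
 * background.
 */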

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. As a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just clean up our data structures. Effectively this will
		 * cut the branch below this node from readahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(root->fs_info,
					   "generation mismatch for (%llu,%d,%llu) %llu != %llu",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

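/*
 * Find the readahead zone on @dev that covers @logical, or create one from
 * the block group the address falls into. Returns the zone with a reference
 * held, or NULL on lookup/allocation failure.
 */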
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

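/*
 * Look up the readahead extent for @logical, or create it: map the block,
 * attach it to a zone on every mirror device and insert it into the global
 * and the per-device radix trees. Returns the extent with an extra
 * reference, or NULL on failure.
 */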
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int real_stripes;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = root->nodesize;
	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(root->fs_info,
			   "readahead: more than %d copies not supported",
			   BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/*
			 * cannot read ahead on a missing device, but for
			 * RAID5/6, REQ_GET_READ_MIRRORS returns 1, so don't
			 * skip the missing device in that case.
			 */
			if (nzones > 1)
				continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for readahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	btrfs_put_bbio(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

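/*
 * Drop one reference on @re. On the last reference, remove the extent from
 * all radix trees, release the zone references it holds and free it.
 */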
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

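/*
 * Queue the block at @logical for readahead on behalf of @rc: take a
 * reference on its reada_extent and attach an extctl record carrying the
 * expected @generation.
 */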
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;

		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

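/*
 * Schedule the next read in the current zone of @dev, picking a new zone if
 * necessary. Returns the number of reads kicked off (0 or 1).
 */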
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->tree_root->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical,
			mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

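/*
 * Drive the readahead state machine: keep issuing reads on all devices
 * until either nothing more can be enqueued or enough work has been done
 * to hand the rest over to worker threads.
 */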
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we break the loop above after 10000 enqueued reads and
	 * let workers finish the rest. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

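/*
 * Kick the state machine asynchronously by queueing a work item on the
 * readahead workqueue.
 */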
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, fs_info->tree_root->nodesize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, fs_info->tree_root->nodesize,
			list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
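/*
 * Start a readahead of the tree rooted at @root for the key range
 * [key_start, key_end). Returns a handle for btrfs_reada_wait() or
 * btrfs_reada_detach(), or an ERR_PTR on failure.
 */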
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, level, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

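/*
 * Let the readahead continue in the background and drop the caller's
 * reference on the control structure.
 */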
void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}