/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - drop bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

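/*
 * All registered elevator types live on elv_list; elv_list_lock protects
 * the list itself and must be held across elevator_find().
 */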
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: requests are hashed on the sector they end at, so that a
 * new bio can quickly be checked for a back merge against a queued
 * request (see elv_rqhash_find()).
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query the io scheduler to see if a bio issued by the current process
 * may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

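/* Look up a registered elevator type by name; caller holds elv_list_lock. */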
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

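/*
 * Find an elevator type and take a reference on its module.  If it is not
 * registered yet and @try_loading is set, try to load "<name>-iosched" and
 * look again.  A non-NULL return must be balanced with elevator_put().
 */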
static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

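/*
 * Allocate and initialize an elevator_queue for @q.  The caller is expected
 * to hold a reference on @e (see elevator_get()); that reference is dropped
 * by elevator_release() when the embedded kobject is finally put.
 */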
struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

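/*
 * Attach an io scheduler to @q.  The scheduler is chosen in this order:
 * the @name passed in, then the "elevator=" boot parameter, then
 * CONFIG_DEFAULT_IOSCHED, and finally noop as a last resort.
 */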
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by the boot parameter or the
	 * config option.  Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. "
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	err = e->ops.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

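/*
 * Merge-hash helpers.  REQ_HASHED (tested via ELV_ON_HASH()) tracks whether
 * a request is currently on the hash; elv_rqhash_find() lazily drops
 * requests that are no longer mergeable while it scans a bucket.
 */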
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->cmd_flags &= ~REQ_HASHED;
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->cmd_flags |= REQ_HASHED;
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
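/*
 * A typical elevator keeps one sector-sorted tree per data direction and
 * uses these helpers from its add/merge hooks.  A minimal sketch (the
 * foo_data structure and its sort_list field are hypothetical, not part of
 * this file):
 *
 *	static void foo_add_request(struct request_queue *q, struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_add(&fd->sort_list[rq_data_dir(rq)], rq);
 *	}
 *
 * elv_rb_find() then locates a request at a given sector (e.g. for front
 * merge checks) and elv_rb_del() removes it again on dispatch.
 */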
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held
 * on entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

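/*
 * Decide whether, and how, @bio can be merged with a queued request.
 * Returns ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE with *req set to the
 * request to merge into, or ELEVATOR_NO_MERGE.  The one-hit cache and the
 * back-merge hash are tried before asking the elevator itself.
 */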
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

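/*
 * A bio was merged into @rq (@type is ELEVATOR_BACK_MERGE or
 * ELEVATOR_FRONT_MERGE).  Let the elevator update its own bookkeeping; a
 * back merge changes the request's end sector, so its hash position must
 * be refreshed as well.
 */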
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

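/*
 * Runtime PM support: q->nr_pending counts non-PM requests.  When the first
 * such request arrives while the device is (being) runtime suspended, ask
 * the PM core to resume it.
 */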
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

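/*
 * Force the elevator to dispatch everything it is holding back.  Used when
 * requests must bypass the scheduler, e.g. back insertion into the dispatch
 * queue; complain (a limited number of times) if the scheduler still claims
 * to hold sorted requests afterwards.
 */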
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are a scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
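		/*
		 * The merge attempt failed: fall through and insert the
		 * request as a normal sorted one.
		 */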
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * The request has been released by the driver, so its io must be
	 * done: update the in-flight accounting and notify the elevator.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

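/*
 * Create the "iosched" sysfs directory under the queue's kobject and
 * populate it with the elevator's elevator_attrs.
 */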
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

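/*
 * Register a new elevator type.  An io scheduler typically calls this from
 * its module init and pairs it with elv_unregister() on exit; a minimal
 * sketch (iosched_foo and the foo_* ops are hypothetical):
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops = {
 *			.elevator_merge_fn	= foo_merge,
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name = "foo",
 *		.elevator_owner = THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 *
 * Duplicate names are rejected, and an icq cache is created first when the
 * type declares icq_size.
 */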
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch this queue to the new_e io scheduler.  Be careful not to
 * introduce deadlocks: we don't free the old io scheduler before we have
 * allocated what we need for the new one, so we have a chance of going
 * back to the old one if the new one fails to initialize.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = new_e->ops.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

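/*
 * Locked wrapper around __elevator_change() for in-kernel callers.  A
 * driver that wants, say, noop on its queue could do (illustrative only,
 * error handling omitted):
 *
 *	elevator_change(disk->queue, "noop");
 */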
int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

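/*
 * Back the "scheduler" sysfs attribute: list every registered elevator,
 * with the active one in square brackets, e.g. "noop deadline [cfq]".
 */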
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

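/*
 * RB-tree neighbour lookups.  Elevators that keep their requests in a tree
 * via elv_rb_add() typically plug these straight into their
 * elevator_former_req_fn/elevator_latter_req_fn hooks.
 */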
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);