/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec) \
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

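/*
 * Requests are hashed on the sector immediately following their last
 * sector (see rq_hash_key() above), so elv_merge() can look up a back
 * merge candidate for a bio in O(1) instead of scanning the
 * scheduler's internal lists.
 */
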
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        elevator_t *e = q->elevator;

        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * must be same device and not a special request
         */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e) {
                char elv[ELV_NAME_MAX + strlen("-iosched")];

                spin_unlock(&elv_list_lock);

                if (!strcmp(name, "anticipatory"))
                        sprintf(elv, "as-iosched");
                else
                        sprintf(elv, "%s-iosched", name);

                request_module(elv);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}

static void *elevator_init_queue(struct request_queue *q,
                                 struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
                            void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

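/*
 * The default scheduler can thus be chosen at boot with e.g.
 * "elevator=deadline" on the kernel command line; "elevator=as" is
 * accepted as an alias for "anticipatory".
 */
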
static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        elevator_t *eq;
        int i;

        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);

        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

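/*
 * Pick and initialize an I/O scheduler for a queue. Selection order:
 * an explicit name passed by the caller, then the boot-time
 * "elevator=" choice, then CONFIG_DEFAULT_IOSCHED, with noop as the
 * last resort.
 */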
int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
        void *data;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name);
                if (!e)
                        return -EINVAL;
        }

        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                               chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED);
                if (!e) {
                        printk(KERN_ERR
                               "Default I/O scheduler not found. "
                               "Using noop.\n");
                        e = elevator_get("noop");
                }
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_activate_req_fn)
                e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_deactivate_req_fn)
                e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        elevator_t *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < rq->sector)
                        n = n->rb_left;
                else if (sector > rq->sector)
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

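/*
 * Usage sketch (illustrative, modelled loosely on how deadline-iosched
 * uses these helpers; my_data and my_move_request are hypothetical): a
 * scheduler typically keeps one rb_root per data direction, sorted by
 * sector, and lets the elv_rb_* helpers maintain it:
 *
 *	static void my_add_rq_rb(struct my_data *md, struct request *rq)
 *	{
 *		struct rb_root *root = &md->sort_list[rq_data_dir(rq)];
 *		struct request *alias;
 *
 *		while ((alias = elv_rb_add(root, rq)) != NULL)
 *			my_move_request(md, alias);
 *	}
 */
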
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue by sector.  To be used
 * by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

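/*
 * Find a request that a bio may be merged into.  Returns
 * ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE with *req set to the
 * candidate request, or ELEVATOR_NO_MERGE.  The last successful merge
 * target and the rq hash are consulted before asking the scheduler.
 */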
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq))
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

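/*
 * Core insertion helper.  "where" selects the insertion policy:
 * ELEVATOR_INSERT_FRONT/BACK go straight onto the dispatch list,
 * ELEVATOR_INSERT_SORT hands the request to the scheduler, and
 * ELEVATOR_INSERT_REQUEUE puts a requeued request back in a position
 * consistent with any ordered (barrier) sequence in progress.
 */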
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                /*
                 * Most requeues happen because of a busy condition,
                 * don't force unplug of the queue for that case.
                 */
                unplug_it = 0;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                    where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

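/*
 * Return the next request for the driver, or NULL.  Marks the request
 * REQ_STARTED, gives the scheduler an activate notification for sorted
 * requests, and runs the queue's prep_rq_fn (handling BLKPREP_DEFER
 * and BLKPREP_KILL) before handing the request out.
 */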
struct request *elv_next_request(struct request_queue *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                /*
                 * Kill the empty barrier place holder, the driver must
                 * not ever see it.
                 */
                if (blk_empty_barrier(rq)) {
                        end_queued_request(rq, 1);
                        continue;
                }
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq))
                                elv_activate_rq(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if (rq->cmd_flags & REQ_DONTPREP)
                        break;

                if (q->dma_drain_size && rq->data_len) {
                        /*
                         * make sure space for the drain appears; we
                         * know we can do this because max_hw_segments
                         * has been adjusted to be one fewer than the
                         * device can handle
                         */
                        rq->nr_phys_segments++;
                        rq->nr_hw_segments++;
                }

                if (!q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        if (q->dma_drain_size && rq->data_len &&
                            !(rq->cmd_flags & REQ_DONTPREP)) {
                                /*
                                 * remove the space for the drain we added
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
                                --rq->nr_hw_segments;
                        }

                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
                        end_queued_request(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                               ret);
                        break;
                }
        }

        return rq;
}
EXPORT_SYMBOL(elv_next_request);

void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and to it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}
EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
        char *def = "";

        spin_lock(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
               def);
}
EXPORT_SYMBOL_GPL(elv_register);

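/*
 * Registration sketch (illustrative, modelled loosely on noop-iosched;
 * a real scheduler fills in more of the ops than shown here):
 *
 *	static struct elevator_type elevator_noop = {
 *		.ops = {
 *			.elevator_dispatch_fn	= noop_dispatch,
 *			.elevator_add_req_fn	= noop_add_request,
 *		},
 *		.elevator_name	= "noop",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init noop_init(void)
 *	{
 *		elv_register(&elevator_noop);
 *		return 0;
 *	}
 */
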
void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the process to remove the io contexts.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 0;
}

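/*
 * elv_iosched_store/show back the per-queue "scheduler" sysfs attribute
 * (wired up elsewhere in the block layer), so switching at runtime
 * typically looks like:
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop anticipatory [deadline] cfq
 */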
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n",
                       elevator_name);
        return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct elevator_type *__e;
        int len = 0;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);