[net-next-2.6.git] block/elevator.c — "block: unify flags for struct bio and struct request"
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        \
                (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))

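/*
 * Worked example (editor's illustration, not part of the original file):
 * a request starting at sector 2048 and spanning 8 sectors has
 * rq_hash_key(rq) == 2056, the first sector past its end.  A bio that
 * begins at sector 2056 can therefore locate it through the hash and be
 * back-merged onto it; see elv_rqhash_find() and elv_merge() below.
 */
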
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * Don't merge file system requests and discard requests
         */
        if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * must be same device and not a special request
         */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;

        /*
         * only merge integrity protected bio into ditto rq
         */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}
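
/*
 * Merge geometry, by example (editor's illustration): with __rq covering
 * sectors 100..107 (blk_rq_pos == 100, blk_rq_sectors == 8), a bio at
 * bi_sector 108 abuts its tail and yields ELEVATOR_BACK_MERGE, while a
 * 4-sector bio at bi_sector 96 ends exactly where __rq starts
 * (100 - 4 == 96) and yields ELEVATOR_FRONT_MERGE.
 */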

static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e) {
                char elv[ELV_NAME_MAX + strlen("-iosched")];

                spin_unlock(&elv_list_lock);

                snprintf(elv, sizeof(elv), "%s-iosched", name);

                request_module("%s", elv);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}

static void *elevator_init_queue(struct request_queue *q,
                                 struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
                           void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
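
/*
 * Usage note (editor's addition): booting with "elevator=deadline" on the
 * kernel command line stores "deadline" in chosen_elevator, and
 * elevator_init() below will prefer it over CONFIG_DEFAULT_IOSCHED for
 * every queue that doesn't request a scheduler by name.
 */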

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;
        int i;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                        GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        void *data;

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name);
                if (!e)
                        return -EINVAL;
        }

        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                "Using noop.\n");
                        e = elevator_get("noop");
                }
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for insertion/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
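
/*
 * Usage sketch (editor's illustration; "foo" is a hypothetical scheduler,
 * but the pattern follows deadline-iosched.c): an elevator that wants its
 * pending requests sorted by sector keeps them in an rb tree keyed on
 * blk_rq_pos():
 *
 *      static void foo_add_request(struct request_queue *q, struct request *rq)
 *      {
 *              struct foo_data *fd = q->elevator->elevator_data;
 *              struct request *alias;
 *
 *              // elv_rb_add() returns an existing request at the same
 *              // sector instead of inserting; the caller must resolve
 *              // the alias (deadline moves it to dispatch, then retries).
 *              while ((alias = elv_rb_add(&fd->sort_list, rq)) != NULL)
 *                      foo_move_to_dispatch(fd, alias);
 *      }
 *
 * Front merges are then resolved with
 * elv_rb_find(&fd->sort_list, bio->bi_sector + bio_sectors(bio)),
 * and requests leave the tree via elv_rb_del() before dispatch.
 */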

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if ((rq->cmd_flags & REQ_DISCARD) !=
                    (pos->cmd_flags & REQ_DISCARD))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
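
/*
 * Tuning note (editor's addition): the three merge levels above map onto
 * the queue's "nomerges" sysfs attribute -- 0 enables all merge attempts,
 * 1 (noxmerges) allows only the one-hit last_merge check, and 2 disables
 * merging entirely, e.g.:
 *
 *      echo 2 > /sys/block/sda/queue/nomerges
 */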

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_bio_merged_fn)
                e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->cmd_flags & REQ_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
        if (!q->elevator)
                return;

        queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

        /*
         * make sure we don't have any requests in flight
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
                __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }
}

void elv_quiesce_end(struct request_queue *q)
{
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        trace_block_rq_insert(q, rq);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
                       !(rq->cmd_flags & REQ_DISCARD));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                /*
                 * Most requeues happen because of a busy condition,
                 * don't force unplug of the queue for that case.
                 */
                unplug_it = 0;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
                                - queue_in_flight(q);

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (rq->cmd_flags & REQ_HARDBARRIER)
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update
                 * end_sector
                 */
                if (rq->cmd_type == REQ_TYPE_FS ||
                    (rq->cmd_flags & REQ_DISCARD)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                    where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
        struct request *rq;

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
                /*
                 * Mark this request as started so we don't trigger
                 * any debug logic in the end I/O path.
                 */
                blk_start_request(rq);
                __blk_end_request_all(rq, -EIO);
        }
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->cmd_flags & REQ_SORTED) &&
                    e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *next = NULL;

                if (!list_empty(&q->queue_head))
                        next = list_entry_rq(q->queue_head.next);

                if (!queue_in_flight(q) &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        __blk_run_queue(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);

static void __elv_unregister_queue(struct elevator_queue *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
        char *def = "";

        spin_lock(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
}
EXPORT_SYMBOL_GPL(elv_register);
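
/*
 * Registration sketch (editor's illustration; "foo" and its handlers are
 * hypothetical, but the shape mirrors noop-iosched.c): a scheduler module
 * fills in an elevator_type and registers it at init time:
 *
 *      static struct elevator_type elevator_foo = {
 *              .ops = {
 *                      .elevator_merge_req_fn  = foo_merged_requests,
 *                      .elevator_dispatch_fn   = foo_dispatch,
 *                      .elevator_add_req_fn    = foo_add_request,
 *                      .elevator_init_fn       = foo_init_queue,
 *                      .elevator_exit_fn       = foo_exit_queue,
 *              },
 *              .elevator_name  = "foo",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              elv_register(&elevator_foo);
 *              return 0;
 *      }
 *      module_init(foo_init);
 *
 * Naming the module "foo-iosched" lets elevator_get() above autoload it
 * via request_module().
 */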

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the process to remove the io contexts.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);
        elv_quiesce_start(q);

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        spin_lock_irq(q->queue_lock);
        elv_quiesce_end(q);
        spin_unlock_irq(q->queue_lock);

        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);

        return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        if (!q->elevator)
                return count;

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n",
                                                        elevator_name);
        return count;
}
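
/*
 * Usage note (editor's addition): this store method backs the per-queue
 * scheduler attribute, so switching at runtime looks like:
 *
 *      # cat /sys/block/sda/queue/scheduler
 *      noop deadline [cfq]
 *      # echo deadline > /sys/block/sda/queue/scheduler
 *
 * A failed switch is logged and the old scheduler stays attached.
 */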

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv;
        struct elevator_type *__e;
        int len = 0;

        if (!q->elevator || !blk_queue_stackable(q))
                return sprintf(name, "none\n");

        elv = e->elevator_type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(name+len, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);