/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}
EXPORT_SYMBOL(elv_try_merge);

inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
        if (q->last_merge)
                return elv_try_merge(q->last_merge, bio);

        return ELEVATOR_NO_MERGE;
}
EXPORT_SYMBOL(elv_try_last_merge);

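/*
 * Illustrative sketch, not part of the original file: one way an IO
 * scheduler's elevator_merge_fn could use the helpers above.  The
 * "example_" names are hypothetical.
 */
static int example_merge_fn(request_queue_t *q, struct request **req,
                            struct bio *bio)
{
        int ret;

        /* cheap first try: the cached last merge candidate */
        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                *req = q->last_merge;
                return ret;
        }

        /*
         * a real scheduler would now search its own lookup structures
         * (hash, rbtree, ...) and call elv_rq_merge_ok()/elv_try_merge()
         * on each candidate it finds
         */
        return ELEVATOR_NO_MERGE;
}
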
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

static int elevator_attach(request_queue_t *q, struct elevator_type *e,
                           struct elevator_queue *eq)
{
        int ret = 0;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->elevator = eq;
        q->end_sector = 0;
        q->boundary_rq = NULL;
        q->max_back_kb = 0;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
        struct elevator_type *e;

        /*
         * check if default is set and exists
         */
        if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
                elevator_put(e);
                return;
        }

#if defined(CONFIG_IOSCHED_AS)
        strcpy(chosen_elevator, "anticipatory");
#elif defined(CONFIG_IOSCHED_DEADLINE)
        strcpy(chosen_elevator, "deadline");
#elif defined(CONFIG_IOSCHED_CFQ)
        strcpy(chosen_elevator, "cfq");
#elif defined(CONFIG_IOSCHED_NOOP)
        strcpy(chosen_elevator, "noop");
#else
#error "You must build at least 1 IO scheduler into the kernel"
#endif
}

static int __init elevator_setup(char *str)
{
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
}

__setup("elevator=", elevator_setup);

int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        elevator_setup_default();

        if (!name)
                name = chosen_elevator;

        e = elevator_get(name);
        if (!e)
                return -EINVAL;

        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
                elevator_put(e);
                return -ENOMEM;
        }

        ret = elevator_attach(q, e, eq);
        if (ret) {
                kfree(eq);
                elevator_put(e);
        }

        return ret;
}

void elevator_exit(elevator_t *e)
{
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);

        elevator_put(e->elevator_type);
        e->elevator_type = NULL;
        kfree(e);
}

/*
 * Insert rq into the dispatch queue of q, sort-inserted by sector within
 * the current scheduling boundary (q->end_sector).  Queue lock must be
 * held on entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        unsigned max_back;
        struct list_head *entry;

        boundary = q->end_sector;
        max_back = q->max_back_kb * 2;
        boundary = boundary > max_back ? boundary - max_back : 0;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

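/*
 * Illustrative sketch, not part of the original file: the smallest useful
 * pair of add_req/dispatch hooks under the generic dispatch queue model.
 * Requests sit on a private FIFO and are moved to the dispatch queue one
 * at a time via elv_dispatch_sort().  "example_data", "example_add_request"
 * and "example_dispatch" are hypothetical names; a real scheduler would
 * allocate example_data in its elevator_init_fn.
 */
struct example_data {
        struct list_head fifo;
};

static void example_add_request(request_queue_t *q, struct request *rq)
{
        struct example_data *ed = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, &ed->fifo);
}

static int example_dispatch(request_queue_t *q, int force)
{
        struct example_data *ed = q->elevator->elevator_data;
        struct request *rq;

        if (list_empty(&ed->fifo))
                return 0;

        rq = list_entry(ed->fifo.next, struct request, queuelist);
        list_del_init(&rq->queuelist);
        elv_dispatch_sort(q, rq);       /* sort-insert into q->queue_head */
        return 1;
}
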
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                             struct request *next)
{
        elevator_t *e = q->elevator;

        if (q->last_merge == next)
                q->last_merge = NULL;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        /*
         * if this is the flush, requeue the original instead and drop the flush
         */
        if (rq->flags & REQ_BAR_FLUSH) {
                clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
                rq = rq->end_io_data;
        }

        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

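/*
 * Core insertion path; the queue lock must be held.  "where" selects
 * front or back insertion into the dispatch queue, or a sort insert via
 * the IO scheduler's add_req hook; "plug" plugs the device first.
 * Barriers are forced to back insertion and become scheduling boundaries.
 */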
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        }

        if (plug)
                blk_plug_device(q);

        rq->q = q;

        if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
                /*
                 * if drain is set, store the request "locally". when the drain
                 * is finished, the requests will be handed ordered to the io
                 * scheduler
                 */
                list_add_tail(&rq->queuelist, &q->drain_list);
                return;
        }

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;

                while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                        ;
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        if (unlikely(list_empty(&q->queue_head) &&
                     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
                return NULL;

        rq = list_entry_rq(q->queue_head.next);

        /*
         * if this is a barrier write and the device has to issue a
         * flush sequence to support it, check how far we are
         */
        if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
                BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

                if (q->ordered == QUEUE_ORDERED_FLUSH &&
                    !blk_barrier_preflush(rq))
                        rq = blk_start_pre_flush(q, rq);
        }

        return rq;
}

struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                }

                if (rq == q->last_merge)
                        q->last_merge = NULL;

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
                        break;
                }
        }

        return rq;
}

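/*
 * Illustrative sketch, not part of the original file: the usual shape of
 * a block driver's request_fn consuming requests handed out by
 * elv_next_request().  "example_request_fn" is a hypothetical name and
 * the actual transfer is elided.
 */
static void example_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (!blk_fs_request(rq)) {
                        end_request(rq, 0);
                        continue;
                }
                /* take it off the dispatch queue; q->in_flight goes up */
                blkdev_dequeue_request(rq);
                /*
                 * ... start the transfer here; the completion path ends
                 * the request with end_that_request_first()/_last(), which
                 * eventually feeds back into elv_completed_request() below
                 */
        }
}
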
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;

        /*
         * the main clearing point for q->last_merge is on retrieval of
         * request by driver (it calls elv_next_request()), but it _can_
         * also happen here if a request is added to the queue but later
         * deleted without ever being given to driver (merged with another
         * request).
         */
        if (rq == q->last_merge)
                q->last_merge = NULL;
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        struct list_head *next;

        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);

        next = rq->queuelist.next;
        if (next != &q->queue_head && next != &rq->queuelist)
                return list_entry_rq(next);

        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        struct list_head *prev;

        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);

        prev = rq->queuelist.prev;
        if (prev != &q->queue_head && prev != &rq->queuelist)
                return list_entry_rq(prev);

        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    int gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
}

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_type->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        if (elevator_find(e->elevator_name))
                BUG();
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

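/*
 * Illustrative sketch, not part of the original file: how an IO scheduler
 * module would describe itself and hand the description to elv_register().
 * The "example" elevator and its ops are hypothetical; they reuse the
 * sketch callbacks above.  A real module calls elv_register() from its
 * module_init() hook and elv_unregister() from module_exit().
 */
static struct elevator_type elevator_example = {
        .ops = {
                .elevator_merge_fn      = example_merge_fn,
                .elevator_add_req_fn    = example_add_request,
                .elevator_dispatch_fn   = example_dispatch,
        },
        .elevator_name  = "example",
        .elevator_owner = THIS_MODULE,
};
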
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason. we also do an intermediate
 * switch to noop to ensure safety with stack-allocated requests, since they
 * don't originate from the block layer allocator. noop is safe here, because
 * it never needs to touch the elevator itself for completion events. DRAIN
 * flags will make sure we don't touch it for additions either.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        struct elevator_type *noop_elevator = NULL;
        elevator_t *old_elevator;

        if (!e)
                goto error;

        /*
         * first step, drain requests from the block freelist
         */
        blk_wait_queue_drained(q, 0);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * next step, switch to noop since it uses no private rq structures
         * and doesn't allocate any memory for anything. then wait for any
         * non-fs requests in-flight
         */
        noop_elevator = elevator_get("noop");
        spin_lock_irq(q->queue_lock);
        elevator_attach(q, noop_elevator, e);
        spin_unlock_irq(q->queue_lock);

        blk_wait_queue_drained(q, 1);

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, new_e, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and start queue again
         */
        elevator_exit(old_elevator);
        blk_finish_queue_drain(q);
        elevator_put(noop_elevator);
        return;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        blk_finish_queue_drain(q);
error:
        if (noop_elevator)
                elevator_put(noop_elevator);
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
}

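/*
 * The two functions below back the per-queue sysfs attribute
 * /sys/block/<dev>/queue/scheduler: writing the name of a registered
 * scheduler triggers elevator_switch() above, reading lists the
 * registered schedulers with the active one in square brackets.
 */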
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        memset(elevator_name, 0, sizeof(elevator_name));
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);

        if (elevator_name[strlen(elevator_name) - 1] == '\n')
                elevator_name[strlen(elevator_name) - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        elevator_switch(q, e);
        return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(len+name, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);