/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
/*
 * grace period before allowing idle class to get disk access
 */
#define CFQ_IDLE_GRACE		(HZ / 10)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)
/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)

#define RQ_CIC(rq)		((struct cfq_io_context *)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define sample_valid(samples)	((samples) > 80)
/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	int rq_in_driver;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;

	sector_t new_seek_mean;
	u64 new_seek_total;
};
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;

	sector_t last_request_pos;
};
enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue has never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
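
/*
 * For example, CFQ_CFQQ_FNS(on_rr) below expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */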
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
#undef CFQ_CFQQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
	/*
	 * Use the per-process queue, for read requests and synchronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
		return task->pid;

	return CFQ_KEY_ASYNC;
}
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
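
/*
 * A worked example, assuming HZ=100 so the sync base slice is
 * cfq_slice_sync = HZ / 10 = 10 jiffies: the default prio 4 gets exactly
 * the base slice (10 + 2 * 0 = 10), the highest BE prio 0 gets
 * 10 + 2 * 4 = 18 jiffies, and the lowest prio 7 gets 10 + 2 * -3 = 4.
 */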
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
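
/*
 * A worked example with the default cfq_back_penalty of 2 and the head at
 * last = 1000: rq1 at sector 1200 gets d1 = 200, while rq2 at sector 900
 * lies behind the head and gets d2 = (1000 - 900) * 2 = 200. The tie is
 * broken in favour of the higher sector, so rq1 wins.
 */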
/*
 * The below is leftmost cache rbtree addon
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	return root->left;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}
/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
			cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
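
/*
 * In effect, higher priority (and sync) queues get a smaller offset and
 * therefore an earlier position in the service tree: a sync prio 0 queue
 * gets offset 0, while lower priorities are pushed further out in
 * proportion to the number of busy queues.
 */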
/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				 struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
	struct rb_node *parent = NULL;
	unsigned long rb_key;
	int left;

	if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	while (*p) {
		struct cfq_queue *__cfqq;
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
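
/*
 * Note the add_front case above: rb_key 0 sorts before any jiffies-based
 * key, which is how a preempting queue (see cfq_preempt_queue()) is made
 * the next one picked by cfq_get_next_queue().
 */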
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}
/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}
static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
	struct cfq_queue *cfqq;

	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}
static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But
	 * let's make the mark a little higher - idling could still be good
	 * for low queueing, and a low queueing number could also just
	 * indicate a SCSI mid layer like behaviour where limit+1 is often
	 * seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}
static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	key = cfq_queue_pid(current, rw, bio_sync(bio));
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);

	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted, int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
				     int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
}
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	struct rb_node *n;

	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	n = cfq_rb_first(&cfqd->service_tree);
	cfqq = rb_entry(n, struct cfq_queue, rb_node);

	if (cfq_class_idle(cfqq)) {
		unsigned long end;

		/*
		 * if we have idle queues and no rt or be queues had
		 * pending requests, either allow immediate service if
		 * the grace period has passed or arm the idle grace
		 * timer
		 */
		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
		if (time_before(jiffies, end)) {
			mod_timer(&cfqd->idle_class_timer, end);
			cfqq = NULL;
		}
	}

	return cfqq;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
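/*
 * seek_mean is kept in sectors; with 512-byte sectors (an assumption for
 * illustration), a process counts as seeky once its mean seek distance
 * exceeds 8 * 1024 sectors = 4 MiB.
 */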
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for it to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		return NULL;

	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
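
/*
 * With the default cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS of 8, this
 * caps an async slice at 2 * (2 + 2 * 3) = 16 requests for the default
 * prio 4, 32 requests for prio 0 and 4 for prio 7.
 */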
/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (cfqq->dispatched || timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}
/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. an
	 * idle queue always expires after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0, 0);
	}

	return dispatched;
}
static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int dispatched = 0;
	struct rb_node *n;

	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);

		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	cfq_slice_expired(cfqd, 0, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}
static int cfq_dispatch_requests(request_queue_t *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		if (cfqd->busy_queues > 1) {
			/*
			 * So we have dispatched before in this round, if the
			 * next queue has idling enabled (must be sync), don't
			 * allow it service until the previous requests have
			 * completed.
			 */
			if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq) &&
			    dispatched)
				break;
			if (cfqq->dispatched >= cfqd->cfq_quantum)
				break;
		}

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return dispatched;
}
/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0, 0);
		cfq_schedule_dispatch(cfqd);
	}

	/*
	 * it's on the empty list and still hashed
	 */
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}
static struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry;
	struct cfq_queue *__cfqq;

	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);

		if (__cfqq->key == key && (__p == prio || !prio))
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}
static void cfq_free_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;
	int freed = 0;

	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
	}

	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		request_queue_t *q = cfqd->queue;

		spin_lock_irq(q->queue_lock);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irq(q->queue_lock);
	}
}
/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;

	/*
	 * put the reference this task is holding to the various queues
	 */

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
	if (cic) {
		memset(cic, 0, sizeof(*cic));
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}
static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}
static inline void changed_ioprio(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	struct cfq_io_context *cic;
	struct rb_node *n;

	ioc->ioprio_changed = 0;

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}
}
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	unsigned short ioprio;

retry:
	ioprio = tsk->ioprio;
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		if (key != CFQ_KEY_ASYNC)
			cfq_mark_cfqq_idle_window(cfqq);

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);
		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}
/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	WARN_ON(!list_empty(&cic->queue_list));
	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);
}
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;

restart:
	n = ioc->cic_root.rb_node;
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}

		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else
			return cic;
	}

	return NULL;
}
static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	unsigned long flags;
	void *k;

	cic->ioc = ioc;
	cic->key = cfqd;

restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	list_add(&cic->queue_list, &cfqd->cic_list);
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	cfq_cic_link(cfqd, ioc, cic);
out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err:
	put_io_context(ioc);
	return NULL;
}
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	if (!cic->seek_samples) {
		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
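
/*
 * The thinktime and seek statistics above are decaying averages kept in
 * fixed point: each new sample gets weight 1/8 against 7/8 of the old
 * state, scaled by 256, so the sample counters converge towards 256 and
 * cross the sample_valid() threshold of 80 after a few requests.
 */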
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no; if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}
/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}
/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;
	cfqq->last_request_pos = cic->last_request_pos;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}
static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq))
			cfq_slice_expired(cfqd, 0, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

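/*
 * Grant a guaranteed allocation (ELV_MQUEUE_MUST) once per slice to a
 * queue that we are idling on or that must allocate; otherwise leave
 * the decision to the elevator core.
 */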
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;
	unsigned int key;

	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq[is_sync]) {
		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq[is_sync] = cfqq;
	} else
		cfqq = cic->cfqq[is_sync];

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

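/*
 * Work handler: restart request dispatching on the queue from process
 * context, with the queue lock held around blk_start_queueing().
 */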
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	request_queue_t *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

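/*
 * Stop both timers and wait for pending queue work to finish before
 * tearing the scheduler down.
 */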
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}

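/*
 * Elevator exit: expire any active queue, detach all io contexts still
 * referencing this queue, then free the cfqq hash and the cfq_data.
 */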
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}

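/*
 * Elevator init: allocate the per-queue cfq_data and set up the service
 * tree, cfqq hash, timers, unplug work and default tunables.
 */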
static void *cfq_init_queue(request_queue_t *q)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd)
		return NULL;

	memset(cfqd, 0, sizeof(*cfqd));

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
	if (!cfqd->cfq_hash)
		goto out_free;

	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
out_free:
	kfree(cfqd);
	return NULL;
}

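/*
 * Slab caches for cfq_queue and cfq_io_context allocations.
 */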
static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

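/*
 * Generate one sysfs show/store method per tunable below. __CONV marks
 * values kept internally in jiffies, converted to and from milliseconds
 * at the sysfs boundary.
 */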
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data; \
	int ret = cfq_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else \
		*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

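/*
 * The attributes below surface the tunables through the elevator sysfs
 * interface, e.g. under /sys/block/<dev>/queue/iosched/ on typical
 * setups.
 */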
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

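/*
 * Wire the cfq methods into the elevator core; former/latter request
 * lookups use the generic rbtree helpers.
 */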
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_queue_empty_fn = cfq_queue_empty,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
		.trim = cfq_free_io_context,
	},
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");