block/cfq-iosched.c [net-next-2.6.git] - cfq-iosched: Implement tunable group_idle
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "cfq.h"
18
19 /*
20  * tunables
21  */
22 /* max queue in one round of service */
23 static const int cfq_quantum = 8;
24 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25 /* maximum backwards seek, in KiB */
26 static const int cfq_back_max = 16 * 1024;
27 /* penalty of a backwards seek */
28 static const int cfq_back_penalty = 2;
29 static const int cfq_slice_sync = HZ / 10;
30 static int cfq_slice_async = HZ / 25;
31 static const int cfq_slice_async_rq = 2;
32 static int cfq_slice_idle = HZ / 125;
33 static int cfq_group_idle = HZ / 125;
34 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
35 static const int cfq_hist_divisor = 4;
36
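/*
 * For orientation, at HZ=1000 the jiffy-based defaults above correspond
 * roughly to: fifo_expire sync/async = 250ms/125ms, slice_sync = 100ms,
 * slice_async = 40ms, slice_idle = group_idle = 8ms, target_latency =
 * 300ms (a sketch; exact values depend on the kernel's HZ).  Most of them
 * are also runtime tunables under /sys/block/<dev>/queue/iosched/, which
 * is where the group_idle knob added by this patch shows up.
 */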
37 /*
38  * offset from end of service tree
39  */
40 #define CFQ_IDLE_DELAY          (HZ / 5)
41
42 /*
43  * below this threshold, we consider thinktime immediate
44  */
45 #define CFQ_MIN_TT              (2)
46
47 #define CFQ_SLICE_SCALE         (5)
48 #define CFQ_HW_QUEUE_MIN        (5)
49 #define CFQ_SERVICE_SHIFT       12
50
51 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
52 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
53 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
54 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
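/*
 * seek_history is a 32-bit sliding window of per-request "was this a big
 * seek?" bits, maintained by the seek-time update code later in this file:
 * each completed request shifts the word left and ORs in whether it landed
 * more than CFQQ_SEEK_THR sectors from the previous one.  CFQQ_SEEKY()
 * therefore flags a queue as seeky once more than 32/8 = 4 of its last 32
 * requests were seeks, e.g. (illustrative values):
 *
 *   seek_history = 0x00000013  ->  hweight32() = 3  ->  not seeky
 *   seek_history = 0x0000001f  ->  hweight32() = 5  ->  seeky
 */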
55
56 #define RQ_CIC(rq)              \
57         ((struct cfq_io_context *) (rq)->elevator_private)
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private3)
60
61 static struct kmem_cache *cfq_pool;
62 static struct kmem_cache *cfq_ioc_pool;
63
64 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
65 static struct completion *ioc_gone;
66 static DEFINE_SPINLOCK(ioc_gone_lock);
67
68 static DEFINE_SPINLOCK(cic_index_lock);
69 static DEFINE_IDA(cic_index_ida);
70
71 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
72 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
73 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
74
75 #define sample_valid(samples)   ((samples) > 80)
76 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
77
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85         struct rb_root rb;
86         struct rb_node *left;
87         unsigned count;
88         unsigned total_weight;
89         u64 min_vdisktime;
90         struct rb_node *active;
91 };
92 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
93                         .count = 0, .min_vdisktime = 0, }
94
95 /*
96  * Per process-grouping structure
97  */
98 struct cfq_queue {
99         /* reference count */
100         atomic_t ref;
101         /* various state flags, see below */
102         unsigned int flags;
103         /* parent cfq_data */
104         struct cfq_data *cfqd;
105         /* service_tree member */
106         struct rb_node rb_node;
107         /* service_tree key */
108         unsigned long rb_key;
109         /* prio tree member */
110         struct rb_node p_node;
111         /* prio tree root we belong to, if any */
112         struct rb_root *p_root;
113         /* sorted list of pending requests */
114         struct rb_root sort_list;
115         /* if fifo isn't expired, next request to serve */
116         struct request *next_rq;
117         /* requests queued in sort_list */
118         int queued[2];
119         /* currently allocated requests */
120         int allocated[2];
121         /* fifo list of requests in sort_list */
122         struct list_head fifo;
123
124         /* time when queue got scheduled in to dispatch first request. */
125         unsigned long dispatch_start;
126         unsigned int allocated_slice;
127         unsigned int slice_dispatch;
128         /* time when first request from queue completed and slice started. */
129         unsigned long slice_start;
130         unsigned long slice_end;
131         long slice_resid;
132
133         /* pending metadata requests */
134         int meta_pending;
135         /* number of requests that are on the dispatch list or inside driver */
136         int dispatched;
137
138         /* io prio of this group */
139         unsigned short ioprio, org_ioprio;
140         unsigned short ioprio_class, org_ioprio_class;
141
142         pid_t pid;
143
144         u32 seek_history;
145         sector_t last_request_pos;
146
147         struct cfq_rb_root *service_tree;
148         struct cfq_queue *new_cfqq;
149         struct cfq_group *cfqg;
150         struct cfq_group *orig_cfqg;
151 };
152
153 /*
154  * First index in the service_trees.
155  * IDLE is handled separately, so it has negative index
156  */
157 enum wl_prio_t {
158         BE_WORKLOAD = 0,
159         RT_WORKLOAD = 1,
160         IDLE_WORKLOAD = 2,
161 };
162
163 /*
164  * Second index in the service_trees.
165  */
166 enum wl_type_t {
167         ASYNC_WORKLOAD = 0,
168         SYNC_NOIDLE_WORKLOAD = 1,
169         SYNC_WORKLOAD = 2
170 };
171
172 /* This is per cgroup per device grouping structure */
173 struct cfq_group {
174         /* group service_tree member */
175         struct rb_node rb_node;
176
177         /* group service_tree key */
178         u64 vdisktime;
179         unsigned int weight;
180         bool on_st;
181
182         /* number of cfqq currently on this group */
183         int nr_cfqq;
184
185         /* Per group busy queues average. Useful for workload slice calc. */
186         unsigned int busy_queues_avg[2];
187         /*
188          * rr lists of queues with requests, one rr for each priority class.
189          * Counts are embedded in the cfq_rb_root
190          */
191         struct cfq_rb_root service_trees[2][3];
192         struct cfq_rb_root service_tree_idle;
193
194         unsigned long saved_workload_slice;
195         enum wl_type_t saved_workload;
196         enum wl_prio_t saved_serving_prio;
197         struct blkio_group blkg;
198 #ifdef CONFIG_CFQ_GROUP_IOSCHED
199         struct hlist_node cfqd_node;
200         atomic_t ref;
201 #endif
202         /* number of requests that are on the dispatch list or inside driver */
203         int dispatched;
204 };
205
206 /*
207  * Per block device queue structure
208  */
209 struct cfq_data {
210         struct request_queue *queue;
211         /* Root service tree for cfq_groups */
212         struct cfq_rb_root grp_service_tree;
213         struct cfq_group root_group;
214
215         /*
216          * The priority currently being served
217          */
218         enum wl_prio_t serving_prio;
219         enum wl_type_t serving_type;
220         unsigned long workload_expires;
221         struct cfq_group *serving_group;
222         bool noidle_tree_requires_idle;
223
224         /*
225          * Each priority tree is sorted by next_request position.  These
226          * trees are used when determining if two or more queues are
227          * interleaving requests (see cfq_close_cooperator).
228          */
229         struct rb_root prio_trees[CFQ_PRIO_LISTS];
230
231         unsigned int busy_queues;
232
233         int rq_in_driver;
234         int rq_in_flight[2];
235
236         /*
237          * queue-depth detection
238          */
239         int rq_queued;
240         int hw_tag;
241         /*
242          * hw_tag can be
243          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
244          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
245          *  0 => no NCQ
246          */
247         int hw_tag_est_depth;
248         unsigned int hw_tag_samples;
249
250         /*
251          * idle window management
252          */
253         struct timer_list idle_slice_timer;
254         struct work_struct unplug_work;
255
256         struct cfq_queue *active_queue;
257         struct cfq_io_context *active_cic;
258
259         /*
260          * async queue for each priority case
261          */
262         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
263         struct cfq_queue *async_idle_cfqq;
264
265         sector_t last_position;
266
267         /*
268          * tunables, see top of file
269          */
270         unsigned int cfq_quantum;
271         unsigned int cfq_fifo_expire[2];
272         unsigned int cfq_back_penalty;
273         unsigned int cfq_back_max;
274         unsigned int cfq_slice[2];
275         unsigned int cfq_slice_async_rq;
276         unsigned int cfq_slice_idle;
277         unsigned int cfq_group_idle;
278         unsigned int cfq_latency;
279         unsigned int cfq_group_isolation;
280
281         unsigned int cic_index;
282         struct list_head cic_list;
283
284         /*
285          * Fallback dummy cfqq for extreme OOM conditions
286          */
287         struct cfq_queue oom_cfqq;
288
289         unsigned long last_delayed_sync;
290
291         /* List of cfq groups being managed on this device*/
292         struct hlist_head cfqg_list;
293         struct rcu_head rcu;
294 };
295
296 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
297
298 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
299                                             enum wl_prio_t prio,
300                                             enum wl_type_t type)
301 {
302         if (!cfqg)
303                 return NULL;
304
305         if (prio == IDLE_WORKLOAD)
306                 return &cfqg->service_tree_idle;
307
308         return &cfqg->service_trees[prio][type];
309 }
310
311 enum cfqq_state_flags {
312         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
313         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
314         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
315         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
316         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
317         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
318         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
319         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
320         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
321         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
322         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
323         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
324         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
325 };
326
327 #define CFQ_CFQQ_FNS(name)                                              \
328 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
329 {                                                                       \
330         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
331 }                                                                       \
332 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
333 {                                                                       \
334         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
335 }                                                                       \
336 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
337 {                                                                       \
338         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
339 }
340
341 CFQ_CFQQ_FNS(on_rr);
342 CFQ_CFQQ_FNS(wait_request);
343 CFQ_CFQQ_FNS(must_dispatch);
344 CFQ_CFQQ_FNS(must_alloc_slice);
345 CFQ_CFQQ_FNS(fifo_expire);
346 CFQ_CFQQ_FNS(idle_window);
347 CFQ_CFQQ_FNS(prio_changed);
348 CFQ_CFQQ_FNS(slice_new);
349 CFQ_CFQQ_FNS(sync);
350 CFQ_CFQQ_FNS(coop);
351 CFQ_CFQQ_FNS(split_coop);
352 CFQ_CFQQ_FNS(deep);
353 CFQ_CFQQ_FNS(wait_busy);
354 #undef CFQ_CFQQ_FNS
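/*
 * For reference, CFQ_CFQQ_FNS(on_rr) expands to three helpers of this
 * shape (a sketch of the generated code, shown for one flag only):
 *
 *   static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *   {
 *           cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *   }
 *   static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq);   clears the bit
 *   static inline int  cfq_cfqq_on_rr(const struct cfq_queue *cfqq);   tests the bit
 *
 * so the rest of the file can set, clear and test per-queue state by name.
 */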
355
356 #ifdef CONFIG_CFQ_GROUP_IOSCHED
357 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
358         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
359                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
360                         blkg_path(&(cfqq)->cfqg->blkg), ##args);
361
362 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
363         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
364                                 blkg_path(&(cfqg)->blkg), ##args);      \
365
366 #else
367 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
368         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
369 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0);
370 #endif
371 #define cfq_log(cfqd, fmt, args...)     \
372         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
373
374 /* Traverses through cfq group service trees */
375 #define for_each_cfqg_st(cfqg, i, j, st) \
376         for (i = 0; i <= IDLE_WORKLOAD; i++) \
377                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
378                         : &cfqg->service_tree_idle; \
379                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
380                         (i == IDLE_WORKLOAD && j == 0); \
381                         j++, st = i < IDLE_WORKLOAD ? \
382                         &cfqg->service_trees[i][j]: NULL) \
383
384
385 static inline bool iops_mode(struct cfq_data *cfqd)
386 {
387         /*
388          * If we are not idling on queues and it is an NCQ drive, parallel
389          * execution of requests is on and measuring time is not possible
390          * in most cases unless we drive shallower queue depths, and that
391          * becomes a performance bottleneck. In such cases switch to
392          * providing fairness in terms of number of IOs.
393          */
394         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
395                 return true;
396         else
397                 return false;
398 }
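/*
 * iops_mode() typically becomes true when idling is disabled on an
 * NCQ-capable device, e.g. (device name is only an example):
 *
 *   echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * With deep hardware queues and no idling, elapsed slice time no longer
 * reflects the service a queue actually received, so cfq_group_served()
 * below charges the number of dispatched requests instead of jiffies.
 */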
399
400 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
401 {
402         if (cfq_class_idle(cfqq))
403                 return IDLE_WORKLOAD;
404         if (cfq_class_rt(cfqq))
405                 return RT_WORKLOAD;
406         return BE_WORKLOAD;
407 }
408
409
410 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
411 {
412         if (!cfq_cfqq_sync(cfqq))
413                 return ASYNC_WORKLOAD;
414         if (!cfq_cfqq_idle_window(cfqq))
415                 return SYNC_NOIDLE_WORKLOAD;
416         return SYNC_WORKLOAD;
417 }
418
419 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
420                                         struct cfq_data *cfqd,
421                                         struct cfq_group *cfqg)
422 {
423         if (wl == IDLE_WORKLOAD)
424                 return cfqg->service_tree_idle.count;
425
426         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
427                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
428                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
429 }
430
431 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
432                                         struct cfq_group *cfqg)
433 {
434         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
435                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
436 }
437
438 static void cfq_dispatch_insert(struct request_queue *, struct request *);
439 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
440                                        struct io_context *, gfp_t);
441 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
442                                                 struct io_context *);
443
444 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
445                                             bool is_sync)
446 {
447         return cic->cfqq[is_sync];
448 }
449
450 static inline void cic_set_cfqq(struct cfq_io_context *cic,
451                                 struct cfq_queue *cfqq, bool is_sync)
452 {
453         cic->cfqq[is_sync] = cfqq;
454 }
455
456 #define CIC_DEAD_KEY    1ul
457 #define CIC_DEAD_INDEX_SHIFT    1
458
459 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
460 {
461         return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
462 }
463
464 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
465 {
466         struct cfq_data *cfqd = cic->key;
467
468         if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
469                 return NULL;
470
471         return cfqd;
472 }
473
474 /*
475  * We regard a request as SYNC, if it's either a read or has the SYNC bit
476  * set (in which case it could also be direct WRITE).
477  */
478 static inline bool cfq_bio_sync(struct bio *bio)
479 {
480         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
481 }
482
483 /*
484  * scheduler run of queue, if there are requests pending and no one in the
485  * driver that will restart queueing
486  */
487 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
488 {
489         if (cfqd->busy_queues) {
490                 cfq_log(cfqd, "schedule dispatch");
491                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
492         }
493 }
494
495 static int cfq_queue_empty(struct request_queue *q)
496 {
497         struct cfq_data *cfqd = q->elevator->elevator_data;
498
499         return !cfqd->rq_queued;
500 }
501
502 /*
503  * Scale schedule slice based on io priority. Use the sync time slice only
504  * if a queue is marked sync and has sync io queued. A sync queue with async
505  * io only should not get the full sync slice length.
506  */
507 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
508                                  unsigned short prio)
509 {
510         const int base_slice = cfqd->cfq_slice[sync];
511
512         WARN_ON(prio >= IOPRIO_BE_NR);
513
514         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
515 }
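/*
 * Illustration of the scaling above (assuming HZ=1000, so the default sync
 * base slice is 100ms, and CFQ_SLICE_SCALE = 5):
 *
 *   ioprio 0 (highest BE): 100 + 20 * (4 - 0) = 180ms
 *   ioprio 4 (default)   : 100 + 20 * (4 - 4) = 100ms
 *   ioprio 7 (lowest BE) : 100 + 20 * (4 - 7) =  40ms
 */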
516
517 static inline int
518 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
519 {
520         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
521 }
522
523 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
524 {
525         u64 d = delta << CFQ_SERVICE_SHIFT;
526
527         d = d * BLKIO_WEIGHT_DEFAULT;
528         do_div(d, cfqg->weight);
529         return d;
530 }
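/*
 * cfq_scale_slice() turns service received into vdisktime: the charge is
 * shifted up by CFQ_SERVICE_SHIFT for precision and scaled inversely by
 * the group's weight.  Rough example (assuming the default blkio weight of
 * 500): a weight-1000 group accrues vdisktime at half the rate of a
 * weight-500 group for the same charge, so it is rescheduled sooner and
 * ends up with roughly twice the disk share.
 */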
531
532 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
533 {
534         s64 delta = (s64)(vdisktime - min_vdisktime);
535         if (delta > 0)
536                 min_vdisktime = vdisktime;
537
538         return min_vdisktime;
539 }
540
541 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
542 {
543         s64 delta = (s64)(vdisktime - min_vdisktime);
544         if (delta < 0)
545                 min_vdisktime = vdisktime;
546
547         return min_vdisktime;
548 }
549
550 static void update_min_vdisktime(struct cfq_rb_root *st)
551 {
552         u64 vdisktime = st->min_vdisktime;
553         struct cfq_group *cfqg;
554
555         if (st->active) {
556                 cfqg = rb_entry_cfqg(st->active);
557                 vdisktime = cfqg->vdisktime;
558         }
559
560         if (st->left) {
561                 cfqg = rb_entry_cfqg(st->left);
562                 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
563         }
564
565         st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
566 }
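/*
 * min_vdisktime is a monotonically increasing floor of the smallest
 * vdisktime on the tree, analogous to min_vruntime in CFS.  The
 * (s64)(vdisktime - min_vdisktime) comparisons keep the helpers correct
 * across u64 wrap-around, like time_after(): e.g. vdisktime = 5 versus
 * min_vdisktime = ULLONG_MAX - 5 gives a signed delta of +11, so 5 is
 * treated as the later value even though it is numerically smaller.
 */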
567
568 /*
569  * Get the averaged number of queues of RT/BE priority.
570  * The average is updated with a formula that gives more weight to higher numbers,
571  * to quickly follow sudden increases and decrease slowly
572  */
573
574 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
575                                         struct cfq_group *cfqg, bool rt)
576 {
577         unsigned min_q, max_q;
578         unsigned mult  = cfq_hist_divisor - 1;
579         unsigned round = cfq_hist_divisor / 2;
580         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
581
582         min_q = min(cfqg->busy_queues_avg[rt], busy);
583         max_q = max(cfqg->busy_queues_avg[rt], busy);
584         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
585                 cfq_hist_divisor;
586         return cfqg->busy_queues_avg[rt];
587 }
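/*
 * With cfq_hist_divisor = 4 the update is
 *   avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4
 * i.e. a skewed moving average.  Worked example: avg = 2 and busy jumps to
 * 6 -> (18 + 2 + 2) / 4 = 5 (rises quickly); busy then drops back to 2 ->
 * (15 + 2 + 2) / 4 = 4 (decays slowly).
 */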
588
589 static inline unsigned
590 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
591 {
592         struct cfq_rb_root *st = &cfqd->grp_service_tree;
593
594         return cfq_target_latency * cfqg->weight / st->total_weight;
595 }
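/*
 * Example: with the default 300ms target latency and two backlogged groups
 * of weight 500 and 1000, their shares of one scheduling round come out to
 * 300 * 500/1500 = 100ms and 300 * 1000/1500 = 200ms respectively.
 */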
596
597 static inline void
598 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
599 {
600         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
601         if (cfqd->cfq_latency) {
602                 /*
603                  * interested queues (we consider only the ones with the same
604                  * priority class in the cfq group)
605                  */
606                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
607                                                 cfq_class_rt(cfqq));
608                 unsigned sync_slice = cfqd->cfq_slice[1];
609                 unsigned expect_latency = sync_slice * iq;
610                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
611
612                 if (expect_latency > group_slice) {
613                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
614                         /* scale low_slice according to IO priority
615                          * and sync vs async */
616                         unsigned low_slice =
617                                 min(slice, base_low_slice * slice / sync_slice);
618                         /* the adapted slice value is scaled to fit all iqs
619                          * into the target latency */
620                         slice = max(slice * group_slice / expect_latency,
621                                     low_slice);
622                 }
623         }
624         cfqq->slice_start = jiffies;
625         cfqq->slice_end = jiffies + slice;
626         cfqq->allocated_slice = slice;
627         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
628 }
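/*
 * Example of the latency clamp above (a sketch, HZ=1000 defaults): a group
 * whose slice works out to 300ms has 5 busy sync queues, so
 * expect_latency = 5 * 100ms = 500ms > group_slice.  A queue whose base
 * slice is 100ms is then scaled to 100 * 300/500 = 60ms (but never below
 * the low_slice floor derived from cfq_slice_idle), so the whole group
 * still fits inside its share of the target latency.
 */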
629
630 /*
631  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
632  * isn't valid until the first request from the dispatch is activated
633  * and the slice time set.
634  */
635 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
636 {
637         if (cfq_cfqq_slice_new(cfqq))
638                 return 0;
639         if (time_before(jiffies, cfqq->slice_end))
640                 return 0;
641
642         return 1;
643 }
644
645 /*
646  * Lifted from AS - choose which of rq1 and rq2 is best served now.
647  * We choose the request that is closest to the head right now. Distance
648  * behind the head is penalized and only allowed to a certain extent.
649  */
650 static struct request *
651 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
652 {
653         sector_t s1, s2, d1 = 0, d2 = 0;
654         unsigned long back_max;
655 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
656 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
657         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
658
659         if (rq1 == NULL || rq1 == rq2)
660                 return rq2;
661         if (rq2 == NULL)
662                 return rq1;
663
664         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
665                 return rq1;
666         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
667                 return rq2;
668         if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
669                 return rq1;
670         else if ((rq2->cmd_flags & REQ_META) &&
671                  !(rq1->cmd_flags & REQ_META))
672                 return rq2;
673
674         s1 = blk_rq_pos(rq1);
675         s2 = blk_rq_pos(rq2);
676
677         /*
678          * by definition, 1KiB is 2 sectors
679          */
680         back_max = cfqd->cfq_back_max * 2;
681
682         /*
683          * Strict one way elevator _except_ in the case where we allow
684          * short backward seeks which are biased as twice the cost of a
685          * similar forward seek.
686          */
687         if (s1 >= last)
688                 d1 = s1 - last;
689         else if (s1 + back_max >= last)
690                 d1 = (last - s1) * cfqd->cfq_back_penalty;
691         else
692                 wrap |= CFQ_RQ1_WRAP;
693
694         if (s2 >= last)
695                 d2 = s2 - last;
696         else if (s2 + back_max >= last)
697                 d2 = (last - s2) * cfqd->cfq_back_penalty;
698         else
699                 wrap |= CFQ_RQ2_WRAP;
700
701         /* Found required data */
702
703         /*
704          * By doing switch() on the bit mask "wrap" we avoid having to
705          * check two variables for all permutations: --> faster!
706          */
707         switch (wrap) {
708         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
709                 if (d1 < d2)
710                         return rq1;
711                 else if (d2 < d1)
712                         return rq2;
713                 else {
714                         if (s1 >= s2)
715                                 return rq1;
716                         else
717                                 return rq2;
718                 }
719
720         case CFQ_RQ2_WRAP:
721                 return rq1;
722         case CFQ_RQ1_WRAP:
723                 return rq2;
724         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
725         default:
726                 /*
727                  * Since both rqs are wrapped,
728                  * start with the one that's further behind head
729                  * (--> only *one* back seek required),
730                  * since back seek takes more time than forward.
731                  */
732                 if (s1 <= s2)
733                         return rq1;
734                 else
735                         return rq2;
736         }
737 }
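/*
 * Worked example of the distance logic (sector numbers are illustrative):
 * with the head at sector 1000, back_max = 32768 sectors (the 16MB default)
 * and back_penalty = 2, a request at sector 1100 costs d = 100 while one
 * at 990 costs d = (1000 - 990) * 2 = 20, so the short backward seek still
 * wins; anything further behind than back_max is marked as wrapped and is
 * only picked if the alternative wrapped as well.
 */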
738
739 /*
740  * The below is the leftmost-node cache rbtree addon
741  */
742 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
743 {
744         /* Service tree is empty */
745         if (!root->count)
746                 return NULL;
747
748         if (!root->left)
749                 root->left = rb_first(&root->rb);
750
751         if (root->left)
752                 return rb_entry(root->left, struct cfq_queue, rb_node);
753
754         return NULL;
755 }
756
757 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
758 {
759         if (!root->left)
760                 root->left = rb_first(&root->rb);
761
762         if (root->left)
763                 return rb_entry_cfqg(root->left);
764
765         return NULL;
766 }
767
768 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
769 {
770         rb_erase(n, root);
771         RB_CLEAR_NODE(n);
772 }
773
774 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
775 {
776         if (root->left == n)
777                 root->left = NULL;
778         rb_erase_init(n, &root->rb);
779         --root->count;
780 }
781
782 /*
783  * would be nice to take fifo expire time into account as well
784  */
785 static struct request *
786 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
787                   struct request *last)
788 {
789         struct rb_node *rbnext = rb_next(&last->rb_node);
790         struct rb_node *rbprev = rb_prev(&last->rb_node);
791         struct request *next = NULL, *prev = NULL;
792
793         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
794
795         if (rbprev)
796                 prev = rb_entry_rq(rbprev);
797
798         if (rbnext)
799                 next = rb_entry_rq(rbnext);
800         else {
801                 rbnext = rb_first(&cfqq->sort_list);
802                 if (rbnext && rbnext != &last->rb_node)
803                         next = rb_entry_rq(rbnext);
804         }
805
806         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
807 }
808
809 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
810                                       struct cfq_queue *cfqq)
811 {
812         /*
813          * just an approximation, should be ok.
814          */
815         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
816                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
817 }
818
819 static inline s64
820 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
821 {
822         return cfqg->vdisktime - st->min_vdisktime;
823 }
824
825 static void
826 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
827 {
828         struct rb_node **node = &st->rb.rb_node;
829         struct rb_node *parent = NULL;
830         struct cfq_group *__cfqg;
831         s64 key = cfqg_key(st, cfqg);
832         int left = 1;
833
834         while (*node != NULL) {
835                 parent = *node;
836                 __cfqg = rb_entry_cfqg(parent);
837
838                 if (key < cfqg_key(st, __cfqg))
839                         node = &parent->rb_left;
840                 else {
841                         node = &parent->rb_right;
842                         left = 0;
843                 }
844         }
845
846         if (left)
847                 st->left = &cfqg->rb_node;
848
849         rb_link_node(&cfqg->rb_node, parent, node);
850         rb_insert_color(&cfqg->rb_node, &st->rb);
851 }
852
853 static void
854 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
855 {
856         struct cfq_rb_root *st = &cfqd->grp_service_tree;
857         struct cfq_group *__cfqg;
858         struct rb_node *n;
859
860         cfqg->nr_cfqq++;
861         if (cfqg->on_st)
862                 return;
863
864         /*
865          * Currently put the group at the end. Later implement something
866          * so that groups get a smaller vtime based on their weights, so that
867          * a group does not lose everything if it was not continuously backlogged.
868          */
869         n = rb_last(&st->rb);
870         if (n) {
871                 __cfqg = rb_entry_cfqg(n);
872                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
873         } else
874                 cfqg->vdisktime = st->min_vdisktime;
875
876         __cfq_group_service_tree_add(st, cfqg);
877         cfqg->on_st = true;
878         st->total_weight += cfqg->weight;
879 }
880
881 static void
882 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
883 {
884         struct cfq_rb_root *st = &cfqd->grp_service_tree;
885
886         if (st->active == &cfqg->rb_node)
887                 st->active = NULL;
888
889         BUG_ON(cfqg->nr_cfqq < 1);
890         cfqg->nr_cfqq--;
891
892         /* If there are other cfq queues under this group, don't delete it */
893         if (cfqg->nr_cfqq)
894                 return;
895
896         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
897         cfqg->on_st = false;
898         st->total_weight -= cfqg->weight;
899         if (!RB_EMPTY_NODE(&cfqg->rb_node))
900                 cfq_rb_erase(&cfqg->rb_node, st);
901         cfqg->saved_workload_slice = 0;
902         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
903 }
904
905 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
906 {
907         unsigned int slice_used;
908
909         /*
910          * Queue got expired before even a single request completed or
911          * got expired immediately after first request completion.
912          */
913         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
914                 /*
915                  * Also charge the seek time incurred to the group, otherwise
916          * if there are multiple queues in the group, each can dispatch
917                  * a single request on seeky media and cause lots of seek time
918                  * and group will never know it.
919                  */
920                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
921                                         1);
922         } else {
923                 slice_used = jiffies - cfqq->slice_start;
924                 if (slice_used > cfqq->allocated_slice)
925                         slice_used = cfqq->allocated_slice;
926         }
927
928         return slice_used;
929 }
930
931 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
932                                 struct cfq_queue *cfqq)
933 {
934         struct cfq_rb_root *st = &cfqd->grp_service_tree;
935         unsigned int used_sl, charge;
936         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
937                         - cfqg->service_tree_idle.count;
938
939         BUG_ON(nr_sync < 0);
940         used_sl = charge = cfq_cfqq_slice_usage(cfqq);
941
942         if (iops_mode(cfqd))
943                 charge = cfqq->slice_dispatch;
944         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
945                 charge = cfqq->allocated_slice;
946
947         /* Can't update vdisktime while group is on service tree */
948         cfq_rb_erase(&cfqg->rb_node, st);
949         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
950         __cfq_group_service_tree_add(st, cfqg);
951
952         /* This group is being expired. Save the context */
953         if (time_after(cfqd->workload_expires, jiffies)) {
954                 cfqg->saved_workload_slice = cfqd->workload_expires
955                                                 - jiffies;
956                 cfqg->saved_workload = cfqd->serving_type;
957                 cfqg->saved_serving_prio = cfqd->serving_prio;
958         } else
959                 cfqg->saved_workload_slice = 0;
960
961         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
962                                         st->min_vdisktime);
963         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u",
964                         used_sl, cfqq->slice_dispatch, charge, iops_mode(cfqd));
965         cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
966         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
967 }
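/*
 * Charging example: in iops_mode() the group is charged slice_dispatch
 * (the number of requests it issued) rather than elapsed jiffies, so two
 * groups that issued 100 and 200 requests advance their vdisktime in a
 * 1:2 ratio no matter how the device interleaved them.  When an async
 * queue runs in a group with no busy sync queues, the full allocated
 * slice is charged instead of the measured time, since async completion
 * times say little about the disk time actually consumed.
 */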
968
969 #ifdef CONFIG_CFQ_GROUP_IOSCHED
970 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
971 {
972         if (blkg)
973                 return container_of(blkg, struct cfq_group, blkg);
974         return NULL;
975 }
976
977 void
978 cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
979 {
980         cfqg_of_blkg(blkg)->weight = weight;
981 }
982
983 static struct cfq_group *
984 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
985 {
986         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
987         struct cfq_group *cfqg = NULL;
988         void *key = cfqd;
989         int i, j;
990         struct cfq_rb_root *st;
991         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
992         unsigned int major, minor;
993
994         cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
995         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
996                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
997                 cfqg->blkg.dev = MKDEV(major, minor);
998                 goto done;
999         }
1000         if (cfqg || !create)
1001                 goto done;
1002
1003         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1004         if (!cfqg)
1005                 goto done;
1006
1007         for_each_cfqg_st(cfqg, i, j, st)
1008                 *st = CFQ_RB_ROOT;
1009         RB_CLEAR_NODE(&cfqg->rb_node);
1010
1011         /*
1012          * Take the initial reference that will be released on destroy
1013          * This can be thought of as a joint reference by cgroup and
1014          * elevator which will be dropped by either elevator exit
1015          * or cgroup deletion path depending on who is exiting first.
1016          */
1017         atomic_set(&cfqg->ref, 1);
1018
1019         /* Add group onto cgroup list */
1020         sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1021         cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1022                                         MKDEV(major, minor));
1023         cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1024
1025         /* Add group on cfqd list */
1026         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1027
1028 done:
1029         return cfqg;
1030 }
1031
1032 /*
1033  * Search for the cfq group the current task belongs to. If create = 1, then also
1034  * create the cfq group if it does not exist. request_queue lock must be held.
1035  */
1036 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1037 {
1038         struct cgroup *cgroup;
1039         struct cfq_group *cfqg = NULL;
1040
1041         rcu_read_lock();
1042         cgroup = task_cgroup(current, blkio_subsys_id);
1043         cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1044         if (!cfqg && create)
1045                 cfqg = &cfqd->root_group;
1046         rcu_read_unlock();
1047         return cfqg;
1048 }
1049
1050 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1051 {
1052         atomic_inc(&cfqg->ref);
1053         return cfqg;
1054 }
1055
1056 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1057 {
1058         /* Currently, all async queues are mapped to root group */
1059         if (!cfq_cfqq_sync(cfqq))
1060                 cfqg = &cfqq->cfqd->root_group;
1061
1062         cfqq->cfqg = cfqg;
1063         /* cfqq reference on cfqg */
1064         atomic_inc(&cfqq->cfqg->ref);
1065 }
1066
1067 static void cfq_put_cfqg(struct cfq_group *cfqg)
1068 {
1069         struct cfq_rb_root *st;
1070         int i, j;
1071
1072         BUG_ON(atomic_read(&cfqg->ref) <= 0);
1073         if (!atomic_dec_and_test(&cfqg->ref))
1074                 return;
1075         for_each_cfqg_st(cfqg, i, j, st)
1076                 BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
1077         kfree(cfqg);
1078 }
1079
1080 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1081 {
1082         /* Something wrong if we are trying to remove same group twice */
1083         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1084
1085         hlist_del_init(&cfqg->cfqd_node);
1086
1087         /*
1088          * Put the reference taken at the time of creation so that when all
1089          * queues are gone, group can be destroyed.
1090          */
1091         cfq_put_cfqg(cfqg);
1092 }
1093
1094 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1095 {
1096         struct hlist_node *pos, *n;
1097         struct cfq_group *cfqg;
1098
1099         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1100                 /*
1101                  * If cgroup removal path got to blk_group first and removed
1102                  * it from cgroup list, then it will take care of destroying
1103                  * cfqg also.
1104                  */
1105                 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1106                         cfq_destroy_cfqg(cfqd, cfqg);
1107         }
1108 }
1109
1110 /*
1111  * Blk cgroup controller notification saying that blkio_group object is being
1112  * delinked as the associated cgroup object is going away. That also means that
1113  * no new IO will come in this group. So get rid of this group as soon as
1114  * any pending IO in the group is finished.
1115  *
1116  * This function is called under rcu_read_lock(). key is the rcu protected
1117  * pointer. That means "key" is a valid cfq_data pointer as long as we hold
1118  * the rcu read lock.
1119  *
1120  * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1121  * it should not be NULL as even if the elevator was exiting, the cgroup deletion
1122  * path got to it first.
1123  */
1124 void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1125 {
1126         unsigned long  flags;
1127         struct cfq_data *cfqd = key;
1128
1129         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1130         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1131         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1132 }
1133
1134 #else /* GROUP_IOSCHED */
1135 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1136 {
1137         return &cfqd->root_group;
1138 }
1139
1140 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1141 {
1142         return cfqg;
1143 }
1144
1145 static inline void
1146 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1147         cfqq->cfqg = cfqg;
1148 }
1149
1150 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1151 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1152
1153 #endif /* GROUP_IOSCHED */
1154
1155 /*
1156  * The service trees hold all pending cfq_queues that have
1157  * requests waiting to be processed. They are sorted in the order that
1158  * we will service the queues.
1159  */
1160 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1161                                  bool add_front)
1162 {
1163         struct rb_node **p, *parent;
1164         struct cfq_queue *__cfqq;
1165         unsigned long rb_key;
1166         struct cfq_rb_root *service_tree;
1167         int left;
1168         int new_cfqq = 1;
1169         int group_changed = 0;
1170
1171 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1172         if (!cfqd->cfq_group_isolation
1173             && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1174             && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1175                 /* Move this cfq to root group */
1176                 cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1177                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1178                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1179                 cfqq->orig_cfqg = cfqq->cfqg;
1180                 cfqq->cfqg = &cfqd->root_group;
1181                 atomic_inc(&cfqd->root_group.ref);
1182                 group_changed = 1;
1183         } else if (!cfqd->cfq_group_isolation
1184                    && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1185                 /* cfqq is sequential now and needs to go to its original group */
1186                 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1187                 if (!RB_EMPTY_NODE(&cfqq->rb_node))
1188                         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1189                 cfq_put_cfqg(cfqq->cfqg);
1190                 cfqq->cfqg = cfqq->orig_cfqg;
1191                 cfqq->orig_cfqg = NULL;
1192                 group_changed = 1;
1193                 cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1194         }
1195 #endif
1196
1197         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1198                                                 cfqq_type(cfqq));
1199         if (cfq_class_idle(cfqq)) {
1200                 rb_key = CFQ_IDLE_DELAY;
1201                 parent = rb_last(&service_tree->rb);
1202                 if (parent && parent != &cfqq->rb_node) {
1203                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1204                         rb_key += __cfqq->rb_key;
1205                 } else
1206                         rb_key += jiffies;
1207         } else if (!add_front) {
1208                 /*
1209                  * Get our rb key offset. Subtract any residual slice
1210                  * value carried from last service. A negative resid
1211                  * count indicates slice overrun, and this should position
1212                  * the next service time further away in the tree.
1213                  */
1214                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1215                 rb_key -= cfqq->slice_resid;
1216                 cfqq->slice_resid = 0;
1217         } else {
1218                 rb_key = -HZ;
1219                 __cfqq = cfq_rb_first(service_tree);
1220                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1221         }
1222
1223         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1224                 new_cfqq = 0;
1225                 /*
1226                  * same position, nothing more to do
1227                  */
1228                 if (rb_key == cfqq->rb_key &&
1229                     cfqq->service_tree == service_tree)
1230                         return;
1231
1232                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1233                 cfqq->service_tree = NULL;
1234         }
1235
1236         left = 1;
1237         parent = NULL;
1238         cfqq->service_tree = service_tree;
1239         p = &service_tree->rb.rb_node;
1240         while (*p) {
1241                 struct rb_node **n;
1242
1243                 parent = *p;
1244                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1245
1246                 /*
1247                  * sort by key, that represents service time.
1248                  */
1249                 if (time_before(rb_key, __cfqq->rb_key))
1250                         n = &(*p)->rb_left;
1251                 else {
1252                         n = &(*p)->rb_right;
1253                         left = 0;
1254                 }
1255
1256                 p = n;
1257         }
1258
1259         if (left)
1260                 service_tree->left = &cfqq->rb_node;
1261
1262         cfqq->rb_key = rb_key;
1263         rb_link_node(&cfqq->rb_node, parent, p);
1264         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1265         service_tree->count++;
1266         if ((add_front || !new_cfqq) && !group_changed)
1267                 return;
1268         cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1269 }
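/*
 * rb_key sketch for the normal (non-idle, non-front) case above: the key
 * is "jiffies + a per-queue offset - any leftover slice_resid", where the
 * offset from cfq_slice_offset() grows with the number of sibling queues
 * in the group and shrinks for higher-priority queues.  So of two queues
 * enqueued in the same jiffy in a busy group, the ioprio-0 one sorts
 * earlier than the ioprio-7 one; idle-class queues are keyed CFQ_IDLE_DELAY
 * past the last entry so they only run when nothing else is pending.
 */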
1270
1271 static struct cfq_queue *
1272 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1273                      sector_t sector, struct rb_node **ret_parent,
1274                      struct rb_node ***rb_link)
1275 {
1276         struct rb_node **p, *parent;
1277         struct cfq_queue *cfqq = NULL;
1278
1279         parent = NULL;
1280         p = &root->rb_node;
1281         while (*p) {
1282                 struct rb_node **n;
1283
1284                 parent = *p;
1285                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1286
1287                 /*
1288                  * Sort strictly based on sector.  Smallest to the left,
1289                  * largest to the right.
1290                  */
1291                 if (sector > blk_rq_pos(cfqq->next_rq))
1292                         n = &(*p)->rb_right;
1293                 else if (sector < blk_rq_pos(cfqq->next_rq))
1294                         n = &(*p)->rb_left;
1295                 else
1296                         break;
1297                 p = n;
1298                 cfqq = NULL;
1299         }
1300
1301         *ret_parent = parent;
1302         if (rb_link)
1303                 *rb_link = p;
1304         return cfqq;
1305 }
1306
1307 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1308 {
1309         struct rb_node **p, *parent;
1310         struct cfq_queue *__cfqq;
1311
1312         if (cfqq->p_root) {
1313                 rb_erase(&cfqq->p_node, cfqq->p_root);
1314                 cfqq->p_root = NULL;
1315         }
1316
1317         if (cfq_class_idle(cfqq))
1318                 return;
1319         if (!cfqq->next_rq)
1320                 return;
1321
1322         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1323         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1324                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1325         if (!__cfqq) {
1326                 rb_link_node(&cfqq->p_node, parent, p);
1327                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1328         } else
1329                 cfqq->p_root = NULL;
1330 }
1331
1332 /*
1333  * Update cfqq's position in the service tree.
1334  */
1335 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1336 {
1337         /*
1338          * Resorting requires the cfqq to be on the RR list already.
1339          */
1340         if (cfq_cfqq_on_rr(cfqq)) {
1341                 cfq_service_tree_add(cfqd, cfqq, 0);
1342                 cfq_prio_tree_add(cfqd, cfqq);
1343         }
1344 }
1345
1346 /*
1347  * add to busy list of queues for service, trying to be fair in ordering
1348  * the pending list according to last request service
1349  */
1350 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1351 {
1352         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1353         BUG_ON(cfq_cfqq_on_rr(cfqq));
1354         cfq_mark_cfqq_on_rr(cfqq);
1355         cfqd->busy_queues++;
1356
1357         cfq_resort_rr_list(cfqd, cfqq);
1358 }
1359
1360 /*
1361  * Called when the cfqq no longer has requests pending, remove it from
1362  * the service tree.
1363  */
1364 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1365 {
1366         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1367         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1368         cfq_clear_cfqq_on_rr(cfqq);
1369
1370         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1371                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1372                 cfqq->service_tree = NULL;
1373         }
1374         if (cfqq->p_root) {
1375                 rb_erase(&cfqq->p_node, cfqq->p_root);
1376                 cfqq->p_root = NULL;
1377         }
1378
1379         cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1380         BUG_ON(!cfqd->busy_queues);
1381         cfqd->busy_queues--;
1382 }
1383
1384 /*
1385  * rb tree support functions
1386  */
1387 static void cfq_del_rq_rb(struct request *rq)
1388 {
1389         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1390         const int sync = rq_is_sync(rq);
1391
1392         BUG_ON(!cfqq->queued[sync]);
1393         cfqq->queued[sync]--;
1394
1395         elv_rb_del(&cfqq->sort_list, rq);
1396
1397         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1398                 /*
1399                  * Queue will be deleted from service tree when we actually
1400                  * expire it later. Right now just remove it from prio tree
1401                  * as it is empty.
1402                  */
1403                 if (cfqq->p_root) {
1404                         rb_erase(&cfqq->p_node, cfqq->p_root);
1405                         cfqq->p_root = NULL;
1406                 }
1407         }
1408 }
1409
1410 static void cfq_add_rq_rb(struct request *rq)
1411 {
1412         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1413         struct cfq_data *cfqd = cfqq->cfqd;
1414         struct request *__alias, *prev;
1415
1416         cfqq->queued[rq_is_sync(rq)]++;
1417
1418         /*
1419          * looks a little odd, but the first insert might return an alias.
1420          * If that happens, put the alias on the dispatch list
1421          */
1422         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1423                 cfq_dispatch_insert(cfqd->queue, __alias);
1424
1425         if (!cfq_cfqq_on_rr(cfqq))
1426                 cfq_add_cfqq_rr(cfqd, cfqq);
1427
1428         /*
1429          * check if this request is a better next-serve candidate
1430          */
1431         prev = cfqq->next_rq;
1432         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1433
1434         /*
1435          * adjust priority tree position, if ->next_rq changes
1436          */
1437         if (prev != cfqq->next_rq)
1438                 cfq_prio_tree_add(cfqd, cfqq);
1439
1440         BUG_ON(!cfqq->next_rq);
1441 }
1442
1443 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1444 {
1445         elv_rb_del(&cfqq->sort_list, rq);
1446         cfqq->queued[rq_is_sync(rq)]--;
1447         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1448                                         rq_data_dir(rq), rq_is_sync(rq));
1449         cfq_add_rq_rb(rq);
1450         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1451                         &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1452                         rq_is_sync(rq));
1453 }
1454
1455 static struct request *
1456 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1457 {
1458         struct task_struct *tsk = current;
1459         struct cfq_io_context *cic;
1460         struct cfq_queue *cfqq;
1461
1462         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1463         if (!cic)
1464                 return NULL;
1465
1466         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1467         if (cfqq) {
1468                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1469
1470                 return elv_rb_find(&cfqq->sort_list, sector);
1471         }
1472
1473         return NULL;
1474 }
1475
1476 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1477 {
1478         struct cfq_data *cfqd = q->elevator->elevator_data;
1479
1480         cfqd->rq_in_driver++;
1481         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1482                                                 cfqd->rq_in_driver);
1483
1484         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1485 }
1486
1487 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1488 {
1489         struct cfq_data *cfqd = q->elevator->elevator_data;
1490
1491         WARN_ON(!cfqd->rq_in_driver);
1492         cfqd->rq_in_driver--;
1493         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1494                                                 cfqd->rq_in_driver);
1495 }
1496
1497 static void cfq_remove_request(struct request *rq)
1498 {
1499         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1500
1501         if (cfqq->next_rq == rq)
1502                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1503
1504         list_del_init(&rq->queuelist);
1505         cfq_del_rq_rb(rq);
1506
1507         cfqq->cfqd->rq_queued--;
1508         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1509                                         rq_data_dir(rq), rq_is_sync(rq));
1510         if (rq->cmd_flags & REQ_META) {
1511                 WARN_ON(!cfqq->meta_pending);
1512                 cfqq->meta_pending--;
1513         }
1514 }
1515
1516 static int cfq_merge(struct request_queue *q, struct request **req,
1517                      struct bio *bio)
1518 {
1519         struct cfq_data *cfqd = q->elevator->elevator_data;
1520         struct request *__rq;
1521
1522         __rq = cfq_find_rq_fmerge(cfqd, bio);
1523         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1524                 *req = __rq;
1525                 return ELEVATOR_FRONT_MERGE;
1526         }
1527
1528         return ELEVATOR_NO_MERGE;
1529 }
1530
1531 static void cfq_merged_request(struct request_queue *q, struct request *req,
1532                                int type)
1533 {
1534         if (type == ELEVATOR_FRONT_MERGE) {
1535                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1536
1537                 cfq_reposition_rq_rb(cfqq, req);
1538         }
1539 }
1540
1541 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1542                                 struct bio *bio)
1543 {
1544         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1545                                         bio_data_dir(bio), cfq_bio_sync(bio));
1546 }
1547
1548 static void
1549 cfq_merged_requests(struct request_queue *q, struct request *rq,
1550                     struct request *next)
1551 {
1552         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1553         /*
1554          * reposition in fifo if next is older than rq
1555          */
1556         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1557             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1558                 list_move(&rq->queuelist, &next->queuelist);
1559                 rq_set_fifo_time(rq, rq_fifo_time(next));
1560         }
1561
1562         if (cfqq->next_rq == next)
1563                 cfqq->next_rq = rq;
1564         cfq_remove_request(next);
1565         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1566                                         rq_data_dir(next), rq_is_sync(next));
1567 }
1568
1569 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1570                            struct bio *bio)
1571 {
1572         struct cfq_data *cfqd = q->elevator->elevator_data;
1573         struct cfq_io_context *cic;
1574         struct cfq_queue *cfqq;
1575
1576         /*
1577          * Disallow merge of a sync bio into an async request.
1578          */
1579         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1580                 return false;
1581
1582         /*
1583          * Look up the cfqq that this bio will be queued with. Allow
1584          * merge only if rq is queued there.
1585          */
1586         cic = cfq_cic_lookup(cfqd, current->io_context);
1587         if (!cic)
1588                 return false;
1589
1590         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1591         return cfqq == RQ_CFQQ(rq);
1592 }
1593
1594 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1595 {
1596         del_timer(&cfqd->idle_slice_timer);
1597         cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1598 }
1599
1600 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1601                                    struct cfq_queue *cfqq)
1602 {
1603         if (cfqq) {
1604                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1605                                 cfqd->serving_prio, cfqd->serving_type);
1606                 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1607                 cfqq->slice_start = 0;
1608                 cfqq->dispatch_start = jiffies;
1609                 cfqq->allocated_slice = 0;
1610                 cfqq->slice_end = 0;
1611                 cfqq->slice_dispatch = 0;
1612
1613                 cfq_clear_cfqq_wait_request(cfqq);
1614                 cfq_clear_cfqq_must_dispatch(cfqq);
1615                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1616                 cfq_clear_cfqq_fifo_expire(cfqq);
1617                 cfq_mark_cfqq_slice_new(cfqq);
1618
1619                 cfq_del_timer(cfqd, cfqq);
1620         }
1621
1622         cfqd->active_queue = cfqq;
1623 }
1624
1625 /*
1626  * current cfqq expired its slice (or was too idle), select a new one
1627  */
1628 static void
1629 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1630                     bool timed_out)
1631 {
1632         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1633
1634         if (cfq_cfqq_wait_request(cfqq))
1635                 cfq_del_timer(cfqd, cfqq);
1636
1637         cfq_clear_cfqq_wait_request(cfqq);
1638         cfq_clear_cfqq_wait_busy(cfqq);
1639
1640         /*
1641          * If this cfqq is shared between multiple processes, check to
1642          * make sure that those processes are still issuing I/Os within
1643          * the mean seek distance.  If not, it may be time to break the
1644          * queues apart again.
1645          */
1646         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1647                 cfq_mark_cfqq_split_coop(cfqq);
1648
1649         /*
1650          * store what was left of this slice, if the queue idled/timed out
1651          */
1652         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1653                 cfqq->slice_resid = cfqq->slice_end - jiffies;
1654                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1655         }
1656
1657         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1658
1659         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1660                 cfq_del_cfqq_rr(cfqd, cfqq);
1661
1662         cfq_resort_rr_list(cfqd, cfqq);
1663
1664         if (cfqq == cfqd->active_queue)
1665                 cfqd->active_queue = NULL;
1666
1667         if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1668                 cfqd->grp_service_tree.active = NULL;
1669
1670         if (cfqd->active_cic) {
1671                 put_io_context(cfqd->active_cic->ioc);
1672                 cfqd->active_cic = NULL;
1673         }
1674 }
1675
1676 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1677 {
1678         struct cfq_queue *cfqq = cfqd->active_queue;
1679
1680         if (cfqq)
1681                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1682 }
1683
1684 /*
1685  * Get next queue for service. Unless we have a queue preemption,
1686  * we'll simply select the first cfqq in the service tree.
1687  */
1688 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1689 {
1690         struct cfq_rb_root *service_tree =
1691                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1692                                         cfqd->serving_type);
1693
1694         if (!cfqd->rq_queued)
1695                 return NULL;
1696
1697         /* There is nothing to dispatch */
1698         if (!service_tree)
1699                 return NULL;
1700         if (RB_EMPTY_ROOT(&service_tree->rb))
1701                 return NULL;
1702         return cfq_rb_first(service_tree);
1703 }
1704
1705 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1706 {
1707         struct cfq_group *cfqg;
1708         struct cfq_queue *cfqq;
1709         int i, j;
1710         struct cfq_rb_root *st;
1711
1712         if (!cfqd->rq_queued)
1713                 return NULL;
1714
1715         cfqg = cfq_get_next_cfqg(cfqd);
1716         if (!cfqg)
1717                 return NULL;
1718
1719         for_each_cfqg_st(cfqg, i, j, st)
1720                 if ((cfqq = cfq_rb_first(st)) != NULL)
1721                         return cfqq;
1722         return NULL;
1723 }
1724
1725 /*
1726  * Get and set a new active queue for service.
1727  */
1728 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1729                                               struct cfq_queue *cfqq)
1730 {
1731         if (!cfqq)
1732                 cfqq = cfq_get_next_queue(cfqd);
1733
1734         __cfq_set_active_queue(cfqd, cfqq);
1735         return cfqq;
1736 }
1737
1738 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1739                                           struct request *rq)
1740 {
1741         if (blk_rq_pos(rq) >= cfqd->last_position)
1742                 return blk_rq_pos(rq) - cfqd->last_position;
1743         else
1744                 return cfqd->last_position - blk_rq_pos(rq);
1745 }
1746
1747 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1748                                struct request *rq)
1749 {
1750         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1751 }
1752
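     /*
      * Search the per-priority prio tree for another queue whose next request
      * is close to cfqd->last_position: try an exact match first, then the
      * nearest neighbour on either side, accepting a queue only if its next
      * request lies within CFQQ_CLOSE_THR of the last position.
      */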
1753 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1754                                     struct cfq_queue *cur_cfqq)
1755 {
1756         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1757         struct rb_node *parent, *node;
1758         struct cfq_queue *__cfqq;
1759         sector_t sector = cfqd->last_position;
1760
1761         if (RB_EMPTY_ROOT(root))
1762                 return NULL;
1763
1764         /*
1765          * First, if we find a request starting at the end of the last
1766          * request, choose it.
1767          */
1768         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1769         if (__cfqq)
1770                 return __cfqq;
1771
1772         /*
1773          * If the exact sector wasn't found, the parent of the NULL leaf
1774          * will contain the closest sector.
1775          */
1776         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1777         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1778                 return __cfqq;
1779
1780         if (blk_rq_pos(__cfqq->next_rq) < sector)
1781                 node = rb_next(&__cfqq->p_node);
1782         else
1783                 node = rb_prev(&__cfqq->p_node);
1784         if (!node)
1785                 return NULL;
1786
1787         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1788         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1789                 return __cfqq;
1790
1791         return NULL;
1792 }
1793
1794 /*
1795  * cfqd - obvious
1796  * cur_cfqq - passed in so that we don't decide that the current queue is
1797  *            closely cooperating with itself.
1798  *
1799  * So, basically we're assuming that cur_cfqq has dispatched at least
1800  * one request, and that cfqd->last_position reflects a position on the disk
1801  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1802  * assumption.
1803  */
1804 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1805                                               struct cfq_queue *cur_cfqq)
1806 {
1807         struct cfq_queue *cfqq;
1808
1809         if (cfq_class_idle(cur_cfqq))
1810                 return NULL;
1811         if (!cfq_cfqq_sync(cur_cfqq))
1812                 return NULL;
1813         if (CFQQ_SEEKY(cur_cfqq))
1814                 return NULL;
1815
1816         /*
1817          * Don't search priority tree if it's the only queue in the group.
1818          */
1819         if (cur_cfqq->cfqg->nr_cfqq == 1)
1820                 return NULL;
1821
1822         /*
1823          * We should notice if some of the queues are cooperating, e.g.
1824          * working closely on the same area of the disk. In that case,
1825          * we can group them together and not waste time idling.
1826          */
1827         cfqq = cfqq_close(cfqd, cur_cfqq);
1828         if (!cfqq)
1829                 return NULL;
1830
1831         /* If new queue belongs to different cfq_group, don't choose it */
1832         if (cur_cfqq->cfqg != cfqq->cfqg)
1833                 return NULL;
1834
1835         /*
1836          * It only makes sense to merge sync queues.
1837          */
1838         if (!cfq_cfqq_sync(cfqq))
1839                 return NULL;
1840         if (CFQQ_SEEKY(cfqq))
1841                 return NULL;
1842
1843         /*
1844          * Do not merge queues of different priority classes
1845          */
1846         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1847                 return NULL;
1848
1849         return cfqq;
1850 }
1851
1852 /*
1853  * Determine whether we should enforce idle window for this queue.
1854  */
1855
1856 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1857 {
1858         enum wl_prio_t prio = cfqq_prio(cfqq);
1859         struct cfq_rb_root *service_tree = cfqq->service_tree;
1860
1861         BUG_ON(!service_tree);
1862         BUG_ON(!service_tree->count);
1863
1864         if (!cfqd->cfq_slice_idle)
1865                 return false;
1866
1867         /* We never do for idle class queues. */
1868         if (prio == IDLE_WORKLOAD)
1869                 return false;
1870
1871         /* We do for queues that were marked with idle window flag. */
1872         if (cfq_cfqq_idle_window(cfqq) &&
1873            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1874                 return true;
1875
1876         /*
1877          * Otherwise, we do only if they are the last ones
1878          * in their service tree.
1879          */
1880         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1881                 return true;
1882         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1883                         service_tree->count);
1884         return false;
1885 }
1886
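     /*
      * Arm the idle timer for the active queue. Even when per-queue idling is
      * not warranted (cfq_should_idle() says no), we may still idle on behalf
      * of the whole cfq_group for cfq_group_idle jiffies, so a group whose
      * only queue went briefly empty does not lose its share of the disk.
      */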
1887 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1888 {
1889         struct cfq_queue *cfqq = cfqd->active_queue;
1890         struct cfq_io_context *cic;
1891         unsigned long sl, group_idle = 0;
1892
1893         /*
1894          * For an SSD device without a seek penalty, disable idling. But only
1895          * do so for devices that support queuing, otherwise we still have a
1896          * problem with sync vs async workloads.
1897          */
1898         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1899                 return;
1900
1901         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1902         WARN_ON(cfq_cfqq_slice_new(cfqq));
1903
1904         /*
1905          * idle is disabled, either manually or by past process history
1906          */
1907         if (!cfq_should_idle(cfqd, cfqq)) {
1908                 /* no queue idling. Check for group idling */
1909                 if (cfqd->cfq_group_idle)
1910                         group_idle = cfqd->cfq_group_idle;
1911                 else
1912                         return;
1913         }
1914
1915         /*
1916          * still active requests from this queue, don't idle
1917          */
1918         if (cfqq->dispatched)
1919                 return;
1920
1921         /*
1922          * task has exited, don't wait
1923          */
1924         cic = cfqd->active_cic;
1925         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1926                 return;
1927
1928         /*
1929          * If our average think time is larger than the remaining time
1930          * slice, then don't idle. This avoids overrunning the allotted
1931          * time slice.
1932          */
1933         if (sample_valid(cic->ttime_samples) &&
1934             (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1935                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1936                                 cic->ttime_mean);
1937                 return;
1938         }
1939
1940         /* There are other queues in the group, don't do group idle */
1941         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1942                 return;
1943
1944         cfq_mark_cfqq_wait_request(cfqq);
1945
1946         if (group_idle)
1947                 sl = cfqd->cfq_group_idle;
1948         else
1949                 sl = cfqd->cfq_slice_idle;
1950
1951         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1952         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1953         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1954                         group_idle ? 1 : 0);
1955 }
1956
1957 /*
1958  * Move request from internal lists to the request queue dispatch list.
1959  */
1960 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1961 {
1962         struct cfq_data *cfqd = q->elevator->elevator_data;
1963         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1964
1965         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1966
1967         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1968         cfq_remove_request(rq);
1969         cfqq->dispatched++;
1970         (RQ_CFQG(rq))->dispatched++;
1971         elv_dispatch_sort(q, rq);
1972
1973         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1974         cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1975                                         rq_data_dir(rq), rq_is_sync(rq));
1976 }
1977
1978 /*
1979  * return expired entry, or NULL to just start from scratch in rbtree
1980  */
1981 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1982 {
1983         struct request *rq = NULL;
1984
1985         if (cfq_cfqq_fifo_expire(cfqq))
1986                 return NULL;
1987
1988         cfq_mark_cfqq_fifo_expire(cfqq);
1989
1990         if (list_empty(&cfqq->fifo))
1991                 return NULL;
1992
1993         rq = rq_entry_fifo(cfqq->fifo.next);
1994         if (time_before(jiffies, rq_fifo_time(rq)))
1995                 rq = NULL;
1996
1997         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1998         return rq;
1999 }
2000
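     /*
      * The allowance scales with io priority. Illustrative numbers only,
      * assuming the default cfq_slice_async_rq of 2 and 8 best-effort levels:
      * ioprio 0 may dispatch up to 2 * (2 + 2 * 7) = 32 requests per slice,
      * while ioprio 7 gets only 2 * (2 + 2 * 0) = 4.
      */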
2001 static inline int
2002 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2003 {
2004         const int base_rq = cfqd->cfq_slice_async_rq;
2005
2006         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2007
2008         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
2009 }
2010
2011 /*
2012  * Must be called with the queue_lock held.
2013  */
2014 static int cfqq_process_refs(struct cfq_queue *cfqq)
2015 {
2016         int process_refs, io_refs;
2017
2018         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2019         process_refs = atomic_read(&cfqq->ref) - io_refs;
2020         BUG_ON(process_refs < 0);
2021         return process_refs;
2022 }
2023
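     /*
      * Schedule cfqq for a merge with new_cfqq (or with the queue at the end
      * of new_cfqq's existing merge chain). The queue with fewer process
      * references is pointed at the busier one via ->new_cfqq, and its
      * process references are added to the target so it cannot go away
      * before the merge actually happens.
      */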
2024 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2025 {
2026         int process_refs, new_process_refs;
2027         struct cfq_queue *__cfqq;
2028
2029         /*
2030          * If there are no process references on the new_cfqq, then it is
2031          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2032          * chain may have dropped their last reference (not just their
2033          * last process reference).
2034          */
2035         if (!cfqq_process_refs(new_cfqq))
2036                 return;
2037
2038         /* Avoid a circular list and skip interim queue merges */
2039         while ((__cfqq = new_cfqq->new_cfqq)) {
2040                 if (__cfqq == cfqq)
2041                         return;
2042                 new_cfqq = __cfqq;
2043         }
2044
2045         process_refs = cfqq_process_refs(cfqq);
2046         new_process_refs = cfqq_process_refs(new_cfqq);
2047         /*
2048          * If the process for the cfqq has gone away, there is no
2049          * sense in merging the queues.
2050          */
2051         if (process_refs == 0 || new_process_refs == 0)
2052                 return;
2053
2054         /*
2055          * Merge in the direction of the lesser amount of work.
2056          */
2057         if (new_process_refs >= process_refs) {
2058                 cfqq->new_cfqq = new_cfqq;
2059                 atomic_add(process_refs, &new_cfqq->ref);
2060         } else {
2061                 new_cfqq->new_cfqq = cfqq;
2062                 atomic_add(new_process_refs, &cfqq->ref);
2063         }
2064 }
2065
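     /*
      * Within the given priority class, pick the workload type (ASYNC,
      * SYNC_NOIDLE or SYNC) whose first pending queue has the lowest rb_key,
      * i.e. roughly the one that has been waiting for service the longest.
      */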
2066 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2067                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2068 {
2069         struct cfq_queue *queue;
2070         int i;
2071         bool key_valid = false;
2072         unsigned long lowest_key = 0;
2073         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2074
2075         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2076                 /* select the one with lowest rb_key */
2077                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2078                 if (queue &&
2079                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2080                         lowest_key = queue->rb_key;
2081                         cur_best = i;
2082                         key_valid = true;
2083                 }
2084         }
2085
2086         return cur_best;
2087 }
2088
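     /*
      * Decide what this group serves next: first the priority class (RT
      * before BE before IDLE), then the workload type within it, and finally
      * a workload slice sized as the group's share of the target latency in
      * proportion to how many of its busy queues belong to the chosen
      * workload. Hypothetical example: with a 300ms group slice and 2 of the
      * 6 busy queues in the class being of the chosen type, the workload
      * runs for roughly 100ms before we re-evaluate.
      */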
2089 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2090 {
2091         unsigned slice;
2092         unsigned count;
2093         struct cfq_rb_root *st;
2094         unsigned group_slice;
2095
2096         if (!cfqg) {
2097                 cfqd->serving_prio = IDLE_WORKLOAD;
2098                 cfqd->workload_expires = jiffies + 1;
2099                 return;
2100         }
2101
2102         /* Choose next priority. RT > BE > IDLE */
2103         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2104                 cfqd->serving_prio = RT_WORKLOAD;
2105         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2106                 cfqd->serving_prio = BE_WORKLOAD;
2107         else {
2108                 cfqd->serving_prio = IDLE_WORKLOAD;
2109                 cfqd->workload_expires = jiffies + 1;
2110                 return;
2111         }
2112
2113         /*
2114          * For RT and BE, we also have to choose the type
2115          * (SYNC, SYNC_NOIDLE, ASYNC), and compute a workload
2116          * expiration time
2117          */
2118         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2119         count = st->count;
2120
2121         /*
2122          * check workload expiration, and that we still have other queues ready
2123          */
2124         if (count && !time_after(jiffies, cfqd->workload_expires))
2125                 return;
2126
2127         /* otherwise select new workload type */
2128         cfqd->serving_type =
2129                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2130         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2131         count = st->count;
2132
2133         /*
2134          * the workload slice is computed as a fraction of target latency
2135          * proportional to the number of queues in that workload, over
2136          * all the queues in the same priority class
2137          */
2138         group_slice = cfq_group_slice(cfqd, cfqg);
2139
2140         slice = group_slice * count /
2141                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2142                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2143
2144         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2145                 unsigned int tmp;
2146
2147                 /*
2148                  * Async queues are currently system wide. Just taking the
2149                  * proportion of queues within the same group will lead to a
2150                  * higher async ratio system wide, as the root group generally
2151                  * has a higher weight. A more accurate approach would be to
2152                  * calculate the system wide async/sync ratio.
2153                  */
2154                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2155                 tmp = tmp/cfqd->busy_queues;
2156                 slice = min_t(unsigned, slice, tmp);
2157
2158                 /* async workload slice is scaled down according to
2159                  * the sync/async slice ratio. */
2160                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2161         } else
2162                 /* sync workload slice is at least 2 * cfq_slice_idle */
2163                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2164
2165         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2166         cfq_log(cfqd, "workload slice:%d", slice);
2167         cfqd->workload_expires = jiffies + slice;
2168         cfqd->noidle_tree_requires_idle = false;
2169 }
2170
2171 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2172 {
2173         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2174         struct cfq_group *cfqg;
2175
2176         if (RB_EMPTY_ROOT(&st->rb))
2177                 return NULL;
2178         cfqg = cfq_rb_first_group(st);
2179         st->active = &cfqg->rb_node;
2180         update_min_vdisktime(st);
2181         return cfqg;
2182 }
2183
2184 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2185 {
2186         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2187
2188         cfqd->serving_group = cfqg;
2189
2190         /* Restore the workload type data */
2191         if (cfqg->saved_workload_slice) {
2192                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2193                 cfqd->serving_type = cfqg->saved_workload;
2194                 cfqd->serving_prio = cfqg->saved_serving_prio;
2195         } else
2196                 cfqd->workload_expires = jiffies - 1;
2197
2198         choose_service_tree(cfqd, cfqg);
2199 }
2200
2201 /*
2202  * Select a queue for service. If we have a current active queue,
2203  * check whether to continue servicing it, or retrieve and set a new one.
2204  */
2205 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2206 {
2207         struct cfq_queue *cfqq, *new_cfqq = NULL;
2208
2209         cfqq = cfqd->active_queue;
2210         if (!cfqq)
2211                 goto new_queue;
2212
2213         if (!cfqd->rq_queued)
2214                 return NULL;
2215
2216         /*
2217          * We were waiting for the group to get backlogged. Expire the queue
2218          */
2219         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2220                 goto expire;
2221
2222         /*
2223          * The active queue has run out of time, expire it and select new.
2224          */
2225         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2226                 /*
2227                  * If slice had not expired at the completion of last request
2228                  * we might not have turned on wait_busy flag. Don't expire
2229                  * the queue yet. Allow the group to get backlogged.
2230                  *
2231                  * The very fact that we have used the slice means that we
2232                  * have been idling all along on this queue and it should be
2233                  * ok to wait for this request to complete.
2234                  */
2235                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2236                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2237                         cfqq = NULL;
2238                         goto keep_queue;
2239                 } else
2240                         goto check_group_idle;
2241         }
2242
2243         /*
2244          * The active queue has requests and isn't expired, allow it to
2245          * dispatch.
2246          */
2247         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2248                 goto keep_queue;
2249
2250         /*
2251          * If another queue has a request waiting within our mean seek
2252          * distance, let it run.  The expire code will check for close
2253          * cooperators and put the close queue at the front of the service
2254          * tree.  If possible, merge the expiring queue with the new cfqq.
2255          */
2256         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2257         if (new_cfqq) {
2258                 if (!cfqq->new_cfqq)
2259                         cfq_setup_merge(cfqq, new_cfqq);
2260                 goto expire;
2261         }
2262
2263         /*
2264          * No requests pending. If the active queue still has requests in
2265          * flight or is idling for a new request, allow either of these
2266          * conditions to happen (or time out) before selecting a new queue.
2267          */
2268         if (timer_pending(&cfqd->idle_slice_timer)) {
2269                 cfqq = NULL;
2270                 goto keep_queue;
2271         }
2272
2273         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2274                 cfqq = NULL;
2275                 goto keep_queue;
2276         }
2277
2278         /*
2279          * If group idle is enabled and there are requests dispatched from
2280          * this group, wait for requests to complete.
2281          */
2282 check_group_idle:
2283         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2284             && cfqq->cfqg->dispatched) {
2285                 cfqq = NULL;
2286                 goto keep_queue;
2287         }
2288
2289 expire:
2290         cfq_slice_expired(cfqd, 0);
2291 new_queue:
2292         /*
2293          * Current queue expired. Check if we have to switch to a new
2294          * service tree
2295          */
2296         if (!new_cfqq)
2297                 cfq_choose_cfqg(cfqd);
2298
2299         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2300 keep_queue:
2301         return cfqq;
2302 }
2303
2304 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2305 {
2306         int dispatched = 0;
2307
2308         while (cfqq->next_rq) {
2309                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2310                 dispatched++;
2311         }
2312
2313         BUG_ON(!list_empty(&cfqq->fifo));
2314
2315         /* By default cfqq is not expired if it is empty. Do it explicitly */
2316         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2317         return dispatched;
2318 }
2319
2320 /*
2321  * Drain our current requests. Used for barriers and when switching
2322  * io schedulers on-the-fly.
2323  */
2324 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2325 {
2326         struct cfq_queue *cfqq;
2327         int dispatched = 0;
2328
2329         /* Expire the timeslice of the current active queue first */
2330         cfq_slice_expired(cfqd, 0);
2331         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2332                 __cfq_set_active_queue(cfqd, cfqq);
2333                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2334         }
2335
2336         BUG_ON(cfqd->busy_queues);
2337
2338         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2339         return dispatched;
2340 }
2341
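     /*
      * Estimate whether the requests already dispatched from this queue will
      * use up the rest of its slice, assuming each in-flight request costs
      * roughly one cfq_slice_idle period to complete.
      */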
2342 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2343         struct cfq_queue *cfqq)
2344 {
2345         /* the queue hasn't finished any request, can't estimate */
2346         if (cfq_cfqq_slice_new(cfqq))
2347                 return true;
2348         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2349                 cfqq->slice_end))
2350                 return true;
2351
2352         return false;
2353 }
2354
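     /*
      * Decide whether cfqq may dispatch another request right now: a queue
      * that idles first drains async IO, async queues wait while sync IO is
      * in flight, and the per-queue depth is normally capped at cfq_quantum/2
      * (growing towards cfq_quantum early in the slice, and uncapped when
      * this is the only busy queue).
      */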
2355 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2356 {
2357         unsigned int max_dispatch;
2358
2359         /*
2360          * Drain async requests before we start sync IO
2361          */
2362         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2363                 return false;
2364
2365         /*
2366          * If this is an async queue and we have sync IO in flight, let it wait
2367          */
2368         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2369                 return false;
2370
2371         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2372         if (cfq_class_idle(cfqq))
2373                 max_dispatch = 1;
2374
2375         /*
2376          * Does this cfqq already have too much IO in flight?
2377          */
2378         if (cfqq->dispatched >= max_dispatch) {
2379                 /*
2380                  * idle queue must always only have a single IO in flight
2381                  */
2382                 if (cfq_class_idle(cfqq))
2383                         return false;
2384
2385                 /*
2386                  * We have other queues, don't allow more IO from this one
2387                  */
2388                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
2389                         return false;
2390
2391                 /*
2392                  * Sole queue user, no limit
2393                  */
2394                 if (cfqd->busy_queues == 1)
2395                         max_dispatch = -1;
2396                 else
2397                         /*
2398                          * Normally we start throttling cfqq when cfq_quantum/2
2399                          * requests have been dispatched. But we can drive
2400                          * deeper queue depths at the beginning of the slice,
2401                          * subject to the upper limit of cfq_quantum.
2402                          */
2403                         max_dispatch = cfqd->cfq_quantum;
2404         }
2405
2406         /*
2407          * Async queues must wait a bit before being allowed to dispatch.
2408          * We also ramp up the dispatch depth gradually for async IO,
2409          * based on the last sync IO we serviced
2410          */
2411         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2412                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2413                 unsigned int depth;
2414
2415                 depth = last_sync / cfqd->cfq_slice[1];
2416                 if (!depth && !cfqq->dispatched)
2417                         depth = 1;
2418                 if (depth < max_dispatch)
2419                         max_dispatch = depth;
2420         }
2421
2422         /*
2423          * If we're below the current max, allow a dispatch
2424          */
2425         return cfqq->dispatched < max_dispatch;
2426 }
2427
2428 /*
2429  * Dispatch a request from cfqq, moving it to the request queue
2430  * dispatch list.
2431  */
2432 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2433 {
2434         struct request *rq;
2435
2436         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2437
2438         if (!cfq_may_dispatch(cfqd, cfqq))
2439                 return false;
2440
2441         /*
2442          * follow expired path, else get first next available
2443          */
2444         rq = cfq_check_fifo(cfqq);
2445         if (!rq)
2446                 rq = cfqq->next_rq;
2447
2448         /*
2449          * insert request into driver dispatch list
2450          */
2451         cfq_dispatch_insert(cfqd->queue, rq);
2452
2453         if (!cfqd->active_cic) {
2454                 struct cfq_io_context *cic = RQ_CIC(rq);
2455
2456                 atomic_long_inc(&cic->ioc->refcount);
2457                 cfqd->active_cic = cic;
2458         }
2459
2460         return true;
2461 }
2462
2463 /*
2464  * Find the cfqq that we need to service and move a request from that to the
2465  * dispatch list
2466  */
2467 static int cfq_dispatch_requests(struct request_queue *q, int force)
2468 {
2469         struct cfq_data *cfqd = q->elevator->elevator_data;
2470         struct cfq_queue *cfqq;
2471
2472         if (!cfqd->busy_queues)
2473                 return 0;
2474
2475         if (unlikely(force))
2476                 return cfq_forced_dispatch(cfqd);
2477
2478         cfqq = cfq_select_queue(cfqd);
2479         if (!cfqq)
2480                 return 0;
2481
2482         /*
2483          * Dispatch a request from this cfqq, if it is allowed
2484          */
2485         if (!cfq_dispatch_request(cfqd, cfqq))
2486                 return 0;
2487
2488         cfqq->slice_dispatch++;
2489         cfq_clear_cfqq_must_dispatch(cfqq);
2490
2491         /*
2492          * expire an async queue immediately if it has used up its slice. idle
2493          * queues always expire after one dispatch round.
2494          */
2495         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2496             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2497             cfq_class_idle(cfqq))) {
2498                 cfqq->slice_end = jiffies + 1;
2499                 cfq_slice_expired(cfqd, 0);
2500         }
2501
2502         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2503         return 1;
2504 }
2505
2506 /*
2507  * task holds one reference to the queue, dropped when task exits. each rq
2508  * in-flight on this queue also holds a reference, dropped when rq is freed.
2509  *
2510  * Each cfq queue took a reference on the parent group. Drop it now.
2511  * queue lock must be held here.
2512  */
2513 static void cfq_put_queue(struct cfq_queue *cfqq)
2514 {
2515         struct cfq_data *cfqd = cfqq->cfqd;
2516         struct cfq_group *cfqg, *orig_cfqg;
2517
2518         BUG_ON(atomic_read(&cfqq->ref) <= 0);
2519
2520         if (!atomic_dec_and_test(&cfqq->ref))
2521                 return;
2522
2523         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2524         BUG_ON(rb_first(&cfqq->sort_list));
2525         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2526         cfqg = cfqq->cfqg;
2527         orig_cfqg = cfqq->orig_cfqg;
2528
2529         if (unlikely(cfqd->active_queue == cfqq)) {
2530                 __cfq_slice_expired(cfqd, cfqq, 0);
2531                 cfq_schedule_dispatch(cfqd);
2532         }
2533
2534         BUG_ON(cfq_cfqq_on_rr(cfqq));
2535         kmem_cache_free(cfq_pool, cfqq);
2536         cfq_put_cfqg(cfqg);
2537         if (orig_cfqg)
2538                 cfq_put_cfqg(orig_cfqg);
2539 }
2540
2541 /*
2542  * Must always be called with the rcu_read_lock() held
2543  */
2544 static void
2545 __call_for_each_cic(struct io_context *ioc,
2546                     void (*func)(struct io_context *, struct cfq_io_context *))
2547 {
2548         struct cfq_io_context *cic;
2549         struct hlist_node *n;
2550
2551         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2552                 func(ioc, cic);
2553 }
2554
2555 /*
2556  * Call func for each cic attached to this ioc.
2557  */
2558 static void
2559 call_for_each_cic(struct io_context *ioc,
2560                   void (*func)(struct io_context *, struct cfq_io_context *))
2561 {
2562         rcu_read_lock();
2563         __call_for_each_cic(ioc, func);
2564         rcu_read_unlock();
2565 }
2566
2567 static void cfq_cic_free_rcu(struct rcu_head *head)
2568 {
2569         struct cfq_io_context *cic;
2570
2571         cic = container_of(head, struct cfq_io_context, rcu_head);
2572
2573         kmem_cache_free(cfq_ioc_pool, cic);
2574         elv_ioc_count_dec(cfq_ioc_count);
2575
2576         if (ioc_gone) {
2577                 /*
2578                  * CFQ scheduler is exiting, grab exit lock and check
2579                  * the pending io context count. If it hits zero,
2580                  * complete ioc_gone and set it back to NULL
2581                  */
2582                 spin_lock(&ioc_gone_lock);
2583                 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2584                         complete(ioc_gone);
2585                         ioc_gone = NULL;
2586                 }
2587                 spin_unlock(&ioc_gone_lock);
2588         }
2589 }
2590
2591 static void cfq_cic_free(struct cfq_io_context *cic)
2592 {
2593         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2594 }
2595
2596 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2597 {
2598         unsigned long flags;
2599         unsigned long dead_key = (unsigned long) cic->key;
2600
2601         BUG_ON(!(dead_key & CIC_DEAD_KEY));
2602
2603         spin_lock_irqsave(&ioc->lock, flags);
2604         radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2605         hlist_del_rcu(&cic->cic_list);
2606         spin_unlock_irqrestore(&ioc->lock, flags);
2607
2608         cfq_cic_free(cic);
2609 }
2610
2611 /*
2612  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2613  * There are only two callers of this: ->dtor(), which is called with the
2614  * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
2615  */
2616 static void cfq_free_io_context(struct io_context *ioc)
2617 {
2618         /*
2619          * ioc->refcount is zero here, or we are called from elv_unregister(),
2620          * so no more cic's are allowed to be linked into this ioc.  So it
2621          * should be ok to iterate over the known list, we will see all cic's
2622          * since no new ones are added.
2623          */
2624         __call_for_each_cic(ioc, cic_free_func);
2625 }
2626
2627 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2628 {
2629         struct cfq_queue *__cfqq, *next;
2630
2631         /*
2632          * If this queue was scheduled to merge with another queue, be
2633          * sure to drop the reference taken on that queue (and others in
2634          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2635          */
2636         __cfqq = cfqq->new_cfqq;
2637         while (__cfqq) {
2638                 if (__cfqq == cfqq) {
2639                         WARN(1, "cfqq->new_cfqq loop detected\n");
2640                         break;
2641                 }
2642                 next = __cfqq->new_cfqq;
2643                 cfq_put_queue(__cfqq);
2644                 __cfqq = next;
2645         }
2646 }
2647
2648 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2649 {
2650         if (unlikely(cfqq == cfqd->active_queue)) {
2651                 __cfq_slice_expired(cfqd, cfqq, 0);
2652                 cfq_schedule_dispatch(cfqd);
2653         }
2654
2655         cfq_put_cooperator(cfqq);
2656
2657         cfq_put_queue(cfqq);
2658 }
2659
2660 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2661                                          struct cfq_io_context *cic)
2662 {
2663         struct io_context *ioc = cic->ioc;
2664
2665         list_del_init(&cic->queue_list);
2666
2667         /*
2668          * Make sure dead mark is seen for dead queues
2669          */
2670         smp_wmb();
2671         cic->key = cfqd_dead_key(cfqd);
2672
2673         if (ioc->ioc_data == cic)
2674                 rcu_assign_pointer(ioc->ioc_data, NULL);
2675
2676         if (cic->cfqq[BLK_RW_ASYNC]) {
2677                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2678                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2679         }
2680
2681         if (cic->cfqq[BLK_RW_SYNC]) {
2682                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2683                 cic->cfqq[BLK_RW_SYNC] = NULL;
2684         }
2685 }
2686
2687 static void cfq_exit_single_io_context(struct io_context *ioc,
2688                                        struct cfq_io_context *cic)
2689 {
2690         struct cfq_data *cfqd = cic_to_cfqd(cic);
2691
2692         if (cfqd) {
2693                 struct request_queue *q = cfqd->queue;
2694                 unsigned long flags;
2695
2696                 spin_lock_irqsave(q->queue_lock, flags);
2697
2698                 /*
2699                  * Ensure we get a fresh copy of the ->key to prevent
2700                  * race between exiting task and queue
2701                  */
2702                 smp_read_barrier_depends();
2703                 if (cic->key == cfqd)
2704                         __cfq_exit_single_io_context(cfqd, cic);
2705
2706                 spin_unlock_irqrestore(q->queue_lock, flags);
2707         }
2708 }
2709
2710 /*
2711  * The process that ioc belongs to has exited; we need to clean up
2712  * and put the internal structures we have that belong to that process.
2713  */
2714 static void cfq_exit_io_context(struct io_context *ioc)
2715 {
2716         call_for_each_cic(ioc, cfq_exit_single_io_context);
2717 }
2718
2719 static struct cfq_io_context *
2720 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2721 {
2722         struct cfq_io_context *cic;
2723
2724         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2725                                                         cfqd->queue->node);
2726         if (cic) {
2727                 cic->last_end_request = jiffies;
2728                 INIT_LIST_HEAD(&cic->queue_list);
2729                 INIT_HLIST_NODE(&cic->cic_list);
2730                 cic->dtor = cfq_free_io_context;
2731                 cic->exit = cfq_exit_io_context;
2732                 elv_ioc_count_inc(cfq_ioc_count);
2733         }
2734
2735         return cic;
2736 }
2737
2738 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2739 {
2740         struct task_struct *tsk = current;
2741         int ioprio_class;
2742
2743         if (!cfq_cfqq_prio_changed(cfqq))
2744                 return;
2745
2746         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2747         switch (ioprio_class) {
2748         default:
2749                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2750         case IOPRIO_CLASS_NONE:
2751                 /*
2752                  * no prio set, inherit CPU scheduling settings
2753                  */
2754                 cfqq->ioprio = task_nice_ioprio(tsk);
2755                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2756                 break;
2757         case IOPRIO_CLASS_RT:
2758                 cfqq->ioprio = task_ioprio(ioc);
2759                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2760                 break;
2761         case IOPRIO_CLASS_BE:
2762                 cfqq->ioprio = task_ioprio(ioc);
2763                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2764                 break;
2765         case IOPRIO_CLASS_IDLE:
2766                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2767                 cfqq->ioprio = 7;
2768                 cfq_clear_cfqq_idle_window(cfqq);
2769                 break;
2770         }
2771
2772         /*
2773          * keep track of original prio settings in case we have to temporarily
2774          * elevate the priority of this queue
2775          */
2776         cfqq->org_ioprio = cfqq->ioprio;
2777         cfqq->org_ioprio_class = cfqq->ioprio_class;
2778         cfq_clear_cfqq_prio_changed(cfqq);
2779 }
2780
2781 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2782 {
2783         struct cfq_data *cfqd = cic_to_cfqd(cic);
2784         struct cfq_queue *cfqq;
2785         unsigned long flags;
2786
2787         if (unlikely(!cfqd))
2788                 return;
2789
2790         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2791
2792         cfqq = cic->cfqq[BLK_RW_ASYNC];
2793         if (cfqq) {
2794                 struct cfq_queue *new_cfqq;
2795                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2796                                                 GFP_ATOMIC);
2797                 if (new_cfqq) {
2798                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2799                         cfq_put_queue(cfqq);
2800                 }
2801         }
2802
2803         cfqq = cic->cfqq[BLK_RW_SYNC];
2804         if (cfqq)
2805                 cfq_mark_cfqq_prio_changed(cfqq);
2806
2807         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2808 }
2809
2810 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2811 {
2812         call_for_each_cic(ioc, changed_ioprio);
2813         ioc->ioprio_changed = 0;
2814 }
2815
2816 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2817                           pid_t pid, bool is_sync)
2818 {
2819         RB_CLEAR_NODE(&cfqq->rb_node);
2820         RB_CLEAR_NODE(&cfqq->p_node);
2821         INIT_LIST_HEAD(&cfqq->fifo);
2822
2823         atomic_set(&cfqq->ref, 0);
2824         cfqq->cfqd = cfqd;
2825
2826         cfq_mark_cfqq_prio_changed(cfqq);
2827
2828         if (is_sync) {
2829                 if (!cfq_class_idle(cfqq))
2830                         cfq_mark_cfqq_idle_window(cfqq);
2831                 cfq_mark_cfqq_sync(cfqq);
2832         }
2833         cfqq->pid = pid;
2834 }
2835
2836 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2837 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2838 {
2839         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2840         struct cfq_data *cfqd = cic_to_cfqd(cic);
2841         unsigned long flags;
2842         struct request_queue *q;
2843
2844         if (unlikely(!cfqd))
2845                 return;
2846
2847         q = cfqd->queue;
2848
2849         spin_lock_irqsave(q->queue_lock, flags);
2850
2851         if (sync_cfqq) {
2852                 /*
2853                  * Drop reference to sync queue. A new sync queue will be
2854                  * assigned in new group upon arrival of a fresh request.
2855                  */
2856                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2857                 cic_set_cfqq(cic, NULL, 1);
2858                 cfq_put_queue(sync_cfqq);
2859         }
2860
2861         spin_unlock_irqrestore(q->queue_lock, flags);
2862 }
2863
2864 static void cfq_ioc_set_cgroup(struct io_context *ioc)
2865 {
2866         call_for_each_cic(ioc, changed_cgroup);
2867         ioc->cgroup_changed = 0;
2868 }
2869 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2870
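     /*
      * Find the cfqq this ioc should use for sync or async IO, allocating and
      * initialising a new one if needed. If the allocation fails we fall back
      * to the embedded oom_cfqq, so callers never see a NULL queue.
      */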
2871 static struct cfq_queue *
2872 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2873                      struct io_context *ioc, gfp_t gfp_mask)
2874 {
2875         struct cfq_queue *cfqq, *new_cfqq = NULL;
2876         struct cfq_io_context *cic;
2877         struct cfq_group *cfqg;
2878
2879 retry:
2880         cfqg = cfq_get_cfqg(cfqd, 1);
2881         cic = cfq_cic_lookup(cfqd, ioc);
2882         /* cic always exists here */
2883         cfqq = cic_to_cfqq(cic, is_sync);
2884
2885         /*
2886          * Always try a new alloc if we fell back to the OOM cfqq
2887          * originally, since it should just be a temporary situation.
2888          */
2889         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2890                 cfqq = NULL;
2891                 if (new_cfqq) {
2892                         cfqq = new_cfqq;
2893                         new_cfqq = NULL;
2894                 } else if (gfp_mask & __GFP_WAIT) {
2895                         spin_unlock_irq(cfqd->queue->queue_lock);
2896                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2897                                         gfp_mask | __GFP_ZERO,
2898                                         cfqd->queue->node);
2899                         spin_lock_irq(cfqd->queue->queue_lock);
2900                         if (new_cfqq)
2901                                 goto retry;
2902                 } else {
2903                         cfqq = kmem_cache_alloc_node(cfq_pool,
2904                                         gfp_mask | __GFP_ZERO,
2905                                         cfqd->queue->node);
2906                 }
2907
2908                 if (cfqq) {
2909                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2910                         cfq_init_prio_data(cfqq, ioc);
2911                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2912                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2913                 } else
2914                         cfqq = &cfqd->oom_cfqq;
2915         }
2916
2917         if (new_cfqq)
2918                 kmem_cache_free(cfq_pool, new_cfqq);
2919
2920         return cfqq;
2921 }
2922
2923 static struct cfq_queue **
2924 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2925 {
2926         switch (ioprio_class) {
2927         case IOPRIO_CLASS_RT:
2928                 return &cfqd->async_cfqq[0][ioprio];
2929         case IOPRIO_CLASS_BE:
2930                 return &cfqd->async_cfqq[1][ioprio];
2931         case IOPRIO_CLASS_IDLE:
2932                 return &cfqd->async_idle_cfqq;
2933         default:
2934                 BUG();
2935         }
2936 }
2937
2938 static struct cfq_queue *
2939 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2940               gfp_t gfp_mask)
2941 {
2942         const int ioprio = task_ioprio(ioc);
2943         const int ioprio_class = task_ioprio_class(ioc);
2944         struct cfq_queue **async_cfqq = NULL;
2945         struct cfq_queue *cfqq = NULL;
2946
2947         if (!is_sync) {
2948                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2949                 cfqq = *async_cfqq;
2950         }
2951
2952         if (!cfqq)
2953                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2954
2955         /*
2956          * pin the queue now that it's allocated, scheduler exit will prune it
2957          */
2958         if (!is_sync && !(*async_cfqq)) {
2959                 atomic_inc(&cfqq->ref);
2960                 *async_cfqq = cfqq;
2961         }
2962
2963         atomic_inc(&cfqq->ref);
2964         return cfqq;
2965 }
2966
2967 /*
2968  * We drop cfq io contexts lazily, so we may find a dead one.
2969  */
2970 static void
2971 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2972                   struct cfq_io_context *cic)
2973 {
2974         unsigned long flags;
2975
2976         WARN_ON(!list_empty(&cic->queue_list));
2977         BUG_ON(cic->key != cfqd_dead_key(cfqd));
2978
2979         spin_lock_irqsave(&ioc->lock, flags);
2980
2981         BUG_ON(ioc->ioc_data == cic);
2982
2983         radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
2984         hlist_del_rcu(&cic->cic_list);
2985         spin_unlock_irqrestore(&ioc->lock, flags);
2986
2987         cfq_cic_free(cic);
2988 }
2989
2990 static struct cfq_io_context *
2991 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2992 {
2993         struct cfq_io_context *cic;
2994         unsigned long flags;
2995
2996         if (unlikely(!ioc))
2997                 return NULL;
2998
2999         rcu_read_lock();
3000
3001         /*
3002          * we maintain a last-hit cache, to avoid browsing over the tree
3003          */
3004         cic = rcu_dereference(ioc->ioc_data);
3005         if (cic && cic->key == cfqd) {
3006                 rcu_read_unlock();
3007                 return cic;
3008         }
3009
3010         do {
3011                 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3012                 rcu_read_unlock();
3013                 if (!cic)
3014                         break;
3015                 if (unlikely(cic->key != cfqd)) {
3016                         cfq_drop_dead_cic(cfqd, ioc, cic);
3017                         rcu_read_lock();
3018                         continue;
3019                 }
3020
3021                 spin_lock_irqsave(&ioc->lock, flags);
3022                 rcu_assign_pointer(ioc->ioc_data, cic);
3023                 spin_unlock_irqrestore(&ioc->lock, flags);
3024                 break;
3025         } while (1);
3026
3027         return cic;
3028 }
3029
3030 /*
3031  * Add cic into ioc, using cfqd as the search key. This enables us to look up
3032  * the process specific cfq io context when entered from the block layer.
3033  * Also adds the cic to a per-cfqd list, used when this queue is removed.
3034  */
3035 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3036                         struct cfq_io_context *cic, gfp_t gfp_mask)
3037 {
3038         unsigned long flags;
3039         int ret;
3040
3041         ret = radix_tree_preload(gfp_mask);
3042         if (!ret) {
3043                 cic->ioc = ioc;
3044                 cic->key = cfqd;
3045
3046                 spin_lock_irqsave(&ioc->lock, flags);
3047                 ret = radix_tree_insert(&ioc->radix_root,
3048                                                 cfqd->cic_index, cic);
3049                 if (!ret)
3050                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3051                 spin_unlock_irqrestore(&ioc->lock, flags);
3052
3053                 radix_tree_preload_end();
3054
3055                 if (!ret) {
3056                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3057                         list_add(&cic->queue_list, &cfqd->cic_list);
3058                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3059                 }
3060         }
3061
3062         if (ret)
3063                 printk(KERN_ERR "cfq: cic link failed!\n");
3064
3065         return ret;
3066 }
3067
3068 /*
3069  * Set up the general io context and cfq io context. There can be several cfq
3070  * io contexts per general io context, if this process is doing io to more
3071  * than one device managed by cfq.
3072  */
3073 static struct cfq_io_context *
3074 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3075 {
3076         struct io_context *ioc = NULL;
3077         struct cfq_io_context *cic;
3078
3079         might_sleep_if(gfp_mask & __GFP_WAIT);
3080
3081         ioc = get_io_context(gfp_mask, cfqd->queue->node);
3082         if (!ioc)
3083                 return NULL;
3084
3085         cic = cfq_cic_lookup(cfqd, ioc);
3086         if (cic)
3087                 goto out;
3088
3089         cic = cfq_alloc_io_context(cfqd, gfp_mask);
3090         if (cic == NULL)
3091                 goto err;
3092
3093         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3094                 goto err_free;
3095
3096 out:
3097         smp_read_barrier_depends();
3098         if (unlikely(ioc->ioprio_changed))
3099                 cfq_ioc_set_ioprio(ioc);
3100
3101 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3102         if (unlikely(ioc->cgroup_changed))
3103                 cfq_ioc_set_cgroup(ioc);
3104 #endif
3105         return cic;
3106 err_free:
3107         cfq_cic_free(cic);
3108 err:
3109         put_io_context(ioc);
3110         return NULL;
3111 }
3112
3113 static void
3114 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3115 {
3116         unsigned long elapsed = jiffies - cic->last_end_request;
3117         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3118
3119         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3120         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3121         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3122 }
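
/*
 * The three updates above form a fixed-point exponentially weighted
 * moving average: each new sample carries a 1/8 weight and everything is
 * scaled by 256 to keep precision in integer math. ttime_samples
 * converges towards its fixed point of 256 (s = (7*s + 256)/8), and with
 * a steady per-request think time t, ttime_total converges towards
 * 256*t, so ttime_mean = (total + 128)/samples converges towards t (the
 * +128 is rounding). Starting from zero with a constant think time of 8
 * jiffies, for example, successive means come out as 12, 10, 9, ...,
 * trending towards 8. The min(elapsed, 2 * cfq_slice_idle) clamp above
 * keeps a single long pause from dominating the average.
 */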
3123
3124 static void
3125 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3126                        struct request *rq)
3127 {
3128         sector_t sdist = 0;
3129         sector_t n_sec = blk_rq_sectors(rq);
3130         if (cfqq->last_request_pos) {
3131                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3132                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3133                 else
3134                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3135         }
3136
3137         cfqq->seek_history <<= 1;
3138         if (blk_queue_nonrot(cfqd->queue))
3139                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3140         else
3141                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3142 }
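
/*
 * seek_history is a 32-request shift register of per-request "seeky"
 * flags. On rotational storage a request is flagged when its distance
 * from the previous request exceeds the seek threshold; on
 * non-rotational storage seeks are cheap but small requests are not, so
 * the flag is set for requests below the size threshold instead.
 * CFQQ_SEEKY() then looks at the population count of this window to
 * decide whether the queue as a whole should be treated as seeky.
 */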
3143
3144 /*
3145  * Disable idle window if the process thinks too long or seeks so much that
3146  * it doesn't matter
3147  */
3148 static void
3149 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3150                        struct cfq_io_context *cic)
3151 {
3152         int old_idle, enable_idle;
3153
3154         /*
3155          * Don't idle for async or idle io prio class
3156          */
3157         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3158                 return;
3159
3160         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3161
3162         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3163                 cfq_mark_cfqq_deep(cfqq);
3164
3165         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3166             (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3167                 enable_idle = 0;
3168         else if (sample_valid(cic->ttime_samples)) {
3169                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3170                         enable_idle = 0;
3171                 else
3172                         enable_idle = 1;
3173         }
3174
3175         if (old_idle != enable_idle) {
3176                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3177                 if (enable_idle)
3178                         cfq_mark_cfqq_idle_window(cfqq);
3179                 else
3180                         cfq_clear_cfqq_idle_window(cfqq);
3181         }
3182 }
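
/*
 * In short: idling is switched off when the io_context has no tasks
 * left, when the slice_idle tunable is zero, or when the queue looks
 * seeky without ever having been marked "deep" (4 or more requests
 * queued at once). Otherwise, once enough think time samples have
 * accumulated, idling stays enabled only while the mean think time still
 * fits within cfq_slice_idle; waiting on a process that routinely takes
 * longer than that would waste more time than it saves.
 */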
3183
3184 /*
3185  * Check if new_cfqq should preempt the currently active queue. Return false
3186  * if it should not (or if we aren't sure); returning true causes a preempt.
3187  */
3188 static bool
3189 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3190                    struct request *rq)
3191 {
3192         struct cfq_queue *cfqq;
3193
3194         cfqq = cfqd->active_queue;
3195         if (!cfqq)
3196                 return false;
3197
3198         if (cfq_class_idle(new_cfqq))
3199                 return false;
3200
3201         if (cfq_class_idle(cfqq))
3202                 return true;
3203
3204         /*
3205          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3206          */
3207         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3208                 return false;
3209
3210         /*
3211          * if the new request is sync, but the currently running queue is
3212          * not, let the sync request have priority.
3213          */
3214         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3215                 return true;
3216
3217         if (new_cfqq->cfqg != cfqq->cfqg)
3218                 return false;
3219
3220         if (cfq_slice_used(cfqq))
3221                 return true;
3222
3223         /* Allow preemption only if we are idling on sync-noidle tree */
3224         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3225             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3226             new_cfqq->service_tree->count == 2 &&
3227             RB_EMPTY_ROOT(&cfqq->sort_list))
3228                 return true;
3229
3230         /*
3231          * So both queues are sync. Let the new request get disk time if
3232          * it's a metadata request and the current queue is doing regular IO.
3233          */
3234         if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3235                 return true;
3236
3237         /*
3238          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3239          */
3240         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3241                 return true;
3242
3243         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3244                 return false;
3245
3246         /*
3247          * if this request is as good as one we would expect from the
3248          * current cfqq, let it preempt
3249          */
3250         if (cfq_rq_close(cfqd, cfqq, rq))
3251                 return true;
3252
3253         return false;
3254 }
3255
3256 /*
3257  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3258  * let it have half of its nominal slice.
3259  */
3260 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3261 {
3262         cfq_log_cfqq(cfqd, cfqq, "preempt");
3263         cfq_slice_expired(cfqd, 1);
3264
3265         /*
3266          * Put the new queue at the front of the current list,
3267          * so we know that it will be selected next.
3268          */
3269         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3270
3271         cfq_service_tree_add(cfqd, cfqq, 1);
3272
3273         cfqq->slice_end = 0;
3274         cfq_mark_cfqq_slice_new(cfqq);
3275 }
3276
3277 /*
3278  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3279  * something we should do about it
3280  */
3281 static void
3282 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3283                 struct request *rq)
3284 {
3285         struct cfq_io_context *cic = RQ_CIC(rq);
3286
3287         cfqd->rq_queued++;
3288         if (rq->cmd_flags & REQ_META)
3289                 cfqq->meta_pending++;
3290
3291         cfq_update_io_thinktime(cfqd, cic);
3292         cfq_update_io_seektime(cfqd, cfqq, rq);
3293         cfq_update_idle_window(cfqd, cfqq, cic);
3294
3295         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3296
3297         if (cfqq == cfqd->active_queue) {
3298                 /*
3299                  * Remember that we saw a request from this process, but
3300                  * don't start queuing just yet. Otherwise we risk seeing lots
3301                  * of tiny requests, because we disrupt the normal plugging
3302                  * and merging. If the request is already larger than a single
3303                  * page, let it rip immediately. For that case we assume that
3304                  * merging is already done. Ditto for a busy system that
3305                  * has other work pending: don't risk delaying until the
3306                  * idle timer unplug to continue working.
3307                  */
3308                 if (cfq_cfqq_wait_request(cfqq)) {
3309                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3310                             cfqd->busy_queues > 1) {
3311                                 cfq_del_timer(cfqd, cfqq);
3312                                 cfq_clear_cfqq_wait_request(cfqq);
3313                                 __blk_run_queue(cfqd->queue);
3314                         } else {
3315                                 cfq_blkiocg_update_idle_time_stats(
3316                                                 &cfqq->cfqg->blkg);
3317                                 cfq_mark_cfqq_must_dispatch(cfqq);
3318                         }
3319                 }
3320         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3321                 /*
3322                  * not the active queue - expire current slice if it is
3323                  * idle and has expired its mean thinktime or this new queue
3324                  * has some old slice time left and is of higher priority or
3325                  * this new queue is RT and the current one is BE
3326                  */
3327                 cfq_preempt_queue(cfqd, cfqq);
3328                 __blk_run_queue(cfqd->queue);
3329         }
3330 }
3331
3332 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3333 {
3334         struct cfq_data *cfqd = q->elevator->elevator_data;
3335         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3336
3337         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3338         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3339
3340         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3341         list_add_tail(&rq->queuelist, &cfqq->fifo);
3342         cfq_add_rq_rb(rq);
3343         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3344                         &cfqd->serving_group->blkg, rq_data_dir(rq),
3345                         rq_is_sync(rq));
3346         cfq_rq_enqueued(cfqd, cfqq, rq);
3347 }
3348
3349 /*
3350  * Update hw_tag based on peak queue depth over 50 samples under
3351  * sufficient load.
3352  */
3353 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3354 {
3355         struct cfq_queue *cfqq = cfqd->active_queue;
3356
3357         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3358                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3359
3360         if (cfqd->hw_tag == 1)
3361                 return;
3362
3363         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3364             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3365                 return;
3366
3367         /*
3368          * If the active queue doesn't have enough requests and can idle, cfq
3369          * might not dispatch sufficient requests to the hardware. Don't zero
3370          * hw_tag in this case.
3371          */
3372         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3373             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3374             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3375                 return;
3376
3377         if (cfqd->hw_tag_samples++ < 50)
3378                 return;
3379
3380         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3381                 cfqd->hw_tag = 1;
3382         else
3383                 cfqd->hw_tag = 0;
3384 }
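
/*
 * The estimate above tracks the peak number of requests the driver has
 * had outstanding at once. Samples are only counted when there is enough
 * load for the depth to be meaningful, and after 50 such samples hw_tag
 * is set if the device ever reached a depth of CFQ_HW_QUEUE_MIN or more,
 * i.e. it appears to do its own command queueing (NCQ/TCQ style).
 * Elsewhere cfq consults hw_tag when deciding whether idling between
 * requests is still worthwhile.
 */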
3385
3386 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3387 {
3388         struct cfq_io_context *cic = cfqd->active_cic;
3389
3390         /* If there are other queues in the group, don't wait */
3391         if (cfqq->cfqg->nr_cfqq > 1)
3392                 return false;
3393
3394         if (cfq_slice_used(cfqq))
3395                 return true;
3396
3397         /* if slice left is less than think time, wait busy */
3398         if (cic && sample_valid(cic->ttime_samples)
3399             && (cfqq->slice_end - jiffies < cic->ttime_mean))
3400                 return true;
3401
3402         /*
3403          * If the think time is less than a jiffy then ttime_mean=0 and the
3404          * check above will not be true. It might happen that the slice has
3405          * not expired yet but will expire soon (4-5 ns) during select_queue().
3406          * To cover the case where the think time is less than a jiffy, mark
3407          * the queue wait busy if only 1 jiffy is left in the slice.
3408          */
3409         if (cfqq->slice_end - jiffies == 1)
3410                 return true;
3411
3412         return false;
3413 }
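
/*
 * Taken together, and provided this is the only queue in its group, the
 * queue is asked to "wait busy" when its slice is already used up, when
 * (given enough think time samples) the slice time remaining is shorter
 * than the process's mean think time, or when just one jiffy is left
 * (the sub-jiffy think time case described above). For example, with a
 * ttime_mean of 3 jiffies and 2 jiffies of slice left, the queue is
 * marked wait-busy and cfq_completed_request() then pushes slice_end out
 * to jiffies + cfq_slice_idle (or cfq_group_idle when slice_idle is
 * disabled), giving the next request a chance to arrive before the
 * queue is expired.
 */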
3414
3415 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3416 {
3417         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3418         struct cfq_data *cfqd = cfqq->cfqd;
3419         const int sync = rq_is_sync(rq);
3420         unsigned long now;
3421
3422         now = jiffies;
3423         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3424                      !!(rq->cmd_flags & REQ_NOIDLE));
3425
3426         cfq_update_hw_tag(cfqd);
3427
3428         WARN_ON(!cfqd->rq_in_driver);
3429         WARN_ON(!cfqq->dispatched);
3430         cfqd->rq_in_driver--;
3431         cfqq->dispatched--;
3432         (RQ_CFQG(rq))->dispatched--;
3433         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3434                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3435                         rq_data_dir(rq), rq_is_sync(rq));
3436
3437         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3438
3439         if (sync) {
3440                 RQ_CIC(rq)->last_end_request = now;
3441                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3442                         cfqd->last_delayed_sync = now;
3443         }
3444
3445         /*
3446          * If this is the active queue, check if it needs to be expired,
3447          * or if we want to idle in case it has no pending requests.
3448          */
3449         if (cfqd->active_queue == cfqq) {
3450                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3451
3452                 if (cfq_cfqq_slice_new(cfqq)) {
3453                         cfq_set_prio_slice(cfqd, cfqq);
3454                         cfq_clear_cfqq_slice_new(cfqq);
3455                 }
3456
3457                 /*
3458                  * Should we wait for the next request to come in before we
3459                  * expire the queue?
3460                  */
3461                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3462                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3463                         if (!cfqd->cfq_slice_idle)
3464                                 extend_sl = cfqd->cfq_group_idle;
3465                         cfqq->slice_end = jiffies + extend_sl;
3466                         cfq_mark_cfqq_wait_busy(cfqq);
3467                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3468                 }
3469
3470                 /*
3471                  * Idling is not enabled on:
3472                  * - expired queues
3473                  * - idle-priority queues
3474                  * - async queues
3475                  * - queues that still have requests queued
3476                  * - when there is a close cooperator
3477                  */
3478                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3479                         cfq_slice_expired(cfqd, 1);
3480                 else if (sync && cfqq_empty &&
3481                          !cfq_close_cooperator(cfqd, cfqq)) {
3482                         cfqd->noidle_tree_requires_idle |=
3483                                 !(rq->cmd_flags & REQ_NOIDLE);
3484                         /*
3485                          * Idling is enabled for SYNC_WORKLOAD.
3486                          * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
3487                          * only if we processed at least one !REQ_NOIDLE request
3488                          */
3489                         if (cfqd->serving_type == SYNC_WORKLOAD
3490                             || cfqd->noidle_tree_requires_idle
3491                             || cfqq->cfqg->nr_cfqq == 1)
3492                                 cfq_arm_slice_timer(cfqd);
3493                 }
3494         }
3495
3496         if (!cfqd->rq_in_driver)
3497                 cfq_schedule_dispatch(cfqd);
3498 }
3499
3500 /*
3501  * we temporarily boost lower priority queues if they are holding fs exclusive
3502  * resources. they are boosted to normal prio (CLASS_BE/4)
3503  */
3504 static void cfq_prio_boost(struct cfq_queue *cfqq)
3505 {
3506         if (has_fs_excl()) {
3507                 /*
3508                  * boost idle prio on transactions that would lock out other
3509                  * users of the filesystem
3510                  */
3511                 if (cfq_class_idle(cfqq))
3512                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
3513                 if (cfqq->ioprio > IOPRIO_NORM)
3514                         cfqq->ioprio = IOPRIO_NORM;
3515         } else {
3516                 /*
3517                  * unboost the queue (if needed)
3518                  */
3519                 cfqq->ioprio_class = cfqq->org_ioprio_class;
3520                 cfqq->ioprio = cfqq->org_ioprio;
3521         }
3522 }
3523
3524 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3525 {
3526         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3527                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3528                 return ELV_MQUEUE_MUST;
3529         }
3530
3531         return ELV_MQUEUE_MAY;
3532 }
3533
3534 static int cfq_may_queue(struct request_queue *q, int rw)
3535 {
3536         struct cfq_data *cfqd = q->elevator->elevator_data;
3537         struct task_struct *tsk = current;
3538         struct cfq_io_context *cic;
3539         struct cfq_queue *cfqq;
3540
3541         /*
3542          * don't force setup of a queue from here, as a call to may_queue
3543          * does not necessarily imply that a request actually will be queued.
3544          * so just look up a possibly existing queue, or return 'may queue'
3545          * if that fails
3546          */
3547         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3548         if (!cic)
3549                 return ELV_MQUEUE_MAY;
3550
3551         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3552         if (cfqq) {
3553                 cfq_init_prio_data(cfqq, cic->ioc);
3554                 cfq_prio_boost(cfqq);
3555
3556                 return __cfq_may_queue(cfqq);
3557         }
3558
3559         return ELV_MQUEUE_MAY;
3560 }
3561
3562 /*
3563  * queue lock held here
3564  */
3565 static void cfq_put_request(struct request *rq)
3566 {
3567         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3568
3569         if (cfqq) {
3570                 const int rw = rq_data_dir(rq);
3571
3572                 BUG_ON(!cfqq->allocated[rw]);
3573                 cfqq->allocated[rw]--;
3574
3575                 put_io_context(RQ_CIC(rq)->ioc);
3576
3577                 rq->elevator_private = NULL;
3578                 rq->elevator_private2 = NULL;
3579
3580                 /* Put down rq reference on cfqg */
3581                 cfq_put_cfqg(RQ_CFQG(rq));
3582                 rq->elevator_private3 = NULL;
3583
3584                 cfq_put_queue(cfqq);
3585         }
3586 }
3587
3588 static struct cfq_queue *
3589 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3590                 struct cfq_queue *cfqq)
3591 {
3592         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3593         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3594         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3595         cfq_put_queue(cfqq);
3596         return cic_to_cfqq(cic, 1);
3597 }
3598
3599 /*
3600  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3601  * was the last process referring to said cfqq.
3602  */
3603 static struct cfq_queue *
3604 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3605 {
3606         if (cfqq_process_refs(cfqq) == 1) {
3607                 cfqq->pid = current->pid;
3608                 cfq_clear_cfqq_coop(cfqq);
3609                 cfq_clear_cfqq_split_coop(cfqq);
3610                 return cfqq;
3611         }
3612
3613         cic_set_cfqq(cic, NULL, 1);
3614
3615         cfq_put_cooperator(cfqq);
3616
3617         cfq_put_queue(cfqq);
3618         return NULL;
3619 }
3620 /*
3621  * Allocate cfq data structures associated with this request.
3622  */
3623 static int
3624 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3625 {
3626         struct cfq_data *cfqd = q->elevator->elevator_data;
3627         struct cfq_io_context *cic;
3628         const int rw = rq_data_dir(rq);
3629         const bool is_sync = rq_is_sync(rq);
3630         struct cfq_queue *cfqq;
3631         unsigned long flags;
3632
3633         might_sleep_if(gfp_mask & __GFP_WAIT);
3634
3635         cic = cfq_get_io_context(cfqd, gfp_mask);
3636
3637         spin_lock_irqsave(q->queue_lock, flags);
3638
3639         if (!cic)
3640                 goto queue_fail;
3641
3642 new_queue:
3643         cfqq = cic_to_cfqq(cic, is_sync);
3644         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3645                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3646                 cic_set_cfqq(cic, cfqq, is_sync);
3647         } else {
3648                 /*
3649                  * If the queue was seeky for too long, break it apart.
3650                  */
3651                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3652                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3653                         cfqq = split_cfqq(cic, cfqq);
3654                         if (!cfqq)
3655                                 goto new_queue;
3656                 }
3657
3658                 /*
3659                  * Check to see if this queue is scheduled to merge with
3660                  * another, closely cooperating queue.  The merging of
3661                  * queues happens here as it must be done in process context.
3662                  * The reference on new_cfqq was taken in merge_cfqqs.
3663                  */
3664                 if (cfqq->new_cfqq)
3665                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3666         }
3667
3668         cfqq->allocated[rw]++;
3669         atomic_inc(&cfqq->ref);
3670
3671         spin_unlock_irqrestore(q->queue_lock, flags);
3672
3673         rq->elevator_private = cic;
3674         rq->elevator_private2 = cfqq;
3675         rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
3676         return 0;
3677
3678 queue_fail:
3679         if (cic)
3680                 put_io_context(cic->ioc);
3681
3682         cfq_schedule_dispatch(cfqd);
3683         spin_unlock_irqrestore(q->queue_lock, flags);
3684         cfq_log(cfqd, "set_request fail");
3685         return 1;
3686 }
3687
3688 static void cfq_kick_queue(struct work_struct *work)
3689 {
3690         struct cfq_data *cfqd =
3691                 container_of(work, struct cfq_data, unplug_work);
3692         struct request_queue *q = cfqd->queue;
3693
3694         spin_lock_irq(q->queue_lock);
3695         __blk_run_queue(cfqd->queue);
3696         spin_unlock_irq(q->queue_lock);
3697 }
3698
3699 /*
3700  * Timer running if the active_queue is currently idling inside its time slice
3701  */
3702 static void cfq_idle_slice_timer(unsigned long data)
3703 {
3704         struct cfq_data *cfqd = (struct cfq_data *) data;
3705         struct cfq_queue *cfqq;
3706         unsigned long flags;
3707         int timed_out = 1;
3708
3709         cfq_log(cfqd, "idle timer fired");
3710
3711         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3712
3713         cfqq = cfqd->active_queue;
3714         if (cfqq) {
3715                 timed_out = 0;
3716
3717                 /*
3718                  * We saw a request before the queue expired, let it through
3719                  */
3720                 if (cfq_cfqq_must_dispatch(cfqq))
3721                         goto out_kick;
3722
3723                 /*
3724                  * expired
3725                  */
3726                 if (cfq_slice_used(cfqq))
3727                         goto expire;
3728
3729                 /*
3730                  * only expire and reinvoke the request handler if there are
3731                  * other queues with pending requests
3732                  */
3733                 if (!cfqd->busy_queues)
3734                         goto out_cont;
3735
3736                 /*
3737                  * not expired and it has a request pending, let it dispatch
3738                  */
3739                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3740                         goto out_kick;
3741
3742                 /*
3743                  * Queue depth flag is reset only when the idle didn't succeed
3744                  */
3745                 cfq_clear_cfqq_deep(cfqq);
3746         }
3747 expire:
3748         cfq_slice_expired(cfqd, timed_out);
3749 out_kick:
3750         cfq_schedule_dispatch(cfqd);
3751 out_cont:
3752         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3753 }
3754
3755 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3756 {
3757         del_timer_sync(&cfqd->idle_slice_timer);
3758         cancel_work_sync(&cfqd->unplug_work);
3759 }
3760
3761 static void cfq_put_async_queues(struct cfq_data *cfqd)
3762 {
3763         int i;
3764
3765         for (i = 0; i < IOPRIO_BE_NR; i++) {
3766                 if (cfqd->async_cfqq[0][i])
3767                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3768                 if (cfqd->async_cfqq[1][i])
3769                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3770         }
3771
3772         if (cfqd->async_idle_cfqq)
3773                 cfq_put_queue(cfqd->async_idle_cfqq);
3774 }
3775
3776 static void cfq_cfqd_free(struct rcu_head *head)
3777 {
3778         kfree(container_of(head, struct cfq_data, rcu));
3779 }
3780
3781 static void cfq_exit_queue(struct elevator_queue *e)
3782 {
3783         struct cfq_data *cfqd = e->elevator_data;
3784         struct request_queue *q = cfqd->queue;
3785
3786         cfq_shutdown_timer_wq(cfqd);
3787
3788         spin_lock_irq(q->queue_lock);
3789
3790         if (cfqd->active_queue)
3791                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3792
3793         while (!list_empty(&cfqd->cic_list)) {
3794                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3795                                                         struct cfq_io_context,
3796                                                         queue_list);
3797
3798                 __cfq_exit_single_io_context(cfqd, cic);
3799         }
3800
3801         cfq_put_async_queues(cfqd);
3802         cfq_release_cfq_groups(cfqd);
3803         cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3804
3805         spin_unlock_irq(q->queue_lock);
3806
3807         cfq_shutdown_timer_wq(cfqd);
3808
3809         spin_lock(&cic_index_lock);
3810         ida_remove(&cic_index_ida, cfqd->cic_index);
3811         spin_unlock(&cic_index_lock);
3812
3813         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3814         call_rcu(&cfqd->rcu, cfq_cfqd_free);
3815 }
3816
3817 static int cfq_alloc_cic_index(void)
3818 {
3819         int index, error;
3820
3821         do {
3822                 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3823                         return -ENOMEM;
3824
3825                 spin_lock(&cic_index_lock);
3826                 error = ida_get_new(&cic_index_ida, &index);
3827                 spin_unlock(&cic_index_lock);
3828                 if (error && error != -EAGAIN)
3829                         return error;
3830         } while (error);
3831
3832         return index;
3833 }
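
/*
 * Classic two-step IDA allocation: ida_pre_get() preallocates any memory
 * the IDA may need while we are still allowed to sleep, and
 * ida_get_new() then grabs the lowest free index under cic_index_lock.
 * If a concurrent allocator consumed the preallocated memory first,
 * ida_get_new() returns -EAGAIN and we simply preload and retry.
 */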
3834
3835 static void *cfq_init_queue(struct request_queue *q)
3836 {
3837         struct cfq_data *cfqd;
3838         int i, j;
3839         struct cfq_group *cfqg;
3840         struct cfq_rb_root *st;
3841
3842         i = cfq_alloc_cic_index();
3843         if (i < 0)
3844                 return NULL;
3845
3846         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3847         if (!cfqd)
3848                 return NULL;
3849
3850         cfqd->cic_index = i;
3851
3852         /* Init root service tree */
3853         cfqd->grp_service_tree = CFQ_RB_ROOT;
3854
3855         /* Init root group */
3856         cfqg = &cfqd->root_group;
3857         for_each_cfqg_st(cfqg, i, j, st)
3858                 *st = CFQ_RB_ROOT;
3859         RB_CLEAR_NODE(&cfqg->rb_node);
3860
3861         /* Give preference to root group over other groups */
3862         cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3863
3864 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3865         /*
3866          * Take a reference to root group which we never drop. This is just
3867          * to make sure that cfq_put_cfqg() does not try to kfree root group
3868          */
3869         atomic_set(&cfqg->ref, 1);
3870         rcu_read_lock();
3871         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3872                                         (void *)cfqd, 0);
3873         rcu_read_unlock();
3874 #endif
3875         /*
3876          * Not strictly needed (since RB_ROOT just clears the node and we
3877          * zeroed cfqd on alloc), but better be safe in case someone decides
3878          * to add magic to the rb code
3879          */
3880         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3881                 cfqd->prio_trees[i] = RB_ROOT;
3882
3883         /*
3884          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3885          * Grab a permanent reference to it, so that the normal code flow
3886          * will not attempt to free it.
3887          */
3888         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3889         atomic_inc(&cfqd->oom_cfqq.ref);
3890         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3891
3892         INIT_LIST_HEAD(&cfqd->cic_list);
3893
3894         cfqd->queue = q;
3895
3896         init_timer(&cfqd->idle_slice_timer);
3897         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3898         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3899
3900         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3901
3902         cfqd->cfq_quantum = cfq_quantum;
3903         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3904         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3905         cfqd->cfq_back_max = cfq_back_max;
3906         cfqd->cfq_back_penalty = cfq_back_penalty;
3907         cfqd->cfq_slice[0] = cfq_slice_async;
3908         cfqd->cfq_slice[1] = cfq_slice_sync;
3909         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3910         cfqd->cfq_slice_idle = cfq_slice_idle;
3911         cfqd->cfq_group_idle = cfq_group_idle;
3912         cfqd->cfq_latency = 1;
3913         cfqd->cfq_group_isolation = 0;
3914         cfqd->hw_tag = -1;
3915         /*
3916          * we optimistically start assuming sync ops weren't delayed in the
3917          * last second, in order to have larger depth for async operations.
3918          */
3919         cfqd->last_delayed_sync = jiffies - HZ;
3920         return cfqd;
3921 }
3922
3923 static void cfq_slab_kill(void)
3924 {
3925         /*
3926          * Caller already ensured that pending RCU callbacks are completed,
3927          * so we should have no busy allocations at this point.
3928          */
3929         if (cfq_pool)
3930                 kmem_cache_destroy(cfq_pool);
3931         if (cfq_ioc_pool)
3932                 kmem_cache_destroy(cfq_ioc_pool);
3933 }
3934
3935 static int __init cfq_slab_setup(void)
3936 {
3937         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3938         if (!cfq_pool)
3939                 goto fail;
3940
3941         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3942         if (!cfq_ioc_pool)
3943                 goto fail;
3944
3945         return 0;
3946 fail:
3947         cfq_slab_kill();
3948         return -ENOMEM;
3949 }
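
/*
 * KMEM_CACHE(struct_name, flags) is shorthand for kmem_cache_create()
 * with the cache named after the structure and sized and aligned to
 * match it, so cfq_pool ends up holding struct cfq_queue objects and
 * cfq_ioc_pool struct cfq_io_context objects.
 */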
3950
3951 /*
3952  * sysfs parts below -->
3953  */
3954 static ssize_t
3955 cfq_var_show(unsigned int var, char *page)
3956 {
3957         return sprintf(page, "%d\n", var);
3958 }
3959
3960 static ssize_t
3961 cfq_var_store(unsigned int *var, const char *page, size_t count)
3962 {
3963         char *p = (char *) page;
3964
3965         *var = simple_strtoul(p, &p, 10);
3966         return count;
3967 }
3968
3969 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3970 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3971 {                                                                       \
3972         struct cfq_data *cfqd = e->elevator_data;                       \
3973         unsigned int __data = __VAR;                                    \
3974         if (__CONV)                                                     \
3975                 __data = jiffies_to_msecs(__data);                      \
3976         return cfq_var_show(__data, (page));                            \
3977 }
3978 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3979 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3980 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3981 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3982 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3983 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3984 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3985 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3986 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3987 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3988 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3989 SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
3990 #undef SHOW_FUNCTION
3991
3992 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3993 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3994 {                                                                       \
3995         struct cfq_data *cfqd = e->elevator_data;                       \
3996         unsigned int __data;                                            \
3997         int ret = cfq_var_store(&__data, (page), count);                \
3998         if (__data < (MIN))                                             \
3999                 __data = (MIN);                                         \
4000         else if (__data > (MAX))                                        \
4001                 __data = (MAX);                                         \
4002         if (__CONV)                                                     \
4003                 *(__PTR) = msecs_to_jiffies(__data);                    \
4004         else                                                            \
4005                 *(__PTR) = __data;                                      \
4006         return ret;                                                     \
4007 }
4008 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4009 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4010                 UINT_MAX, 1);
4011 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4012                 UINT_MAX, 1);
4013 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4014 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4015                 UINT_MAX, 0);
4016 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4017 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4018 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4019 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4020 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4021                 UINT_MAX, 0);
4022 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4023 STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
4024 #undef STORE_FUNCTION
4025
4026 #define CFQ_ATTR(name) \
4027         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4028
4029 static struct elv_fs_entry cfq_attrs[] = {
4030         CFQ_ATTR(quantum),
4031         CFQ_ATTR(fifo_expire_sync),
4032         CFQ_ATTR(fifo_expire_async),
4033         CFQ_ATTR(back_seek_max),
4034         CFQ_ATTR(back_seek_penalty),
4035         CFQ_ATTR(slice_sync),
4036         CFQ_ATTR(slice_async),
4037         CFQ_ATTR(slice_async_rq),
4038         CFQ_ATTR(slice_idle),
4039         CFQ_ATTR(group_idle),
4040         CFQ_ATTR(low_latency),
4041         CFQ_ATTR(group_isolation),
4042         __ATTR_NULL
4043 };
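
/*
 * Each attribute above normally shows up as a file under
 * /sys/block/<disk>/queue/iosched/ while cfq is the active elevator for
 * that disk. Attributes generated with __CONV set are displayed in
 * milliseconds and converted back to jiffies on store, so, for example
 * (device name purely illustrative):
 *
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *	echo 0 > /sys/block/sda/queue/iosched/group_idle
 *
 * disables queue and group idling entirely, a common tuning on storage
 * that services many requests in parallel.
 */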
4044
4045 static struct elevator_type iosched_cfq = {
4046         .ops = {
4047                 .elevator_merge_fn =            cfq_merge,
4048                 .elevator_merged_fn =           cfq_merged_request,
4049                 .elevator_merge_req_fn =        cfq_merged_requests,
4050                 .elevator_allow_merge_fn =      cfq_allow_merge,
4051                 .elevator_bio_merged_fn =       cfq_bio_merged,
4052                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4053                 .elevator_add_req_fn =          cfq_insert_request,
4054                 .elevator_activate_req_fn =     cfq_activate_request,
4055                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4056                 .elevator_queue_empty_fn =      cfq_queue_empty,
4057                 .elevator_completed_req_fn =    cfq_completed_request,
4058                 .elevator_former_req_fn =       elv_rb_former_request,
4059                 .elevator_latter_req_fn =       elv_rb_latter_request,
4060                 .elevator_set_req_fn =          cfq_set_request,
4061                 .elevator_put_req_fn =          cfq_put_request,
4062                 .elevator_may_queue_fn =        cfq_may_queue,
4063                 .elevator_init_fn =             cfq_init_queue,
4064                 .elevator_exit_fn =             cfq_exit_queue,
4065                 .trim =                         cfq_free_io_context,
4066         },
4067         .elevator_attrs =       cfq_attrs,
4068         .elevator_name =        "cfq",
4069         .elevator_owner =       THIS_MODULE,
4070 };
4071
4072 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4073 static struct blkio_policy_type blkio_policy_cfq = {
4074         .ops = {
4075                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
4076                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4077         },
4078 };
4079 #else
4080 static struct blkio_policy_type blkio_policy_cfq;
4081 #endif
4082
4083 static int __init cfq_init(void)
4084 {
4085         /*
4086          * could be 0 on HZ < 1000 setups
4087          */
4088         if (!cfq_slice_async)
4089                 cfq_slice_async = 1;
4090         if (!cfq_slice_idle)
4091                 cfq_slice_idle = 1;
4092
4093 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4094         if (!cfq_group_idle)
4095                 cfq_group_idle = 1;
4096 #else
4097         cfq_group_idle = 0;
4098 #endif
4099         if (cfq_slab_setup())
4100                 return -ENOMEM;
4101
4102         elv_register(&iosched_cfq);
4103         blkio_policy_register(&blkio_policy_cfq);
4104
4105         return 0;
4106 }
4107
4108 static void __exit cfq_exit(void)
4109 {
4110         DECLARE_COMPLETION_ONSTACK(all_gone);
4111         blkio_policy_unregister(&blkio_policy_cfq);
4112         elv_unregister(&iosched_cfq);
4113         ioc_gone = &all_gone;
4114         /* ioc_gone's update must be visible before reading ioc_count */
4115         smp_wmb();
4116
4117         /*
4118          * this also protects us from entering cfq_slab_kill() with
4119          * pending RCU callbacks
4120          */
4121         if (elv_ioc_count_read(cfq_ioc_count))
4122                 wait_for_completion(&all_gone);
4123         ida_destroy(&cic_index_ida);
4124         cfq_slab_kill();
4125 }
4126
4127 module_init(cfq_init);
4128 module_exit(cfq_exit);
4129
4130 MODULE_AUTHOR("Jens Axboe");
4131 MODULE_LICENSE("GPL");
4132 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");