block: remove wrappers for request type/flags
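
This blobdiff for block/cfq-iosched.c folds together several visible changes:
the trivial rq_is_meta()/rq_noidle() wrappers are replaced by direct tests on
rq->cmd_flags (making request types greppable instead of hidden behind macros),
the blkio-cgroup statistics calls are routed through cfq_blkiocg_*() wrappers
from the new local "cfq.h" header in place of "blk-cgroup.h", and
cfq_setup_merge() gains a guard against following a new_cfqq chain whose
queues hold no process references.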
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5ff4f4850e717ddb319423e9678e0e44cd7f265c..d4edeb8fceb8b33926e679e54459dc4b442386ae 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
-       if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+       if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
                return rq1;
-       else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+       else if ((rq2->cmd_flags & REQ_RW_META) &&
+                !(rq1->cmd_flags & REQ_RW_META))
                return rq2;
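
For context, the wrapper being open-coded in this hunk was a one-line flag
test. A sketch of how rq_is_meta() and rq_noidle() were presumably defined,
inferred from the mechanical substitutions in this diff rather than copied
from include/linux/blkdev.h:

	/* Inferred from the substitutions in this patch; illustrative,
	 * not the authoritative blkdev.h definitions. */
	#define rq_is_meta(rq)	((rq)->cmd_flags & REQ_RW_META)
	#define rq_noidle(rq)	((rq)->cmd_flags & REQ_NOIDLE)

Removing such wrappers trades a small indirection for the ability to grep
directly for the REQ_* flag at every use site.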
 
        s1 = blk_rq_pos(rq1);
@@ -879,7 +880,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
        cfqg->saved_workload_slice = 0;
-       blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+       cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +940,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
-       blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-       blkiocg_set_start_empty_time(&cfqg->blkg);
+       cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+       cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +996,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 
        /* Add group onto cgroup list */
        sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-       blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+       cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                        MKDEV(major, minor));
        cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1080,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
                 * it from cgroup list, then it will take care of destroying
                 * cfqg also.
                 */
-               if (!blkiocg_del_blkio_group(&cfqg->blkg))
+               if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
                        cfq_destroy_cfqg(cfqd, cfqg);
        }
 }
@@ -1421,10 +1422,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
        elv_rb_del(&cfqq->sort_list, rq);
        cfqq->queued[rq_is_sync(rq)]--;
-       blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-                                               rq_is_sync(rq));
+       cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+                                       rq_data_dir(rq), rq_is_sync(rq));
        cfq_add_rq_rb(rq);
-       blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+       cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
                        &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
                        rq_is_sync(rq));
 }
@@ -1482,9 +1483,9 @@ static void cfq_remove_request(struct request *rq)
        cfq_del_rq_rb(rq);
 
        cfqq->cfqd->rq_queued--;
-       blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-                                               rq_is_sync(rq));
-       if (rq_is_meta(rq)) {
+       cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+                                       rq_data_dir(rq), rq_is_sync(rq));
+       if (rq->cmd_flags & REQ_RW_META) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
        }
@@ -1518,8 +1519,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
                                struct bio *bio)
 {
-       blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-                                       cfq_bio_sync(bio));
+       cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+                                       bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1540,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        if (cfqq->next_rq == next)
                cfqq->next_rq = rq;
        cfq_remove_request(next);
-       blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-                                       rq_is_sync(next));
+       cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+                                       rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1572,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        del_timer(&cfqd->idle_slice_timer);
-       blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+       cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1581,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
                                cfqd->serving_prio, cfqd->serving_type);
-               blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+               cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
                cfqq->allocated_slice = 0;
@@ -1911,7 +1912,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-       blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+       cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1932,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        elv_dispatch_sort(q, rq);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-       blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+       cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                        rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -1986,6 +1987,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
        int process_refs, new_process_refs;
        struct cfq_queue *__cfqq;
 
+       /*
+        * If there are no process references on the new_cfqq, then it is
+        * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+        * chain may have dropped their last reference (not just their
+        * last process reference).
+        */
+       if (!cfqq_process_refs(new_cfqq))
+               return;
+
        /* Avoid a circular list and skip interim queue merges */
        while ((__cfqq = new_cfqq->new_cfqq)) {
                if (__cfqq == cfqq)
@@ -1994,17 +2004,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
        }
 
        process_refs = cfqq_process_refs(cfqq);
+       new_process_refs = cfqq_process_refs(new_cfqq);
        /*
         * If the process for the cfqq has gone away, there is no
         * sense in merging the queues.
         */
-       if (process_refs == 0)
+       if (process_refs == 0 || new_process_refs == 0)
                return;
 
        /*
         * Merge in the direction of the lesser amount of work.
         */
-       new_process_refs = cfqq_process_refs(new_cfqq);
        if (new_process_refs >= process_refs) {
                cfqq->new_cfqq = new_cfqq;
                atomic_add(process_refs, &new_cfqq->ref);
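
The guard added at the top of cfq_setup_merge() relies on cfqq_process_refs()
to distinguish process references from in-flight I/O references. A sketch of
that helper as it appeared in cfq-iosched.c of this era, reproduced from
memory and consistent with the atomic_t refcount used above, so treat it as
illustrative rather than authoritative:

	static int cfqq_process_refs(struct cfq_queue *cfqq)
	{
		int process_refs, io_refs;

		/* References held by allocated requests are I/O references;
		 * whatever remains of the total refcount belongs to
		 * processes. */
		io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
		process_refs = atomic_read(&cfqq->ref) - io_refs;
		BUG_ON(process_refs < 0);
		return process_refs;
	}

With the early return in place, the while loop over new_cfqq->new_cfqq can no
longer walk onto a queue in the chain whose last reference has already been
dropped.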
@@ -3167,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
-       if (rq_is_meta(rq) && !cfqq->meta_pending)
+       if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
                return true;
 
        /*
@@ -3221,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_io_context *cic = RQ_CIC(rq);
 
        cfqd->rq_queued++;
-       if (rq_is_meta(rq))
+       if (rq->cmd_flags & REQ_RW_META)
                cfqq->meta_pending++;
 
        cfq_update_io_thinktime(cfqd, cic);
@@ -3248,7 +3258,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                cfq_clear_cfqq_wait_request(cfqq);
                                __blk_run_queue(cfqd->queue);
                        } else {
-                               blkiocg_update_idle_time_stats(
+                               cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
                                cfq_mark_cfqq_must_dispatch(cfqq);
                        }
@@ -3276,7 +3286,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
        list_add_tail(&rq->queuelist, &cfqq->fifo);
        cfq_add_rq_rb(rq);
-       blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+       cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
                        &cfqd->serving_group->blkg, rq_data_dir(rq),
                        rq_is_sync(rq));
        cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3356,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        unsigned long now;
 
        now = jiffies;
-       cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+       cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+                    !!(rq->cmd_flags & REQ_NOIDLE));
 
        cfq_update_hw_tag(cfqd);
 
@@ -3364,9 +3375,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
-       blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
-                       rq_io_start_time_ns(rq), rq_data_dir(rq),
-                       rq_is_sync(rq));
+       cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+                       rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+                       rq_data_dir(rq), rq_is_sync(rq));
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3410,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                        cfq_slice_expired(cfqd, 1);
                else if (sync && cfqq_empty &&
                         !cfq_close_cooperator(cfqd, cfqq)) {
-                       cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+                       cfqd->noidle_tree_requires_idle |=
+                               !(rq->cmd_flags & REQ_NOIDLE);
                        /*
                         * Idling is enabled for SYNC_WORKLOAD.
                         * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-                        * only if we processed at least one !rq_noidle request
+                        * only if we processed at least one !REQ_NOIDLE request
                         */
                        if (cfqd->serving_type == SYNC_WORKLOAD
                            || cfqd->noidle_tree_requires_idle
@@ -3730,7 +3742,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_put_async_queues(cfqd);
        cfq_release_cfq_groups(cfqd);
-       blkiocg_del_blkio_group(&cfqd->root_group.blkg);
+       cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
 
        spin_unlock_irq(q->queue_lock);
 
@@ -3798,8 +3810,8 @@ static void *cfq_init_queue(struct request_queue *q)
         */
        atomic_set(&cfqg->ref, 1);
        rcu_read_lock();
-       blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
-                                       0);
+       cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+                                       (void *)cfqd, 0);
        rcu_read_unlock();
 #endif
        /*