diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index bc2936b80addb0d7bf89c8bb39f8e1d3bedc08c7..56ad4531b41234f87485e4f4bca5ca84d4d10dc1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -70,6 +70,9 @@ struct throtl_grp {
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
+
+       /* Some throttle limits got updated for the group */
+       bool limits_changed;
 };
 
 struct throtl_data
@@ -93,6 +96,8 @@ struct throtl_data
 
        /* Work for dispatching throttled bios */
        struct delayed_work throtl_work;
+
+       atomic_t limits_changed;
 };
 
 enum tg_state_flags {
@@ -373,7 +378,8 @@ throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 static inline void
 throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 {
-       unsigned long nr_slices, bytes_trim, time_elapsed, io_trim;
+       unsigned long nr_slices, time_elapsed, io_trim;
+       u64 bytes_trim, tmp;
 
        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 
@@ -391,8 +397,10 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 
        if (!nr_slices)
                return;
+       tmp = tg->bps[rw] * throtl_slice * nr_slices;
+       do_div(tmp, HZ);
+       bytes_trim = tmp;
 
-       bytes_trim = (tg->bps[rw] * throtl_slice * nr_slices)/HZ;
        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 
        if (!bytes_trim && !io_trim)
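Aside on the hunk above: on 32-bit kernels "unsigned long" is 32 bits, so bps * throtl_slice * nr_slices can wrap, and a plain "/" on a u64 would pull in a libgcc 64-by-64 division helper the kernel does not link against. do_div() is the kernel's 64-by-32 division: it divides its first argument in place and returns the remainder. A minimal sketch of the idiom (hypothetical helper name; throtl_slice and HZ as in the surrounding file):

	static inline u64 calc_bytes_trim(u64 bps, unsigned long nr_slices)
	{
		u64 tmp = bps * throtl_slice * nr_slices; /* 64-bit multiply, no wrap */

		do_div(tmp, HZ);	/* divides tmp in place, returns remainder */
		return tmp;		/* bps * throtl_slice * nr_slices / HZ */
	}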
@@ -410,7 +418,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 
        tg->slice_start[rw] += nr_slices * throtl_slice;
 
-       throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%lu io=%lu"
+       throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
@@ -422,6 +430,7 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+       u64 tmp;
 
        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 
@@ -431,8 +440,20 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 
        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 
-       io_allowed = (tg->iops[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
-                               / MSEC_PER_SEC;
+       /*
+        * jiffy_elapsed_rnd should not be a big value: the minimum iops is
+        * 1, so at most one second's worth of jiffies elapses before we
+        * allow a dispatch, and by then the slice should have been
+        * trimmed.
+        */
+
+       tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
+       do_div(tmp, HZ);
+
+       if (tmp > UINT_MAX)
+               io_allowed = UINT_MAX;
+       else
+               io_allowed = tmp;
 
        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
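The UINT_MAX clamp above exists because io_allowed is an unsigned int while the intermediate product needs 64 bits. A hypothetical helper (not in the patch) showing the widen/divide/saturate idiom in one place:

	static inline unsigned int calc_io_allowed(unsigned int iops,
						   unsigned long jiffy_elapsed_rnd)
	{
		u64 tmp = (u64)iops * jiffy_elapsed_rnd;	/* widen before multiply */

		do_div(tmp, HZ);
		return min_t(u64, tmp, UINT_MAX);	/* saturate rather than truncate */
	}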
@@ -457,7 +478,7 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)
 {
        bool rw = bio_data_dir(bio);
-       u64 bytes_allowed, extra_bytes;
+       u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 
        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -468,8 +489,9 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 
        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 
-       bytes_allowed = (tg->bps[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
-                               / MSEC_PER_SEC;
+       tmp = tg->bps[rw] * jiffy_elapsed_rnd;
+       do_div(tmp, HZ);
+       bytes_allowed = tmp;
 
        if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
                if (wait)
@@ -592,15 +614,6 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;
 
-       /*
-        * If group is already on active tree, then update dispatch time
-        * only if it is lesser than existing dispatch time. Otherwise
-        * always update the dispatch time
-        */
-
-       if (throtl_tg_on_rr(tg) && time_before(disptime, tg->disptime))
-               return;
-
        /* Update dispatch time */
        throtl_dequeue_tg(td, tg);
        tg->disptime = disptime;
@@ -691,6 +704,46 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
        return nr_disp;
 }
 
+static void throtl_process_limit_change(struct throtl_data *td)
+{
+       struct throtl_grp *tg;
+       struct hlist_node *pos, *n;
+
+       /*
+        * Make sure the atomic_inc() effects from the
+        * throtl_update_blkio_group_read_bps() family of functions are
+        * visible. It is unclear whether this is required, or whether
+        * the smp_mb__after_atomic_inc() following the atomic_inc()
+        * already suffices.
+        */
+       smp_rmb();
+       if (!atomic_read(&td->limits_changed))
+               return;
+
+       throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
+
+       hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+               /*
+                * Do we need another smp_rmb() here to make sure the
+                * tg->limits_changed update is visible? We rely on the
+                * smp_rmb() at the beginning of the function instead.
+                */
+
+               if (throtl_tg_on_rr(tg) && tg->limits_changed) {
+                       throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
+                               " riops=%u wiops=%u", tg->bps[READ],
+                               tg->bps[WRITE], tg->iops[READ],
+                               tg->iops[WRITE]);
+                       tg_update_disptime(td, tg);
+                       tg->limits_changed = false;
+               }
+       }
+
+       smp_mb__before_atomic_dec();
+       atomic_dec(&td->limits_changed);
+       smp_mb__after_atomic_dec();
+}
+
 /* Dispatch throttled bios. Should be called without queue lock held. */
 static int throtl_dispatch(struct request_queue *q)
 {
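throtl_process_limit_change() is the consumer half of a cheap change-notification scheme: writers publish a new limit, set the per-group dirty flag, then bump the global counter, so the worker's common case stays a single atomic read. A rough userspace analogue of the protocol (a sketch, not kernel code; hypothetical names, and C11 seq_cst atomics stand in for the explicit smp_* barriers):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct group {
		atomic_bool limits_changed;	/* per-group dirty flag */
		unsigned long bps;		/* the published limit */
	};

	static atomic_int limits_changed;	/* global "anything dirty?" counter */

	/* Writer: publish the limit, mark the group, then signal globally. */
	static void update_bps(struct group *g, unsigned long bps)
	{
		g->bps = bps;
		atomic_store(&g->limits_changed, true);	/* orders the store above */
		atomic_fetch_add(&limits_changed, 1);
	}

	/* Worker: cheap global check first, then scan for dirty groups. */
	static void process_limit_change(struct group *groups, size_t n)
	{
		if (!atomic_load(&limits_changed))
			return;
		for (size_t i = 0; i < n; i++)
			if (atomic_exchange(&groups[i].limits_changed, false))
				/* recompute dispatch time for groups[i] */;
		atomic_fetch_sub(&limits_changed, 1);
	}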
@@ -701,6 +754,8 @@ static int throtl_dispatch(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
 
+       throtl_process_limit_change(td);
+
        if (!total_nr_queued(td))
                goto out;
 
@@ -821,28 +876,74 @@ void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
        spin_unlock_irqrestore(td->queue->queue_lock, flags);
 }
 
-static void throtl_update_blkio_group_read_bps (struct blkio_group *blkg,
-                       u64 read_bps)
+/*
+ * For all update functions, key should be a valid pointer: they are
+ * called under blkcg_lock, which means blkg is valid and in turn key
+ * is valid. The queue exit path cannot race because of blkcg_lock.
+ *
+ * We cannot take the queue lock in the update functions, as taking the
+ * queue lock under blkcg_lock is not allowed; on other paths we take
+ * blkcg_lock under queue_lock.
+ */
+static void throtl_update_blkio_group_read_bps(void *key,
+                               struct blkio_group *blkg, u64 read_bps)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->bps[READ] = read_bps;
+       /* Make sure read_bps is updated before setting limits_changed */
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+
+       /* Make sure tg->limits_changed is updated before td->limits_changed */
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+
+       /* Schedule work right away to process the limit change */
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
-static void throtl_update_blkio_group_write_bps (struct blkio_group *blkg,
-                       u64 write_bps)
+static void throtl_update_blkio_group_write_bps(void *key,
+                               struct blkio_group *blkg, u64 write_bps)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->bps[WRITE] = write_bps;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
-static void throtl_update_blkio_group_read_iops (struct blkio_group *blkg,
-                       unsigned int read_iops)
+static void throtl_update_blkio_group_read_iops(void *key,
+                       struct blkio_group *blkg, unsigned int read_iops)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->iops[READ] = read_iops;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
-static void throtl_update_blkio_group_write_iops (struct blkio_group *blkg,
-                       unsigned int write_iops)
+static void throtl_update_blkio_group_write_iops(void *key,
+                       struct blkio_group *blkg, unsigned int write_iops)
 {
+       struct throtl_data *td = key;
+
        tg_of_blkg(blkg)->iops[WRITE] = write_iops;
+       smp_wmb();
+       tg_of_blkg(blkg)->limits_changed = true;
+       smp_mb__before_atomic_inc();
+       atomic_inc(&td->limits_changed);
+       smp_mb__after_atomic_inc();
+       throtl_schedule_delayed_work(td->queue, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
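The four update callbacks above repeat the same five-line mark-and-kick sequence. A hypothetical follow-up cleanup (not part of this patch) could factor it into one helper:

	static void throtl_mark_limits_changed(struct throtl_data *td,
					       struct throtl_grp *tg)
	{
		/* Make sure the new limit value is visible before the dirty flag */
		smp_wmb();
		tg->limits_changed = true;

		/* Order tg->limits_changed before bumping td->limits_changed */
		smp_mb__before_atomic_inc();
		atomic_inc(&td->limits_changed);
		smp_mb__after_atomic_inc();

		/* Kick the worker to apply the change */
		throtl_schedule_delayed_work(td->queue, 0);
	}

Each callback would then shrink to the limit assignment plus a single throtl_mark_limits_changed(td, tg_of_blkg(blkg)) call.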
@@ -886,8 +987,14 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
                /*
                 * There is already another bio queued in same dir. No
                 * need to update dispatch time.
+                * Still update the disptime if the rate limits on this
+                * group were changed.
                 */
-               update_disptime = false;
+               if (!tg->limits_changed)
+                       update_disptime = false;
+               else
+                       tg->limits_changed = false;
+
                goto queue_bio;
        }
 
@@ -929,6 +1036,7 @@ int blk_throtl_init(struct request_queue *q)
 
        INIT_HLIST_HEAD(&td->tg_list);
        td->tg_service_tree = THROTL_RB_ROOT;
+       atomic_set(&td->limits_changed, 0);
 
        /* Init root group */
        tg = &td->root_tg;
@@ -996,6 +1104,13 @@ void blk_throtl_exit(struct request_queue *q)
         */
        if (wait)
                synchronize_rcu();
+
+       /*
+        * Just to be safe: if somebody updated the limits through the
+        * cgroup interface after the flush above and another work item
+        * got queued, cancel it.
+        */
+       throtl_shutdown_timer_wq(q);
        throtl_td_free(td);
 }