/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

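/*
 * Illustrative example (added annotation, not from the original source):
 * accounting one completed synchronous write bumps both the WRITE and
 * the SYNC buckets of the serviced-request array:
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1,
 *		       true, true);
 */
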
/*
 * Decrements the appropriate stat variable depending on the request type.
 * BUGs if the counter is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This avoids cases where superfluous timeslice-complete
	 * events (e.g., forced_dispatch in CFQ) arrive when no IOs have
	 * been served, which could trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

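/*
 * Note (added annotation): the "bytes >> 9" above converts a byte count
 * to 512-byte sectors, e.g. a 4096-byte request accounts for 8 sectors.
 */
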
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list; otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
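
/*
 * For reference (added annotation): SHOW_FUNCTION(weight) expands to a
 * read handler named blkiocg_weight_read() that returns blkcg->weight as
 * a u64; it is wired up to the "weight" cftype entry below.
 */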

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);

		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
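
/*
 * Example (added annotation): for a device numbered 8:16 and type
 * BLKIO_STAT_READ, the function above produces the key string
 * "8:16 Read".
 */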

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
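
/*
 * For reference (added annotation): SHOW_FUNCTION_PER_GROUP(time,
 * BLKIO_STAT_TIME, 0) expands to blkiocg_time_read(), which walks every
 * blkio_group in the cgroup and emits one "major:minor value" pair per
 * device via cb->fill(), without a trailing "Total" line.
 */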

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent parsing of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}
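
/*
 * Example (added annotation, illustrative device numbers): writing
 * "8:16 300" to blkio.weight_device assigns weight 300 to the disk with
 * major 8, minor 16; writing "8:16 0" removes that per-device rule.
 */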

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
						newpn->weight ?
						newpn->weight :
						blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);

out:
	return 0;
}

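/*
 * Example output of the read handler above (added annotation,
 * illustrative values):
 *
 *	dev	weight
 *	8:16	300
 */
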
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
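
/*
 * Note (added annotation): the cgroup core prefixes each file with the
 * subsystem name, so these entries appear in the cgroup filesystem as
 * blkio.weight, blkio.io_service_bytes, blkio.reset_stats, and so on.
 */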

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
			   blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we will need some dynamic
	 * registration of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
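
/*
 * A minimal sketch of how an IO scheduler hooks in (added annotation;
 * "foo" is a hypothetical policy, with CFQ being the in-tree user of
 * this interface). The ops names match the callbacks invoked elsewhere
 * in this file:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *						foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	(at module init)
 *	blkio_policy_unregister(&blkio_policy_foo);	(at module exit)
 */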

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");