/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

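/*
 * Map a cgroup to its blkio_cgroup by looking up this subsystem's
 * cgroup_subsys_state and converting it with container_of().
 */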
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

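/* Initialize a blkio_group before it is linked into a blkio_cgroup. */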
void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

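/*
 * Idle-time accounting: the scheduler marks the start of an idling
 * window with blkiocg_update_set_idle_time_stats() and closes it with
 * blkiocg_update_idle_time_stats(); the delta accumulates in
 * stats->idle_time.
 */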
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

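/*
 * Sample the queue depth each time a queue is scheduled as the active
 * one; avg_queue_size is later derived as sum / samples.
 */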
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

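/*
 * Track requests entering and leaving a group's queue. Adding a request
 * also ends any "empty" window and may start the group-wait clock.
 */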
void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

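/* Charge the disk time used by a timeslice to this group. */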
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This is to avoid cases where there are superfluous timeslice
	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
	 * served which could result in triggering the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

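/*
 * Account a dispatched request: bytes >> 9 converts the byte count to
 * 512-byte sectors, and both the request count (SERVICED) and byte
 * count (SERVICE_BYTES) arrays are bumped per direction/sync type.
 */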
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

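/*
 * On completion, split the request's life into wait time (queued until
 * dispatched: io_start_time - start_time) and service time (dispatched
 * until completed: now - io_start_time).
 */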
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

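/*
 * Link a blkio_group into a blkio_cgroup under blkcg->lock. The opaque,
 * policy-supplied key is published with rcu_assign_pointer() so lockless
 * readers see a fully initialized group.
 */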
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
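
/*
 * SHOW_FUNCTION generates a trivial .read_u64 handler for a scalar
 * blkio_cgroup field; currently only "weight" uses it.
 */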
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

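/*
 * Writing blkio.weight updates the cgroup-wide weight and immediately
 * propagates it to every blkio_group via each registered policy's
 * blkio_update_group_weight_fn callback.
 */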
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

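/*
 * Writing to blkio.reset_stats zeroes all counters but preserves the
 * BLKIO_STAT_QUEUED counts (requests are still in flight) and, under
 * CONFIG_DEBUG_BLK_CGROUP, restarts any in-progress idle/wait/empty
 * windows from "now".
 */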
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

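/*
 * Build a "major:minor [Read|Write|Sync|Async|Total]" key for the
 * per-device map output; with diskname_only, emit just "major:minor".
 */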
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

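/*
 * SHOW_FUNCTION_PER_GROUP generates a .read_map handler that walks the
 * cgroup's blkio_groups under RCU and emits one per-device map entry
 * per group, optionally followed by a cgroup-wide "Total".
 */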
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

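/* Control files created in each blkio cgroup directory. */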
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

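/*
 * Cgroup removal: repeatedly unhash the first blkio_group and notify
 * every registered policy via blkio_unlink_group_fn until the list is
 * empty, then free the css id and the blkio_cgroup itself.
 */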
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

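/*
 * Allocate a new blkio_cgroup (or reuse blkio_root_cgroup for the root)
 * with the default weight; hierarchies deeper than one level below the
 * root are rejected.
 */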
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

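/*
 * IO control policies (e.g. the CFQ group scheduler) register a
 * blkio_policy_type so that weight updates and group unlinking reach
 * them. A policy would typically do something like this at init time
 * (ops fields follow struct blkio_policy_ops; the cfq_* callback names
 * here are illustrative):
 *
 *	static struct blkio_policy_type blkio_policy_cfq = {
 *		.ops = {
 *			.blkio_unlink_group_fn = cfq_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *						cfq_update_blkio_group_weight,
 *		},
 *	};
 *	blkio_policy_register(&blkio_policy_cfq);
 */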
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");