/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
};

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)

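/*
 * Illustration: with two-byte (or stronger) alignment, bit 0 of the
 * page_cgroup pointer is always zero, so the same word can carry both
 * the pointer and the lock bit:
 *
 *	page->page_cgroup = (unsigned long)pc | PAGE_CGROUP_LOCK;
 *	pc = (struct page_cgroup *)(page->page_cgroup & ~PAGE_CGROUP_LOCK);
 *
 * page_get_page_cgroup() and page_assign_page_cgroup() below do exactly
 * this masking.
 */
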
/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move between */
					/* mapped and cached states */
};
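/*
 * A page_cgroup is allocated on the first charge of a page and freed in
 * mem_cgroup_uncharge() when the last reference goes away; ref_cnt
 * counts the outstanding charges against the page.
 */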

enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

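/*
 * mm_init_cgroup() and mm_free_cgroup() are expected to be paired from
 * the mm_struct setup and teardown paths (the callers are assumed to
 * live in the fork/exit code): each mm pins the css of the cgroup it
 * charges against.
 */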
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

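/*
 * Lock ordering: the page_cgroup bit spin lock may be held while taking
 * mem_cgroup->lru_lock (see mem_cgroup_charge()), so the reverse order
 * must never be used.
 */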
static __always_inline void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static __always_inline void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active)
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	else
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
}

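/*
 * Used by the OOM killer's tasklist scan to tell whether a candidate
 * task belongs to the cgroup that hit its limit (see the call to
 * mem_cgroup_out_of_memory() in the charge failure path below).
 */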
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;

	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}

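/*
 * mem_cgroup_isolate_pages() is the cgroup-aware counterpart of the
 * global LRU isolation in vmscan: it pulls up to nr_to_scan pages that
 * belong to mem_cont (and to zone z) off the cgroup's own LRU onto
 * dst, so that reclaim can then operate on them.
 */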
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		pc = list_entry(src->prev, struct page_cgroup, lru);
		VM_BUG_ON(!pc);
		page = pc->page;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			scan--;
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			scan--;
			continue;
		}

		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		/*
		 * Check if the meta page went away from under us
		 */
		if (!list_empty(&pc->lru))
			list_move(&pc->lru, &pc_list);
		else
			continue;

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc, *race_pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and the page has already been accounted
	 */
	if (pc) {
		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
			/* is this page being uncharged? */
			unlock_page_cgroup(page);
			cpu_relax();
			goto retry;
		} else
			goto done;
	}

	unlock_page_cgroup(page);

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	rcu_read_lock();
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment reference
	 * count
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		bool is_atomic = gfp_mask & GFP_ATOMIC;
		/*
		 * We cannot reclaim under GFP_ATOMIC, fail the charge
		 */
		if (is_atomic)
			goto noreclaim;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;
		/*
		 * Since we control both RSS and cache, we end up in a
		 * very interesting scenario: while reclaiming memory
		 * (essentially RSS), the pages are pushed into the swap
		 * cache and eventually get added back to our list. Hence
		 * we give ourselves a few chances before we fail.
		 */
		else if (nr_retries--) {
			congestion_wait(WRITE, HZ/10);
			continue;
		}
noreclaim:
		css_put(&mem->css);
		if (!is_atomic)
			mem_cgroup_out_of_memory(mem, GFP_KERNEL);
		goto free_pc;
	}

	lock_page_cgroup(page);
	/*
	 * Check if somebody else beat us to allocating the page_cgroup
	 */
	race_pc = page_get_page_cgroup(page);
	if (race_pc) {
		kfree(pc);
		pc = race_pc;
		atomic_inc(&pc->ref_cnt);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		goto done;
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	page_assign_page_cgroup(page, pc);

	spin_lock_irqsave(&mem->lru_lock, flags);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	unlock_page_cgroup(page);
	return 0;
free_pc:
	kfree(pc);
err:
	return -ENOMEM;
}
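/*
 * A sketch of a typical caller (illustrative only; the real call sites
 * are in the fault-handling paths): a handler installing a new
 * anonymous page might do
 *
 *	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
 *		goto oom;
 *
 * and later release the accounting with mem_cgroup_uncharge().
 */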

/*
 * See if the cached pages should be charged at all.
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem;

	if (!mm)
		mm = &init_mm;

	mem = rcu_dereference(mm->mem_cgroup);
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		return mem_cgroup_charge(page, mm, gfp_mask);
	else
		return 0;
}
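/*
 * Page cache insertions are expected to go through this wrapper, which
 * quietly becomes a no-op when control_type excludes the page cache
 * from accounting.
 */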

/*
 * Uncharging is always a welcome operation; we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This can handle cases when a page is not charged at all and we
	 * are switching between control_type settings.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		lock_page_cgroup(page);
		mem = pc->mem_cgroup;
		css_put(&mem->css);
		page_assign_page_cgroup(page, NULL);
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);

		spin_lock_irqsave(&mem->lru_lock, flags);
		list_del_init(&pc->lru);
		spin_unlock_irqrestore(&mem->lru_lock, flags);
		kfree(pc);
	}
}

int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the nearest page boundary
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
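/*
 * Example: writing "4K" yields 4096, and writing "4000" is rounded up
 * to 4096 (with 4KB pages), since limits are kept in whole pages.
 * memparse() accepts the usual K/M/G suffixes.
 */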

static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}

static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}
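/*
 * From a shell (the mount point is illustrative; note the write
 * rejects a trailing newline, hence echo -n):
 *
 *	# echo -n 3 > /cgroups/0/memory.control_type	(RSS + page cache)
 *	# cat /cgroups/0/memory.control_type
 *	3
 *
 * The accepted values correspond to the MEM_CGROUP_TYPE_* enum above.
 */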

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
};
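/*
 * These entries show up as memory.usage_in_bytes, memory.limit_in_bytes,
 * memory.failcnt and memory.control_type in each cgroup directory once
 * mem_cgroup_populate() registers them.
 */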

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}
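/*
 * The root cgroup is set up via early_init (see mem_cgroup_subsys at
 * the bottom), before the slab allocator is available, which is why it
 * uses the statically allocated init_mem_cgroup instead of kzalloc().
 */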

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct
	 * is in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
	return;
}

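/*
 * Note that moving a task only redirects *future* charges to the new
 * cgroup; pages already accounted keep pointing at the old mem_cgroup
 * through their page_cgroup until they are uncharged.
 */
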
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 1,
};