/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting with irqs disabled, there is no need to bump the
 * preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

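/* Sum a statistics counter across all possible CPUs. */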
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
#define PCGF_LOCK	(1UL << PCG_LOCK)
#define PCGF_FILE	(1UL << PCG_FILE)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

/*
 * Always modified under the lru lock, so preempt_disable() is not necessary.
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;

	VM_BUG_ON(!irqs_disabled());

	cpustat = &stat->cpustat[smp_processor_id()];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

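/* Sum an LRU statistic over all nodes and zones of this memory cgroup. */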
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

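/* Return the mem_cgroup embedded in the given cgroup's subsys state. */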
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

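/*
 * Remove/add a page_cgroup on its per-zone LRU list and keep the per-zone
 * LRU counters and charge statistics in sync. Callers hold mz->lru_lock
 * with irqs disabled.
 */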
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int lru = LRU_BASE;

	if (PageCgroupUnevictable(pc))
		lru = LRU_UNEVICTABLE;
	else {
		if (PageCgroupActive(pc))
			lru += LRU_ACTIVE;
		if (PageCgroupFile(pc))
			lru += LRU_FILE;
	}

	MEM_CGROUP_ZSTAT(mz, lru) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int lru = LRU_BASE;

	if (PageCgroupUnevictable(pc))
		lru = LRU_UNEVICTABLE;
	else {
		if (PageCgroupActive(pc))
			lru += LRU_ACTIVE;
		if (PageCgroupFile(pc))
			lru += LRU_FILE;
	}

	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
}

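/*
 * Move a page_cgroup to the per-zone LRU list that matches @lru and keep
 * the LRU counters and the PCG_ACTIVE/PCG_UNEVICTABLE flags in sync.
 */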
static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
	int active = PageCgroupActive(pc);
	int file = PageCgroupFile(pc);
	int unevictable = PageCgroupUnevictable(pc);
	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
				(LRU_FILE * !!file + !!active);

	if (lru == from)
		return;

	MEM_CGROUP_ZSTAT(mz, from) -= 1;
	/*
	 * Although this is done under mz->lru_lock, other flags, which are
	 * not related to the LRU, may be modified outside the lock.
	 * We have to use atomic set/clear flags.
	 */
	if (is_unevictable_lru(lru)) {
		ClearPageCgroupActive(pc);
		SetPageCgroupUnevictable(pc);
	} else {
		if (is_active_lru(lru))
			SetPageCgroupActive(pc);
		else
			ClearPageCgroupActive(pc);
		ClearPageCgroupUnevictable(pc);
	}

	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_move(&pc->lru, &mz->lists[lru]);
}

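/* Does the task's mm belong to this memory cgroup? */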
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	pc = lookup_page_cgroup(page);
	if (!trylock_page_cgroup(pc))
		return;
	if (pc && PageCgroupUsed(pc)) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, lru);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(pc);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	return (nr_pages >> priority);
}

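/*
 * Scan the per-cgroup LRU list for this zone and isolate pages for reclaim,
 * moving up to nr_to_scan candidates that pass __isolate_lru_page() onto
 * @dst. Returns the number of pages taken.
 */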
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		/*
		 * TODO: play better with lumpy reclaim, grabbing anything.
		 */
		if (PageUnevictable(page) ||
		    (PageActive(page) && !active) ||
		    (!PageActive(page) && active)) {
			__mem_cgroup_move_lists(pc, page_lru(page));
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */

	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		goto done;
	}
	pc->mem_cgroup = mem;
	/*
	 * If a page is accounted as a page cache, insert to inactive list.
	 * If anon, insert to active list.
	 */
	pc->flags = pcg_default_flags[ctype];

	mz = page_cgroup_zoneinfo(pc);

	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
	unlock_page_cgroup(pc);

done:
	return 0;
out:
	css_put(&mem->css);
	return -ENOMEM;
}

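/*
 * Charge a page that is being mapped into an address space. Pages that
 * are already mapped, and file-cache pages, are accounted elsewhere.
 */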
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check. A newly-mapped-anon's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some FS (shmem) precharges this page
	 * before calling it and calls add_to_page_cache() with GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
	 * add_to_page_cache(). (See shmem.c) Check it here and avoid charging
	 * twice. (It works but has to pay a bit larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return;

	lock_page_cgroup(pc);
	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
	     || !PageCgroupUsed(pc)) {
		/* This happens at race in zap_pte_range() and do_swap_page()*/
		unlock_page_cgroup(pc);
		return;
	}
	ClearPageCgroupUsed(pc);
	mem = pc->mem_cgroup;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);
	unlock_page_cgroup(pc);

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	return;
}

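/* Uncharge a no-longer-mapped anonymous page. */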
void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (PageCgroupCache(pc)) {
			if (page_is_file_cache(page))
				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
			else
				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
		}
	}
	unlock_page_cgroup(pc);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * At success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. at migration failure. (newpage->mapping is cleared in this case)
	 * 2. the newpage was moved but not remapped again because the task
	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
	 *    always, to avoid a mess. The page_cgroup will be removed if
	 *    unnecessary. File cache pages are still on the radix-tree;
	 *    don't care about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used for shmem page reclaim, to reduce
 * the side effects of page allocation from shmem, which is used by some
 * mem_cgroups.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
		progress += res_counter_check_under_limit(&mem->res);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

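/*
 * Try to set a new limit on the res_counter, reclaiming pages from the
 * cgroup until the limit can be set or the retries are exhausted.
 */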
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long long val)
{

	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	int ret = 0;

	while (res_counter_set_limit(&memcg->res, val)) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!retry_count) {
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		if (!progress)
			retry_count--;
	}
	return ret;
}


/*
 * This routine traverses page_cgroups in the given list and drops them all.
 * *And* this routine doesn't reclaim the page itself, just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    enum lru_list lru)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	list = &mz->lists[lru];

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		if (!PageCgroupUsed(pc))
			break;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on the LRU. A !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else {
			spin_lock_irqsave(&mz->lru_lock, flags);
			break;
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until all lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				enum lru_list l;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				for_each_lru(l)
					mem_cgroup_force_empty_list(mem, mz, l);
			}
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

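/*
 * Read handler for the res_counter-backed control files
 * (usage, max_usage, limit, failcnt).
 */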
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	unsigned long long val;
	int ret;

	switch (cft->private) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

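/* Trigger handler that resets the max_usage or failcnt counter. */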
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

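/*
 * Show the per-cgroup statistics file ("memory.stat"): the counters from
 * mem_cgroup_stat_desc plus the size of each LRU list, in bytes.
 */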
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active_anon, inactive_anon;
		unsigned long active_file, inactive_file;
		unsigned long unevictable;

		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_ANON);
		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_ANON);
		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_FILE);
		active_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_FILE);
		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_UNEVICTABLE);

		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		spin_lock_init(&mz->lru_lock);
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

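/*
 * Allocate a mem_cgroup: use kmalloc() when the structure fits in a page,
 * otherwise fall back to vmalloc().
 */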
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

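/* cgroup create callback: allocate and initialize a new memory cgroup. */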
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

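/*
 * cgroup attach callback, invoked when a task moves into this cgroup.
 * Charges are not moved here; only the thread group leader check remains.
 */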
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};