/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include "internal.h"

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
#else
#define do_swap_account		(0)
#endif

static DEFINE_MUTEX(memcg_tasklist);	/* can be held under cgroup_mutex */

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[0];
};

/*
 * For accounting in contexts where irqs are disabled: no need to bump the
 * preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

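/*
 * Editor's note (not part of the original source): each per-cpu counter
 * holds a signed delta, not a total -- a page can be charged on one CPU
 * and uncharged on another, driving an individual cpustat entry negative.
 * That is why count[] is s64 and why mem_cgroup_read_stat() must sum over
 * all possible CPUs before the value is meaningful.
 */
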
/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * Per-cgroup LRU lists; protected by zone->lru_lock.
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	/*
	 * protects reclaim-related members.
	 */
	spinlock_t reclaim_param_lock;

	int	prev_priority;	/* for recording reclaim priority */

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from. Protected by cgroup_lock()
	 */
	struct mem_cgroup *last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long	last_oom_jiffies;
	int		obsolete;
	atomic_t	refcnt;

	unsigned int	swappiness;

	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);

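/*
 * Worked example (editor's note, not part of the original source): the
 * memsw limit file uses MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT). Assuming
 * RES_LIMIT == 2 as in res_counter.h, that packs to (1 << 16) | 2 ==
 * 0x10002, which MEMFILE_TYPE() decodes back to 1 (_MEMSWAP) and
 * MEMFILE_ATTR() to 2 (RES_LIMIT).
 */
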
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;
	int cpu = get_cpu();

	cpustat = &stat->cpustat[cpu];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
	put_cpu();
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	if (!mem)
		return NULL;

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * They are called by the global LRU routines, independently of memcg.
 * What we have to take care of here is the validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU.
 * When moving account, the page is not on the LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (list_empty(&pc->lru))
		return;
	mz = page_cgroup_zoneinfo(pc);
	mem = pc->mem_cgroup;
	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	list_del_init(&pc->lru);
	return;
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	smp_rmb();
	/* unused page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge" */
	smp_rmb();
	if (!PageCgroupUsed(pc))
		return;

	mz = page_cgroup_zoneinfo(pc);
	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);
}
/*
 * Used to add swapcache to the LRU. Be careful when calling this function:
 * zone->lru_lock must not be held and irqs must not be disabled.
 */
static void mem_cgroup_lru_fixup(struct page *page)
{
	if (!isolate_lru_page(page))
		putback_lru_page(page);
}

void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * Calculate the mapped ratio under the memory controller. This will be used
 * in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

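/*
 * Worked example (editor's note, not part of the original source): with
 * res.usage at 400MB and 4KB pages, total = 102400 + 1 pages; if 256MB of
 * that is anonymous, rss = 65536 pages and the function returns
 * 65536 * 100 / 102401 == 63, i.e. roughly 64% of the charged pages are
 * mapped. The "+ 1L" merely guards against dividing by zero for an empty
 * cgroup (and rounds the result down slightly).
 */
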
/*
 * prev_priority control...this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	int prev_priority;

	spin_lock(&mem->reclaim_param_lock);
	prev_priority = mem->prev_priority;
	spin_unlock(&mem->reclaim_param_lock);

	return prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	spin_lock(&mem->reclaim_param_lock);
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
	spin_unlock(&mem->reclaim_param_lock);
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	spin_lock(&mem->reclaim_param_lock);
	mem->prev_priority = priority;
	spin_unlock(&mem->reclaim_param_lock);
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}

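/*
 * Worked example (editor's note, not part of the original source): for 4GB
 * of anon pages, gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the
 * inactive anon list is considered "low" once the active list grows past
 * six times its size; below 1GB the ratio degenerates to 1, a plain
 * active > inactive test. This mirrors the global inactive-anon heuristic
 * in vmscan.c, applied per cgroup.
 */
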
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru)
{
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return MEM_CGROUP_ZSTAT(mz, lru);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	mz = page_cgroup_zoneinfo(pc);
	if (!mz)
		return NULL;

	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		page = pc->page;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

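/*
 * Editor's note (not part of the original source): the expression
 * "LRU_FILE * !!file + !!active" maps the two flags onto enum lru_list.
 * For example, file=1/active=0 gives LRU_FILE + 0 == LRU_INACTIVE_FILE,
 * and file=0/active=1 gives 0 + 1 == LRU_ACTIVE_ANON, matching the
 * LRU_INACTIVE_ANON..LRU_ACTIVE_FILE layout of enum lru_list.
 */
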
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/*
 * This routine finds the DFS walk successor. This routine should be
 * called with cgroup_mutex held
 */
static struct mem_cgroup *
mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;

	curr_cgroup = curr->css.cgroup;
	root_cgroup = root_mem->css.cgroup;

	if (!list_empty(&curr_cgroup->children)) {
		/*
		 * Walk down to children
		 */
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->children.next,
				struct cgroup, sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

visit_parent:
	if (curr_cgroup == root_cgroup) {
		mem_cgroup_put(curr);
		curr = root_mem;
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Goto next sibling
	 */
	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
				sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Go up to next parent and next parent's sibling if need be
	 */
	curr_cgroup = curr_cgroup->parent;
	goto visit_parent;

done:
	root_mem->last_scanned_child = curr;
	return curr;
}

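/*
 * Editor's note (not part of the original source): on a hierarchy
 * root -> {A, B}, A -> {A1}, repeated calls starting from root yield
 * A, A1, B, then wrap back to root -- a pre-order DFS. The get/put pairs
 * move the walker's reference from the old position to the new one, so a
 * cgroup currently being walked cannot be freed under us.
 */
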
/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup;
	struct mem_cgroup *ret;
	bool obsolete = (root_mem->last_scanned_child &&
				root_mem->last_scanned_child->obsolete);

	/*
	 * Scan all children under the mem_cgroup mem
	 */
	cgroup_lock();
	if (list_empty(&root_mem->css.cgroup->children)) {
		ret = root_mem;
		goto done;
	}

	if (!root_mem->last_scanned_child || obsolete) {

		if (obsolete)
			mem_cgroup_put(root_mem->last_scanned_child);

		cgroup = list_first_entry(&root_mem->css.cgroup->children,
				struct cgroup, sibling);
		ret = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(ret);
	} else
		ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
						root_mem);

done:
	root_mem->last_scanned_child = ret;
	cgroup_unlock();
	return ret;
}

static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
	if (do_swap_account) {
		if (res_counter_check_under_limit(&mem->res) &&
			res_counter_check_under_limit(&mem->memsw))
			return true;
	} else
		if (res_counter_check_under_limit(&mem->res))
			return true;
	return false;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;
	unsigned int swappiness;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	spin_lock(&memcg->reclaim_param_lock);
	swappiness = memcg->swappiness;
	spin_unlock(&memcg->reclaim_param_lock);

	return swappiness;
}

/*
 * Dance down the hierarchy if needed to reclaim memory. We remember the
 * last child we reclaimed from, so that we don't end up penalizing
 * one child extensively based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						gfp_t gfp_mask, bool noswap)
{
	struct mem_cgroup *next_mem;
	int ret = 0;

	/*
	 * Reclaim unconditionally and don't check for return value.
	 * We need to reclaim in the current group and down the tree.
	 * One might think about checking for children before reclaiming,
	 * but there might be left over accounting, even after children
	 * have left.
	 */
	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
					   get_swappiness(root_mem));
	if (mem_cgroup_check_under_limit(root_mem))
		return 0;
	if (!root_mem->use_hierarchy)
		return ret;

	next_mem = mem_cgroup_get_first_node(root_mem);

	while (next_mem != root_mem) {
		if (next_mem->obsolete) {
			mem_cgroup_put(next_mem);
			cgroup_lock();
			next_mem = mem_cgroup_get_first_node(root_mem);
			cgroup_unlock();
			continue;
		}
		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
						   get_swappiness(next_mem));
		if (mem_cgroup_check_under_limit(root_mem))
			return 0;
		cgroup_lock();
		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
		cgroup_unlock();
	}
	return ret;
}

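/*
 * Editor's note (not part of the original source): reclaim is attempted on
 * the root first, then on one child per loop iteration, stopping as soon
 * as mem_cgroup_check_under_limit(root_mem) is satisfied -- the limit
 * being recovered is always the ancestor's, regardless of whose pages are
 * actually freed.
 */
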
bool mem_cgroup_oom_called(struct task_struct *task)
{
	bool ret = false;
	struct mem_cgroup *mem;
	struct mm_struct *mm;

	rcu_read_lock();
	mm = task->mm;
	if (!mm)
		mm = &init_mm;
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
		ret = true;
	rcu_read_unlock();
	return ret;
}
/*
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **memcg,
			bool oom)
{
	struct mem_cgroup *mem, *mem_over_limit;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct res_counter *fail_res;

	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
		/* Don't account this! */
		*memcg = NULL;
		return 0;
	}

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!*memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		*memcg = mem;
		rcu_read_unlock();
	} else {
		mem = *memcg;
		css_get(&mem->css);
	}

	while (1) {
		int ret;
		bool noswap = false;

		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
		if (likely(!ret)) {
			if (!do_swap_account)
				break;
			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
							&fail_res);
			if (likely(!ret))
				break;
			/* mem+swap counter fails */
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			noswap = true;
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									memsw);
		} else
			/* mem counter fails */
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									res);

		if (!(gfp_mask & __GFP_WAIT))
			goto nomem;

		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
							noswap);

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (mem_cgroup_check_under_limit(mem_over_limit))
			continue;

		if (!nr_retries--) {
			if (oom) {
				mutex_lock(&memcg_tasklist);
				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
				mutex_unlock(&memcg_tasklist);
				mem_over_limit->last_oom_jiffies = jiffies;
			}
			goto nomem;
		}
	}
	return 0;
nomem:
	css_put(&mem->css);
	return -ENOMEM;
}

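/*
 * Editor's note (not part of the original source): the two-counter dance
 * above preserves the invariant res.usage <= memsw.usage. A page is
 * charged to "res" first; only if the mem+swap counter then refuses is the
 * "res" charge rolled back, and the retry reclaims with noswap=true --
 * swapping pages out moves usage from "res" to swap and cannot lower the
 * mem+swap total.
 */
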
/*
 * commit a charge obtained by __mem_cgroup_try_charge() and make the
 * page_cgroup enter the USED state. If already USED, uncharge and return.
 */

static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
				     struct page_cgroup *pc,
				     enum charge_type ctype)
{
	/* try_charge() can return NULL to *memcg, taking care of it. */
	if (!mem)
		return;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
		css_put(&mem->css);
		return;
	}
	pc->mem_cgroup = mem;
	smp_wmb();
	pc->flags = pcg_default_flags[ctype];

	mem_cgroup_charge_statistics(mem, pc, true);

	unlock_page_cgroup(pc);
}

/**
 * mem_cgroup_move_account - move account of the page
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following:
 * - page is not on LRU (isolate_page() is useful.)
 *
 * returns 0 at success,
 * returns -EBUSY when lock is busy or "pc" is unstable.
 *
 * This function does "uncharge" from the old cgroup but doesn't do "charge"
 * to the new cgroup. That should be done by the caller.
 */

static int mem_cgroup_move_account(struct page_cgroup *pc,
	struct mem_cgroup *from, struct mem_cgroup *to)
{
	struct mem_cgroup_per_zone *from_mz, *to_mz;
	int nid, zid;
	int ret = -EBUSY;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(pc->page));

	nid = page_cgroup_nid(pc);
	zid = page_cgroup_zid(pc);
	from_mz = mem_cgroup_zoneinfo(from, nid, zid);
	to_mz = mem_cgroup_zoneinfo(to, nid, zid);

	if (!trylock_page_cgroup(pc))
		return ret;

	if (!PageCgroupUsed(pc))
		goto out;

	if (pc->mem_cgroup != from)
		goto out;

	css_put(&from->css);
	res_counter_uncharge(&from->res, PAGE_SIZE);
	mem_cgroup_charge_statistics(from, pc, false);
	if (do_swap_account)
		res_counter_uncharge(&from->memsw, PAGE_SIZE);
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, pc, true);
	css_get(&to->css);
	ret = 0;
out:
	unlock_page_cgroup(pc);
	return ret;
}

/*
 * move charges to its parent.
 */

static int mem_cgroup_move_parent(struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct page *page = pc->page;
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	parent = mem_cgroup_from_cont(pcg);

	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
	if (ret || !parent)
		return ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;

	ret = isolate_lru_page(page);

	if (ret)
		goto cancel;

	ret = mem_cgroup_move_account(pc, child, parent);

	/* drop extra refcnt taken by try_charge() (move_account increments one) */
	css_put(&parent->css);
	putback_lru_page(page);
	if (!ret) {
		put_page(page);
		return 0;
	}
	/* uncharge if move fails */
cancel:
	res_counter_uncharge(&parent->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
	put_page(page);
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	int ret;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);

	mem = memcg;
	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
	if (ret || !mem)
		return ret;

	__mem_cgroup_commit_charge(mem, pc, ctype);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check. A newly-mapped-anon's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some filesystems (shmem) precharge this
	 * page before calling it and call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
	 * add_to_page_cache(). (See shmem.c) Check it here and avoid charging
	 * twice. (It works, but has to pay a bit larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	swp_entry_t ent;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;

	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: return success
	 * to go on to do_swap_page()'s pte_same() test, which should fail.
	 */
	if (!PageSwapCache(page))
		return 0;

	ent.val = page_private(page);

	mem = lookup_swap_cgroup(ent);
	if (!mem || mem->obsolete)
		goto charge_cur_mm;
	*ptr = mem;
	return __mem_cgroup_try_charge(NULL, mask, ptr, true);
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, ptr, true);
}

#ifdef CONFIG_SWAP

int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked)
{
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	if (!locked)
		lock_page(page);
	/*
	 * If not locked, the page can be dropped from SwapCache until
	 * we reach here.
	 */
	if (PageSwapCache(page)) {
		struct mem_cgroup *mem = NULL;
		swp_entry_t ent;

		ent.val = page_private(page);
		if (do_swap_account) {
			mem = lookup_swap_cgroup(ent);
			if (mem && mem->obsolete)
				mem = NULL;
			if (mem)
				mm = NULL;
		}
		ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

		if (!ret && do_swap_account) {
			/* avoid double counting */
			mem = swap_cgroup_record(ent, NULL);
			if (mem) {
				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
				mem_cgroup_put(mem);
			}
		}
	}
	if (!locked)
		unlock_page(page);
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);

	return ret;
}
#endif

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
	pc = lookup_page_cgroup(page);
	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap....double count.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		struct mem_cgroup *memcg;
		memcg = swap_cgroup_record(ent, NULL);
		if (memcg) {
			/* If memcg is obsolete, memcg can be != ptr */
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_put(memcg);
		}
	}
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
	css_put(&mem->css);
}

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		if (page_mapped(page))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);

	mem_cgroup_charge_statistics(mem, pc, false);
	ClearPageCgroupUsed(pc);

	mz = page_cgroup_zoneinfo(pc);
	unlock_page_cgroup(pc);

	/* at swapout, this memcg will be accessed to record to swap */
	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		css_put(&mem->css);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * called from __delete_from_swap_cache() and drops the "page" account.
 * memcg information is recorded to the swap_cgroup of "ent"
 */
void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	memcg = __mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
	/* record memcg information */
	if (do_swap_account && memcg) {
		swap_cgroup_record(ent, memcg);
		mem_cgroup_get(memcg);
	}
	if (memcg)
		css_put(&memcg->css);
}

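/*
 * Editor's note (not part of the original source): the SWAPOUT uncharge
 * path above deliberately skips css_put(), so the memcg is still safe to
 * touch here; mem_cgroup_get() then pins it for as long as the swap_cgroup
 * entry points at it, and only after that is the css reference dropped.
 * The pin is released in mem_cgroup_uncharge_swap() below, or by the
 * swapin paths that call swap_cgroup_record(ent, NULL).
 */
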
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * called from swap_entry_free(). remove the record in swap_cgroup and
 * uncharge the "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	if (!do_swap_account)
		return;

	memcg = swap_cgroup_record(ent, NULL);
	if (memcg) {
		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_put(memcg);
	}
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
 * old page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
	}
	unlock_page_cgroup(pc);

	if (mem) {
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
		css_put(&mem->css);
	}
	*ptr = mem;
	return ret;
}

/* remove the redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
	struct page_cgroup *pc;
	enum charge_type ctype;

	if (!mem)
		return;

	/* at migration success, oldpage->mapping is NULL. */
	if (oldpage->mapping) {
		target = oldpage;
		unused = NULL;
	} else {
		target = newpage;
		unused = oldpage;
	}

	if (PageAnon(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	/* unused page is not on radix-tree now. */
	if (unused)
		__mem_cgroup_uncharge_common(unused, ctype);

	pc = lookup_page_cgroup(target);
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
	 * So, double-counting is effectively avoided.
	 */
	__mem_cgroup_commit_charge(mem, pc, ctype);

	/*
	 * Both oldpage and newpage are still under lock_page().
	 * Then, we don't have to care about races in the radix-tree.
	 * But we have to be careful about whether this page is unmapped
	 * or not.
	 *
	 * There is a case for !page_mapped(). At the start of
	 * migration, oldpage was mapped. But now, it's zapped.
	 * But we know the *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used for page reclaiming for shmem, to
 * reduce the side effect of page allocation from shmem, which is used by
 * some mem_cgroup.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_disabled())
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
		progress += mem_cgroup_check_under_limit(mem);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	u64 memswlimit;
	int ret = 0;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all this in some function, I do it in
		 * open coded manner, so you can see what it really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->res, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
							   false);
		if (!progress)
			retry_count--;
	}

	return ret;
}

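/*
 * Editor's note (not part of the original source): shrinking a limit below
 * current usage cannot be done atomically, hence the loop -- each pass
 * tries res_counter_set_limit(), which fails while usage is still above
 * @val, then reclaims some pages and retries, giving up after
 * MEM_CGROUP_RECLAIM_RETRIES fruitless passes or on a pending signal.
 */
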
int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
				  unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	u64 memlimit, oldusage, curusage;
	int ret;

	if (!do_swap_account)
		return -EINVAL;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all this in some function, I do it in
		 * open coded manner, so you can see what it really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->memsw, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		if (curusage >= oldusage)
			retry_count--;
	}
	return ret;
}

/*
 * This routine traverses the page_cgroups in the given list and drops them
 * all. *And* this routine doesn't reclaim the page itself, it just removes
 * the page_cgroup.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * make the mem_cgroup's charge be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	while (mem->res.usage > 0) {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		ret = 0;
		for_each_node_state(node, N_POSSIBLE) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		/* it seems the parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && mem->res.usage > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
						false, get_swappiness(mem));
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(WRITE, HZ/10);
		}
	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	if (mem->res.usage)
		goto move_account;
	ret = 0;
	goto out;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_mem = NULL;

	if (parent)
		parent_mem = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If the parent's use_hierarchy is set, we can't make any
	 * modifications in the child subtrees. If it is unset, then the
	 * change can occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, and we allow the value to
	 * be set if there are no children.
	 */
	if ((!parent_mem || !parent_mem->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			mem->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}

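/*
 * Editor's note (not part of the original source): from userspace this
 * handler sits behind the memory.use_hierarchy file, so a typical sequence
 * (path assuming the cgroup filesystem is mounted at /cgroup) is:
 *
 *	mkdir /cgroup/parent
 *	echo 1 > /cgroup/parent/memory.use_hierarchy
 *	mkdir /cgroup/parent/child
 *
 * after which the child's charges also count against the parent's limit
 * and the flag can no longer be flipped anywhere in the subtree.
 */
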
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val = 0;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		val = res_counter_read_u64(&mem->res, name);
		break;
	case _MEMSWAP:
		if (do_swap_account)
			val = res_counter_read_u64(&mem->memsw, name);
		break;
	default:
		BUG();
		break;
	}
	return val;
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		/* This function does all necessary parsing...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
	return;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}
	return 0;
}

d2ceb9b7
KH
1819static const struct mem_cgroup_stat_desc {
1820 const char *msg;
1821 u64 unit;
1822} mem_cgroup_stat_desc[] = {
1823 [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1824 [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
55e462b0
BR
1825 [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1826 [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
d2ceb9b7
KH
1827};
1828
c64745cf
PM
1829static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1830 struct cgroup_map_cb *cb)
d2ceb9b7 1831{
d2ceb9b7
KH
1832 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1833 struct mem_cgroup_stat *stat = &mem_cont->stat;
1834 int i;
1835
1836 for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1837 s64 val;
1838
1839 val = mem_cgroup_read_stat(stat, i);
1840 val *= mem_cgroup_stat_desc[i].unit;
c64745cf 1841 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
d2ceb9b7 1842 }
6d12e2d8
KH
1843 /* showing # of active pages */
1844 {
4f98a2fe
RR
1845 unsigned long active_anon, inactive_anon;
1846 unsigned long active_file, inactive_file;
7b854121 1847 unsigned long unevictable;
4f98a2fe
RR
1848
1849 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1850 LRU_INACTIVE_ANON);
1851 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1852 LRU_ACTIVE_ANON);
1853 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1854 LRU_INACTIVE_FILE);
1855 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1856 LRU_ACTIVE_FILE);
7b854121
LS
1857 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1858 LRU_UNEVICTABLE);
1859
4f98a2fe
RR
1860 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1861 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1862 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1863 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
7b854121
LS
1864 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1865
6d12e2d8 1866 }
fee7b548
KH
1867 {
1868 unsigned long long limit, memsw_limit;
1869 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
1870 cb->fill(cb, "hierarchical_memory_limit", limit);
1871 if (do_swap_account)
1872 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
1873 }
7f016ee8
KM
1874
1875#ifdef CONFIG_DEBUG_VM
c772be93 1876 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
7f016ee8
KM
1877
1878 {
1879 int nid, zid;
1880 struct mem_cgroup_per_zone *mz;
1881 unsigned long recent_rotated[2] = {0, 0};
1882 unsigned long recent_scanned[2] = {0, 0};
1883
1884 for_each_online_node(nid)
1885 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1886 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1887
1888 recent_rotated[0] +=
1889 mz->reclaim_stat.recent_rotated[0];
1890 recent_rotated[1] +=
1891 mz->reclaim_stat.recent_rotated[1];
1892 recent_scanned[0] +=
1893 mz->reclaim_stat.recent_scanned[0];
1894 recent_scanned[1] +=
1895 mz->reclaim_stat.recent_scanned[1];
1896 }
1897 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1898 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1899 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1900 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1901 }
1902#endif
1903
d2ceb9b7
KH
1904 return 0;
1905}
1906
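/*
 * Sample memory.stat output produced by the handler above (the field
 * names and their order follow the code; the values are made up):
 *
 *	cache 40960
 *	rss 8192
 *	pgpgin 12
 *	pgpgout 7
 *	active_anon 8192
 *	inactive_anon 0
 *	active_file 36864
 *	inactive_file 4096
 *	unevictable 0
 *	hierarchical_memory_limit 268435456
 */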
a7885eb8
KM
1907static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
1908{
1909 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1910
1911 return get_swappiness(memcg);
1912}
1913
1914static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
1915 u64 val)
1916{
1917 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1918 struct mem_cgroup *parent;
1919 if (val > 100)
1920 return -EINVAL;
1921
1922 if (cgrp->parent == NULL)
1923 return -EINVAL;
1924
1925 parent = mem_cgroup_from_cont(cgrp->parent);
 1926	/* If under hierarchy, only a root with no children can set this value */
1927 if ((parent->use_hierarchy) ||
1928 (memcg->use_hierarchy && !list_empty(&cgrp->children)))
1929 return -EINVAL;
1930
1931 spin_lock(&memcg->reclaim_param_lock);
1932 memcg->swappiness = val;
1933 spin_unlock(&memcg->reclaim_param_lock);
1934
1935 return 0;
1936}
1937
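/*
 * Note on the checks above: swappiness must stay uniform within a
 * hierarchy, so the write is rejected for any cgroup whose parent has
 * use_hierarchy set, and for a hierarchy root that already has
 * children.
 */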
c1e862c1 1938
8cdea7c0
BS
1939static struct cftype mem_cgroup_files[] = {
1940 {
0eea1030 1941 .name = "usage_in_bytes",
8c7c6e34 1942 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2c3daa72 1943 .read_u64 = mem_cgroup_read,
8cdea7c0 1944 },
c84872e1
PE
1945 {
1946 .name = "max_usage_in_bytes",
8c7c6e34 1947 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4da 1948 .trigger = mem_cgroup_reset,
c84872e1
PE
1949 .read_u64 = mem_cgroup_read,
1950 },
8cdea7c0 1951 {
0eea1030 1952 .name = "limit_in_bytes",
8c7c6e34 1953 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa 1954 .write_string = mem_cgroup_write,
2c3daa72 1955 .read_u64 = mem_cgroup_read,
8cdea7c0
BS
1956 },
1957 {
1958 .name = "failcnt",
8c7c6e34 1959 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4da 1960 .trigger = mem_cgroup_reset,
2c3daa72 1961 .read_u64 = mem_cgroup_read,
8cdea7c0 1962 },
d2ceb9b7
KH
1963 {
1964 .name = "stat",
c64745cf 1965 .read_map = mem_control_stat_show,
d2ceb9b7 1966 },
c1e862c1
KH
1967 {
1968 .name = "force_empty",
1969 .trigger = mem_cgroup_force_empty_write,
1970 },
18f59ea7
BS
1971 {
1972 .name = "use_hierarchy",
1973 .write_u64 = mem_cgroup_hierarchy_write,
1974 .read_u64 = mem_cgroup_hierarchy_read,
1975 },
a7885eb8
KM
1976 {
1977 .name = "swappiness",
1978 .read_u64 = mem_cgroup_swappiness_read,
1979 .write_u64 = mem_cgroup_swappiness_write,
1980 },
8cdea7c0
BS
1981};
1982
8c7c6e34
KH
1983#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1984static struct cftype memsw_cgroup_files[] = {
1985 {
1986 .name = "memsw.usage_in_bytes",
1987 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1988 .read_u64 = mem_cgroup_read,
1989 },
1990 {
1991 .name = "memsw.max_usage_in_bytes",
1992 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1993 .trigger = mem_cgroup_reset,
1994 .read_u64 = mem_cgroup_read,
1995 },
1996 {
1997 .name = "memsw.limit_in_bytes",
1998 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1999 .write_string = mem_cgroup_write,
2000 .read_u64 = mem_cgroup_read,
2001 },
2002 {
2003 .name = "memsw.failcnt",
2004 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2005 .trigger = mem_cgroup_reset,
2006 .read_u64 = mem_cgroup_read,
2007 },
2008};
2009
2010static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2011{
2012 if (!do_swap_account)
2013 return 0;
2014 return cgroup_add_files(cont, ss, memsw_cgroup_files,
2015 ARRAY_SIZE(memsw_cgroup_files));
2016};
2017#else
2018static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2019{
2020 return 0;
2021}
2022#endif
2023
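/*
 * When CONFIG_CGROUP_MEM_RES_CTLR_SWAP is enabled and swap accounting
 * is active, the memory.memsw.* files above appear alongside the plain
 * memory.* files; otherwise register_memsw_files() is a no-op and they
 * are absent.
 */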
6d12e2d8
KH
2024static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2025{
2026 struct mem_cgroup_per_node *pn;
1ecaab2b 2027 struct mem_cgroup_per_zone *mz;
b69408e8 2028 enum lru_list l;
41e3355d 2029 int zone, tmp = node;
1ecaab2b
KH
2030 /*
 2031	 * This routine is called for each possible node, but it is a BUG
 2032	 * to call kmalloc() against an offline node.
 2033	 *
 2034	 * TODO: this routine can waste a lot of memory on nodes that will
 2035	 * never be onlined; it would be better to allocate from a memory
 2036	 * hotplug callback function instead.
2037 */
41e3355d
KH
 2038	if (!node_state(node, N_NORMAL_MEMORY))
 2039		tmp = -1;	/* memoryless node: let kmalloc_node() fall back */
2040 pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
2041 if (!pn)
2042 return 1;
1ecaab2b 2043
6d12e2d8
KH
2044 mem->info.nodeinfo[node] = pn;
2045 memset(pn, 0, sizeof(*pn));
1ecaab2b
KH
2046
2047 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2048 mz = &pn->zoneinfo[zone];
b69408e8
CL
2049 for_each_lru(l)
2050 INIT_LIST_HEAD(&mz->lists[l]);
1ecaab2b 2051 }
6d12e2d8
KH
2052 return 0;
2053}
2054
1ecaab2b
KH
2055static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2056{
2057 kfree(mem->info.nodeinfo[node]);
2058}
2059
c8dad2bb
JB
2060static int mem_cgroup_size(void)
2061{
2062 int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2063 return sizeof(struct mem_cgroup) + cpustat_size;
2064}
2065
33327948
KH
2066static struct mem_cgroup *mem_cgroup_alloc(void)
2067{
2068 struct mem_cgroup *mem;
c8dad2bb 2069 int size = mem_cgroup_size();
33327948 2070
c8dad2bb
JB
2071 if (size < PAGE_SIZE)
2072 mem = kmalloc(size, GFP_KERNEL);
33327948 2073 else
c8dad2bb 2074 mem = vmalloc(size);
33327948
KH
2075
2076 if (mem)
c8dad2bb 2077 memset(mem, 0, size);
33327948
KH
2078 return mem;
2079}
2080
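/*
 * Sizing note: the cacheline-aligned per-cpu stat array can push
 * mem_cgroup_size() past PAGE_SIZE on machines with many possible
 * CPUs, in which case the allocation falls back from kmalloc() to
 * vmalloc(), which does not require physically contiguous pages.
 */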
8c7c6e34
KH
2081/*
 2082 * When a mem_cgroup is destroyed, references from swap_cgroup may remain.
 2083 * (scanning them all at force_empty time would be too costly...)
 2084 *
 2085 * Instead of clearing all references at force_empty, we remember
 2086 * the number of references from swap_cgroup and free the mem_cgroup
 2087 * when that count drops to 0.
 2088 *
 2089 * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and any
 2090 * swap entry which points to this memcg will be ignored at swapin.
2091 *
2092 * Removal of cgroup itself succeeds regardless of refs from swap.
2093 */
2094
33327948
KH
2095static void mem_cgroup_free(struct mem_cgroup *mem)
2096{
08e552c6
KH
2097 int node;
2098
8c7c6e34
KH
2099 if (atomic_read(&mem->refcnt) > 0)
2100 return;
08e552c6
KH
2101
2102
2103 for_each_node_state(node, N_POSSIBLE)
2104 free_mem_cgroup_per_zone_info(mem, node);
2105
c8dad2bb 2106 if (mem_cgroup_size() < PAGE_SIZE)
33327948
KH
2107 kfree(mem);
2108 else
2109 vfree(mem);
2110}
2111
8c7c6e34
KH
2112static void mem_cgroup_get(struct mem_cgroup *mem)
2113{
2114 atomic_inc(&mem->refcnt);
2115}
2116
2117static void mem_cgroup_put(struct mem_cgroup *mem)
2118{
2119 if (atomic_dec_and_test(&mem->refcnt)) {
2120 if (!mem->obsolete)
2121 return;
2122 mem_cgroup_free(mem);
2123 }
2124}
2125
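/*
 * Lifecycle sketch: each swap_cgroup record pins the memcg via
 * mem_cgroup_get(); after pre_destroy marks the memcg obsolete, the
 * final mem_cgroup_put() is what actually frees it.
 */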
33327948 2126
c077719b
KH
2127#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2128static void __init enable_swap_cgroup(void)
2129{
f8d66542 2130 if (!mem_cgroup_disabled() && really_do_swap_account)
c077719b
KH
2131 do_swap_account = 1;
2132}
2133#else
2134static void __init enable_swap_cgroup(void)
2135{
2136}
2137#endif
2138
8cdea7c0
BS
2139static struct cgroup_subsys_state *
2140mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2141{
28dbc4b6 2142 struct mem_cgroup *mem, *parent;
6d12e2d8 2143 int node;
8cdea7c0 2144
c8dad2bb
JB
2145 mem = mem_cgroup_alloc();
2146 if (!mem)
2147 return ERR_PTR(-ENOMEM);
78fb7466 2148
6d12e2d8
KH
2149 for_each_node_state(node, N_POSSIBLE)
2150 if (alloc_mem_cgroup_per_zone_info(mem, node))
2151 goto free_out;
c077719b 2152 /* root ? */
28dbc4b6 2153 if (cont->parent == NULL) {
c077719b 2154 enable_swap_cgroup();
28dbc4b6 2155 parent = NULL;
18f59ea7 2156 } else {
28dbc4b6 2157 parent = mem_cgroup_from_cont(cont->parent);
18f59ea7
BS
2158 mem->use_hierarchy = parent->use_hierarchy;
2159 }
28dbc4b6 2160
18f59ea7
BS
2161 if (parent && parent->use_hierarchy) {
2162 res_counter_init(&mem->res, &parent->res);
2163 res_counter_init(&mem->memsw, &parent->memsw);
2164 } else {
2165 res_counter_init(&mem->res, NULL);
2166 res_counter_init(&mem->memsw, NULL);
2167 }
6d61ef40 2168 mem->last_scanned_child = NULL;
2733c06a 2169 spin_lock_init(&mem->reclaim_param_lock);
6d61ef40 2170
a7885eb8
KM
2171 if (parent)
2172 mem->swappiness = get_swappiness(parent);
2173
8cdea7c0 2174 return &mem->css;
6d12e2d8
KH
2175free_out:
2176 for_each_node_state(node, N_POSSIBLE)
1ecaab2b 2177 free_mem_cgroup_per_zone_info(mem, node);
c8dad2bb 2178 mem_cgroup_free(mem);
2dda81ca 2179 return ERR_PTR(-ENOMEM);
8cdea7c0
BS
2180}
2181
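/*
 * Note: a child created under a parent with use_hierarchy set chains
 * its res_counters to the parent's, so its charges propagate upward;
 * otherwise the counters are initialized standalone (NULL parent).
 */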
df878fb0
KH
2182static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2183 struct cgroup *cont)
2184{
2185 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
8c7c6e34 2186 mem->obsolete = 1;
c1e862c1 2187 mem_cgroup_force_empty(mem, false);
df878fb0
KH
2188}
2189
8cdea7c0
BS
2190static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2191 struct cgroup *cont)
2192{
33327948 2193 mem_cgroup_free(mem_cgroup_from_cont(cont));
8cdea7c0
BS
2194}
2195
2196static int mem_cgroup_populate(struct cgroup_subsys *ss,
2197 struct cgroup *cont)
2198{
8c7c6e34
KH
2199 int ret;
2200
2201 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2202 ARRAY_SIZE(mem_cgroup_files));
2203
2204 if (!ret)
2205 ret = register_memsw_files(cont, ss);
2206 return ret;
8cdea7c0
BS
2207}
2208
67e465a7
BS
2209static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2210 struct cgroup *cont,
2211 struct cgroup *old_cont,
2212 struct task_struct *p)
2213{
7f4d454d 2214 mutex_lock(&memcg_tasklist);
67e465a7 2215 /*
f9717d28
NK
 2216	 * FIXME: it would be better to move this process's charges from the
 2217	 * old memcg to the new one, but that is still on the TODO list.
67e465a7 2218 */
7f4d454d 2219 mutex_unlock(&memcg_tasklist);
67e465a7
BS
2220}
2221
8cdea7c0
BS
2222struct cgroup_subsys mem_cgroup_subsys = {
2223 .name = "memory",
2224 .subsys_id = mem_cgroup_subsys_id,
2225 .create = mem_cgroup_create,
df878fb0 2226 .pre_destroy = mem_cgroup_pre_destroy,
8cdea7c0
BS
2227 .destroy = mem_cgroup_destroy,
2228 .populate = mem_cgroup_populate,
67e465a7 2229 .attach = mem_cgroup_move_task,
6d12e2d8 2230 .early_init = 0,
8cdea7c0 2231};
c077719b
KH
2232
2233#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2234
2235static int __init disable_swap_account(char *s)
2236{
2237 really_do_swap_account = 0;
2238 return 1;
2239}
2240__setup("noswapaccount", disable_swap_account);
2241#endif
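/*
 * Boot-time usage: passing "noswapaccount" on the kernel command line
 * clears really_do_swap_account, so enable_swap_cgroup() leaves
 * do_swap_account off and the memory.memsw.* files are never
 * registered.
 */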