[net-next-2.6.git] / mm / memcontrol.c
8cdea7c0
BS
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
78fb7466
PE
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
2e72b634
KS
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
8cdea7c0
BS
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/res_counter.h>
25#include <linux/memcontrol.h>
26#include <linux/cgroup.h>
78fb7466 27#include <linux/mm.h>
4ffef5fe 28#include <linux/hugetlb.h>
d13d1443 29#include <linux/pagemap.h>
d52aa412 30#include <linux/smp.h>
8a9f3ccd 31#include <linux/page-flags.h>
66e1707b 32#include <linux/backing-dev.h>
8a9f3ccd
BS
33#include <linux/bit_spinlock.h>
34#include <linux/rcupdate.h>
e222432b 35#include <linux/limits.h>
8c7c6e34 36#include <linux/mutex.h>
f64c3f54 37#include <linux/rbtree.h>
b6ac57d5 38#include <linux/slab.h>
66e1707b 39#include <linux/swap.h>
02491447 40#include <linux/swapops.h>
66e1707b 41#include <linux/spinlock.h>
2e72b634
KS
42#include <linux/eventfd.h>
43#include <linux/sort.h>
66e1707b 44#include <linux/fs.h>
d2ceb9b7 45#include <linux/seq_file.h>
33327948 46#include <linux/vmalloc.h>
b69408e8 47#include <linux/mm_inline.h>
52d4b9ac 48#include <linux/page_cgroup.h>
cdec2e42 49#include <linux/cpu.h>
08e552c6 50#include "internal.h"
8cdea7c0 51
8697d331
BS
52#include <asm/uaccess.h>
53
a181b0e8 54struct cgroup_subsys mem_cgroup_subsys __read_mostly;
a181b0e8 55#define MEM_CGROUP_RECLAIM_RETRIES 5
4b3bde4c 56struct mem_cgroup *root_mem_cgroup __read_mostly;
8cdea7c0 57
c077719b 58#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
338c8431 59/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
c077719b
KH
60int do_swap_account __read_mostly;
 61static int really_do_swap_account __initdata = 1; /* for remembering boot option */
62#else
63#define do_swap_account (0)
64#endif
65
d2265e6f
KH
66/*
67 * Per memcg event counter is incremented at every pagein/pageout. This counter
 68 * is used to trigger some periodic events. This is straightforward and better
 69 * than using jiffies etc. to handle periodic memcg events.
70 *
71 * These values will be used as !((event) & ((1 <<(thresh)) - 1))
72 */
73#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
74#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
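/*
 * An illustrative sketch of the event-mask arithmetic used by
 * __memcg_event_check() below: the check fires whenever the low 'thresh'
 * bits of the per-cpu event counter are all zero, i.e. once every 2^thresh
 * events. With THRESHOLDS_EVENTS_THRESH == 7 that is once per 128
 * pagein/pageout events; with SOFTLIMIT_EVENTS_THRESH == 10, once per 1024.
 * The helper name below is hypothetical and only demonstrates the mask math.
 */
static inline int memcg_event_hits(unsigned long event, int thresh)
{
	/* true at event == 0, 2^thresh, 2 * 2^thresh, ... */
	return !(event & ((1UL << thresh) - 1));
}
/* e.g. memcg_event_hits(128, 7) == 1, memcg_event_hits(129, 7) == 0 */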
c077719b 75
d52aa412
KH
76/*
77 * Statistics for memory cgroup.
78 */
79enum mem_cgroup_stat_index {
80 /*
81 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
82 */
83 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
d69b042f 84 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
d8046582 85 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
55e462b0
BR
86 MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
87 MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
0c3e73e8 88 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
d2265e6f 89 MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */
d52aa412
KH
90
91 MEM_CGROUP_STAT_NSTATS,
92};
93
94struct mem_cgroup_stat_cpu {
95 s64 count[MEM_CGROUP_STAT_NSTATS];
d52aa412
KH
96};
97
6d12e2d8
KH
98/*
99 * per-zone information in memory controller.
100 */
6d12e2d8 101struct mem_cgroup_per_zone {
072c56c1
KH
102 /*
103 * spin_lock to protect the per cgroup LRU
104 */
b69408e8
CL
105 struct list_head lists[NR_LRU_LISTS];
106 unsigned long count[NR_LRU_LISTS];
3e2f41f1
KM
107
108 struct zone_reclaim_stat reclaim_stat;
f64c3f54
BS
109 struct rb_node tree_node; /* RB tree node */
110 unsigned long long usage_in_excess;/* Set to the value by which */
111 /* the soft limit is exceeded*/
112 bool on_tree;
4e416953
BS
113 struct mem_cgroup *mem; /* Back pointer, we cannot */
114 /* use container_of */
6d12e2d8
KH
115};
116/* Macro for accessing counter */
117#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
118
119struct mem_cgroup_per_node {
120 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
121};
122
123struct mem_cgroup_lru_info {
124 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
125};
126
f64c3f54
BS
127/*
128 * Cgroups above their limits are maintained in a RB-Tree, independent of
129 * their hierarchy representation
130 */
131
132struct mem_cgroup_tree_per_zone {
133 struct rb_root rb_root;
134 spinlock_t lock;
135};
136
137struct mem_cgroup_tree_per_node {
138 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
139};
140
141struct mem_cgroup_tree {
142 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
143};
144
145static struct mem_cgroup_tree soft_limit_tree __read_mostly;
146
2e72b634
KS
147struct mem_cgroup_threshold {
148 struct eventfd_ctx *eventfd;
149 u64 threshold;
150};
151
9490ff27 152/* For threshold */
2e72b634
KS
153struct mem_cgroup_threshold_ary {
154 /* An array index points to threshold just below usage. */
5407a562 155 int current_threshold;
2e72b634
KS
156 /* Size of entries[] */
157 unsigned int size;
158 /* Array of thresholds */
159 struct mem_cgroup_threshold entries[0];
160};
2c488db2
KS
161
162struct mem_cgroup_thresholds {
163 /* Primary thresholds array */
164 struct mem_cgroup_threshold_ary *primary;
165 /*
166 * Spare threshold array.
167 * This is needed to make mem_cgroup_unregister_event() "never fail".
168 * It must be able to store at least primary->size - 1 entries.
169 */
170 struct mem_cgroup_threshold_ary *spare;
171};
172
9490ff27
KH
173/* for OOM */
174struct mem_cgroup_eventfd_list {
175 struct list_head list;
176 struct eventfd_ctx *eventfd;
177};
2e72b634 178
2e72b634 179static void mem_cgroup_threshold(struct mem_cgroup *mem);
9490ff27 180static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
2e72b634 181
8cdea7c0
BS
182/*
183 * The memory controller data structure. The memory controller controls both
184 * page cache and RSS per cgroup. We would eventually like to provide
185 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
186 * to help the administrator determine what knobs to tune.
187 *
188 * TODO: Add a water mark for the memory controller. Reclaim will begin when
8a9f3ccd
BS
 189 * we hit the water mark. Maybe even add a low water mark, such that
 190 * no reclaim occurs from a cgroup at its low water mark; this is
191 * a feature that will be implemented much later in the future.
8cdea7c0
BS
192 */
193struct mem_cgroup {
194 struct cgroup_subsys_state css;
195 /*
196 * the counter to account for memory usage
197 */
198 struct res_counter res;
8c7c6e34
KH
199 /*
200 * the counter to account for mem+swap usage.
201 */
202 struct res_counter memsw;
78fb7466
PE
203 /*
204 * Per cgroup active and inactive list, similar to the
205 * per zone LRU lists.
78fb7466 206 */
6d12e2d8 207 struct mem_cgroup_lru_info info;
072c56c1 208
2733c06a
KM
209 /*
 210	 * protect against reclaim related member.
211 */
212 spinlock_t reclaim_param_lock;
213
6d61ef40 214 /*
af901ca1 215 * While reclaiming in a hierarchy, we cache the last child we
04046e1a 216 * reclaimed from.
6d61ef40 217 */
04046e1a 218 int last_scanned_child;
18f59ea7
BS
219 /*
220 * Should the accounting and control be hierarchical, per subtree?
221 */
222 bool use_hierarchy;
867578cb 223 atomic_t oom_lock;
8c7c6e34 224 atomic_t refcnt;
14797e23 225
a7885eb8 226 unsigned int swappiness;
3c11ecf4
KH
227 /* OOM-Killer disable */
228 int oom_kill_disable;
a7885eb8 229
22a668d7
KH
230 /* set when res.limit == memsw.limit */
231 bool memsw_is_minimum;
232
2e72b634
KS
233 /* protect arrays of thresholds */
234 struct mutex thresholds_lock;
235
236 /* thresholds for memory usage. RCU-protected */
2c488db2 237 struct mem_cgroup_thresholds thresholds;
907860ed 238
2e72b634 239 /* thresholds for mem+swap usage. RCU-protected */
2c488db2 240 struct mem_cgroup_thresholds memsw_thresholds;
907860ed 241
9490ff27
KH
242 /* For oom notifier event fd */
243 struct list_head oom_notify;
244
7dc74be0
DN
245 /*
246 * Should we move charges of a task when a task is moved into this
247 * mem_cgroup ? And what type of charges should we move ?
248 */
249 unsigned long move_charge_at_immigrate;
d52aa412 250 /*
c62b1a3b 251 * percpu counter.
d52aa412 252 */
c62b1a3b 253 struct mem_cgroup_stat_cpu *stat;
8cdea7c0
BS
254};
255
7dc74be0
DN
256/* Stuffs for move charges at task migration. */
257/*
 258 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
259 * left-shifted bitmap of these types.
260 */
261enum move_type {
4ffef5fe 262 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
87946a72 263 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
7dc74be0
DN
264 NR_MOVE_TYPE,
265};
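/*
 * A minimal worked example of the bitmap above (names as defined in this
 * file): the value written to the move_charge_at_immigrate cgroup file is a
 * left-shifted bitmap of enum move_type, so
 *	1 << MOVE_CHARGE_TYPE_ANON == 0x1  -> move anon pages (and their swap)
 *	1 << MOVE_CHARGE_TYPE_FILE == 0x2  -> move file pages (and their swap)
 *	0x3                                -> move both
 * move_anon()/move_file() below simply test these bits on mc.to.
 */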
266
4ffef5fe
DN
267/* "mc" and its members are protected by cgroup_mutex */
268static struct move_charge_struct {
269 struct mem_cgroup *from;
270 struct mem_cgroup *to;
271 unsigned long precharge;
854ffa8d 272 unsigned long moved_charge;
483c30b5 273 unsigned long moved_swap;
8033b97c
DN
274 struct task_struct *moving_task; /* a task moving charges */
275 wait_queue_head_t waitq; /* a waitq for other context */
276} mc = {
277 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
278};
4ffef5fe 279
90254a65
DN
280static bool move_anon(void)
281{
282 return test_bit(MOVE_CHARGE_TYPE_ANON,
283 &mc.to->move_charge_at_immigrate);
284}
285
87946a72
DN
286static bool move_file(void)
287{
288 return test_bit(MOVE_CHARGE_TYPE_FILE,
289 &mc.to->move_charge_at_immigrate);
290}
291
4e416953
BS
292/*
293 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
294 * limit reclaim to prevent infinite loops, if they ever occur.
295 */
296#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
297#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
298
217bc319
KH
299enum charge_type {
300 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
301 MEM_CGROUP_CHARGE_TYPE_MAPPED,
4f98a2fe 302 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
c05555b5 303 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
d13d1443 304 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
8a9478ca 305 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
c05555b5
KH
306 NR_CHARGE_TYPE,
307};
308
52d4b9ac
KH
309/* only for here (for easy reading.) */
310#define PCGF_CACHE (1UL << PCG_CACHE)
311#define PCGF_USED (1UL << PCG_USED)
52d4b9ac 312#define PCGF_LOCK (1UL << PCG_LOCK)
4b3bde4c
BS
313/* Not used, but added here for completeness */
314#define PCGF_ACCT (1UL << PCG_ACCT)
217bc319 315
8c7c6e34
KH
316/* for encoding cft->private value on file */
317#define _MEM (0)
318#define _MEMSWAP (1)
9490ff27 319#define _OOM_TYPE (2)
8c7c6e34
KH
320#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
321#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
322#define MEMFILE_ATTR(val) ((val) & 0xffff)
9490ff27
KH
 323/* Used for OOM notifier */
324#define OOM_CONTROL (0)
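/*
 * A short worked example of the cft->private encoding above (macro names as
 * defined in this file): the resource type goes in the upper 16 bits and the
 * res_counter member (or OOM attribute) in the lower 16 bits. Assuming
 * RES_LIMIT == 2 as in this kernel's res_counter.h:
 *	MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) == (1 << 16) | 2 == 0x10002
 *	MEMFILE_TYPE(0x10002) == 0x1  (_MEMSWAP)
 *	MEMFILE_ATTR(0x10002) == 0x2  (RES_LIMIT)
 * so one read/write handler can recover both the counter and the attribute.
 */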
8c7c6e34 325
75822b44
BS
326/*
327 * Reclaim flags for mem_cgroup_hierarchical_reclaim
328 */
329#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
330#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
331#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
332#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
4e416953
BS
333#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
334#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
75822b44 335
8c7c6e34
KH
336static void mem_cgroup_get(struct mem_cgroup *mem);
337static void mem_cgroup_put(struct mem_cgroup *mem);
7bcc1bb1 338static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
cdec2e42 339static void drain_all_stock_async(void);
8c7c6e34 340
f64c3f54
BS
341static struct mem_cgroup_per_zone *
342mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
343{
344 return &mem->info.nodeinfo[nid]->zoneinfo[zid];
345}
346
d324236b
WF
347struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
348{
349 return &mem->css;
350}
351
f64c3f54
BS
352static struct mem_cgroup_per_zone *
353page_cgroup_zoneinfo(struct page_cgroup *pc)
354{
355 struct mem_cgroup *mem = pc->mem_cgroup;
356 int nid = page_cgroup_nid(pc);
357 int zid = page_cgroup_zid(pc);
358
359 if (!mem)
360 return NULL;
361
362 return mem_cgroup_zoneinfo(mem, nid, zid);
363}
364
365static struct mem_cgroup_tree_per_zone *
366soft_limit_tree_node_zone(int nid, int zid)
367{
368 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
369}
370
371static struct mem_cgroup_tree_per_zone *
372soft_limit_tree_from_page(struct page *page)
373{
374 int nid = page_to_nid(page);
375 int zid = page_zonenum(page);
376
377 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
378}
379
380static void
4e416953 381__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
f64c3f54 382 struct mem_cgroup_per_zone *mz,
ef8745c1
KH
383 struct mem_cgroup_tree_per_zone *mctz,
384 unsigned long long new_usage_in_excess)
f64c3f54
BS
385{
386 struct rb_node **p = &mctz->rb_root.rb_node;
387 struct rb_node *parent = NULL;
388 struct mem_cgroup_per_zone *mz_node;
389
390 if (mz->on_tree)
391 return;
392
ef8745c1
KH
393 mz->usage_in_excess = new_usage_in_excess;
394 if (!mz->usage_in_excess)
395 return;
f64c3f54
BS
396 while (*p) {
397 parent = *p;
398 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
399 tree_node);
400 if (mz->usage_in_excess < mz_node->usage_in_excess)
401 p = &(*p)->rb_left;
402 /*
403 * We can't avoid mem cgroups that are over their soft
404 * limit by the same amount
405 */
406 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
407 p = &(*p)->rb_right;
408 }
409 rb_link_node(&mz->tree_node, parent, p);
410 rb_insert_color(&mz->tree_node, &mctz->rb_root);
411 mz->on_tree = true;
4e416953
BS
412}
413
414static void
415__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
416 struct mem_cgroup_per_zone *mz,
417 struct mem_cgroup_tree_per_zone *mctz)
418{
419 if (!mz->on_tree)
420 return;
421 rb_erase(&mz->tree_node, &mctz->rb_root);
422 mz->on_tree = false;
423}
424
f64c3f54
BS
425static void
426mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
427 struct mem_cgroup_per_zone *mz,
428 struct mem_cgroup_tree_per_zone *mctz)
429{
430 spin_lock(&mctz->lock);
4e416953 431 __mem_cgroup_remove_exceeded(mem, mz, mctz);
f64c3f54
BS
432 spin_unlock(&mctz->lock);
433}
434
f64c3f54
BS
435
436static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
437{
ef8745c1 438 unsigned long long excess;
f64c3f54
BS
439 struct mem_cgroup_per_zone *mz;
440 struct mem_cgroup_tree_per_zone *mctz;
4e649152
KH
441 int nid = page_to_nid(page);
442 int zid = page_zonenum(page);
f64c3f54
BS
443 mctz = soft_limit_tree_from_page(page);
444
445 /*
4e649152
KH
 446 * Necessary to update all ancestors when hierarchy is used,
 447 * because their event counter is not touched.
f64c3f54 448 */
4e649152
KH
449 for (; mem; mem = parent_mem_cgroup(mem)) {
450 mz = mem_cgroup_zoneinfo(mem, nid, zid);
ef8745c1 451 excess = res_counter_soft_limit_excess(&mem->res);
4e649152
KH
452 /*
453 * We have to update the tree if mz is on RB-tree or
454 * mem is over its softlimit.
455 */
ef8745c1 456 if (excess || mz->on_tree) {
4e649152
KH
457 spin_lock(&mctz->lock);
458 /* if on-tree, remove it */
459 if (mz->on_tree)
460 __mem_cgroup_remove_exceeded(mem, mz, mctz);
461 /*
ef8745c1
KH
462 * Insert again. mz->usage_in_excess will be updated.
463 * If excess is 0, no tree ops.
4e649152 464 */
ef8745c1 465 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
4e649152
KH
466 spin_unlock(&mctz->lock);
467 }
f64c3f54
BS
468 }
469}
470
471static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
472{
473 int node, zone;
474 struct mem_cgroup_per_zone *mz;
475 struct mem_cgroup_tree_per_zone *mctz;
476
477 for_each_node_state(node, N_POSSIBLE) {
478 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
479 mz = mem_cgroup_zoneinfo(mem, node, zone);
480 mctz = soft_limit_tree_node_zone(node, zone);
481 mem_cgroup_remove_exceeded(mem, mz, mctz);
482 }
483 }
484}
485
4e416953
BS
486static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
487{
488 return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
489}
490
491static struct mem_cgroup_per_zone *
492__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
493{
494 struct rb_node *rightmost = NULL;
26251eaf 495 struct mem_cgroup_per_zone *mz;
4e416953
BS
496
497retry:
26251eaf 498 mz = NULL;
4e416953
BS
499 rightmost = rb_last(&mctz->rb_root);
500 if (!rightmost)
501 goto done; /* Nothing to reclaim from */
502
503 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
504 /*
 505 * Remove the node now but someone else can add it back;
 506 * we will add it back at the end of reclaim to its correct
507 * position in the tree.
508 */
509 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
510 if (!res_counter_soft_limit_excess(&mz->mem->res) ||
511 !css_tryget(&mz->mem->css))
512 goto retry;
513done:
514 return mz;
515}
516
517static struct mem_cgroup_per_zone *
518mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
519{
520 struct mem_cgroup_per_zone *mz;
521
522 spin_lock(&mctz->lock);
523 mz = __mem_cgroup_largest_soft_limit_node(mctz);
524 spin_unlock(&mctz->lock);
525 return mz;
526}
527
c62b1a3b
KH
528static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
529 enum mem_cgroup_stat_index idx)
530{
531 int cpu;
532 s64 val = 0;
533
534 for_each_possible_cpu(cpu)
535 val += per_cpu(mem->stat->count[idx], cpu);
536 return val;
537}
538
539static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
540{
541 s64 ret;
542
543 ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
544 ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
545 return ret;
546}
547
0c3e73e8
BS
548static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
549 bool charge)
550{
551 int val = (charge) ? 1 : -1;
c62b1a3b 552 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
0c3e73e8
BS
553}
554
c05555b5
KH
555static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
556 struct page_cgroup *pc,
557 bool charge)
d52aa412 558{
0c3e73e8 559 int val = (charge) ? 1 : -1;
d52aa412 560
c62b1a3b
KH
561 preempt_disable();
562
c05555b5 563 if (PageCgroupCache(pc))
c62b1a3b 564 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
d52aa412 565 else
c62b1a3b 566 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
55e462b0
BR
567
568 if (charge)
c62b1a3b 569 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
55e462b0 570 else
c62b1a3b 571 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
d2265e6f 572 __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
2e72b634 573
c62b1a3b 574 preempt_enable();
6d12e2d8
KH
575}
576
14067bb3 577static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
b69408e8 578 enum lru_list idx)
6d12e2d8
KH
579{
580 int nid, zid;
581 struct mem_cgroup_per_zone *mz;
582 u64 total = 0;
583
584 for_each_online_node(nid)
585 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
586 mz = mem_cgroup_zoneinfo(mem, nid, zid);
587 total += MEM_CGROUP_ZSTAT(mz, idx);
588 }
589 return total;
d52aa412
KH
590}
591
d2265e6f
KH
592static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
593{
594 s64 val;
595
596 val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
597
598 return !(val & ((1 << event_mask_shift) - 1));
599}
600
601/*
602 * Check events in order.
603 *
604 */
605static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
606{
607 /* threshold event is triggered in finer grain than soft limit */
608 if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
609 mem_cgroup_threshold(mem);
610 if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
611 mem_cgroup_update_tree(mem, page);
612 }
613}
614
d5b69e38 615static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
8cdea7c0
BS
616{
617 return container_of(cgroup_subsys_state(cont,
618 mem_cgroup_subsys_id), struct mem_cgroup,
619 css);
620}
621
cf475ad2 622struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb7466 623{
31a78f23
BS
624 /*
625 * mm_update_next_owner() may clear mm->owner to NULL
626 * if it races with swapoff, page migration, etc.
627 * So this can be called with p == NULL.
628 */
629 if (unlikely(!p))
630 return NULL;
631
78fb7466
PE
632 return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
633 struct mem_cgroup, css);
634}
635
54595fe2
KH
636static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
637{
638 struct mem_cgroup *mem = NULL;
0b7f569e
KH
639
640 if (!mm)
641 return NULL;
54595fe2
KH
642 /*
 643 * Because we have no locks, mm->owner may be being moved to another
 644 * cgroup. We use css_tryget() here even if this looks
645 * pessimistic (rather than adding locks here).
646 */
647 rcu_read_lock();
648 do {
649 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
650 if (unlikely(!mem))
651 break;
652 } while (!css_tryget(&mem->css));
653 rcu_read_unlock();
654 return mem;
655}
656
14067bb3
KH
657/*
658 * Call callback function against all cgroup under hierarchy tree.
659 */
660static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
661 int (*func)(struct mem_cgroup *, void *))
662{
663 int found, ret, nextid;
664 struct cgroup_subsys_state *css;
665 struct mem_cgroup *mem;
666
667 if (!root->use_hierarchy)
668 return (*func)(root, data);
669
670 nextid = 1;
671 do {
672 ret = 0;
673 mem = NULL;
674
675 rcu_read_lock();
676 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
677 &found);
678 if (css && css_tryget(css))
679 mem = container_of(css, struct mem_cgroup, css);
680 rcu_read_unlock();
681
682 if (mem) {
683 ret = (*func)(mem, data);
684 css_put(&mem->css);
685 }
686 nextid = found + 1;
687 } while (!ret && css);
688
689 return ret;
690}
691
4b3bde4c
BS
692static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
693{
694 return (mem == root_mem_cgroup);
695}
696
08e552c6
KH
697/*
698 * Following LRU functions are allowed to be used without PCG_LOCK.
699 * Operations are called by routine of global LRU independently from memcg.
 700 * What we have to take care of here is the validity of pc->mem_cgroup.
701 *
702 * Changes to pc->mem_cgroup happens when
703 * 1. charge
704 * 2. moving account
705 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
706 * It is added to LRU before charge.
707 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
708 * When moving account, the page is not on LRU. It's isolated.
709 */
4f98a2fe 710
08e552c6
KH
711void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
712{
713 struct page_cgroup *pc;
08e552c6 714 struct mem_cgroup_per_zone *mz;
6d12e2d8 715
f8d66542 716 if (mem_cgroup_disabled())
08e552c6
KH
717 return;
718 pc = lookup_page_cgroup(page);
719 /* can happen while we handle swapcache. */
4b3bde4c 720 if (!TestClearPageCgroupAcctLRU(pc))
08e552c6 721 return;
4b3bde4c 722 VM_BUG_ON(!pc->mem_cgroup);
544122e5
KH
723 /*
724 * We don't check PCG_USED bit. It's cleared when the "page" is finally
725 * removed from global LRU.
726 */
08e552c6 727 mz = page_cgroup_zoneinfo(pc);
b69408e8 728 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
4b3bde4c
BS
729 if (mem_cgroup_is_root(pc->mem_cgroup))
730 return;
731 VM_BUG_ON(list_empty(&pc->lru));
08e552c6
KH
732 list_del_init(&pc->lru);
733 return;
6d12e2d8
KH
734}
735
08e552c6 736void mem_cgroup_del_lru(struct page *page)
6d12e2d8 737{
08e552c6
KH
738 mem_cgroup_del_lru_list(page, page_lru(page));
739}
b69408e8 740
08e552c6
KH
741void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
742{
743 struct mem_cgroup_per_zone *mz;
744 struct page_cgroup *pc;
b69408e8 745
f8d66542 746 if (mem_cgroup_disabled())
08e552c6 747 return;
6d12e2d8 748
08e552c6 749 pc = lookup_page_cgroup(page);
bd112db8
DN
750 /*
751 * Used bit is set without atomic ops but after smp_wmb().
752 * For making pc->mem_cgroup visible, insert smp_rmb() here.
753 */
08e552c6 754 smp_rmb();
4b3bde4c
BS
755 /* unused or root page is not rotated. */
756 if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
08e552c6
KH
757 return;
758 mz = page_cgroup_zoneinfo(pc);
759 list_move(&pc->lru, &mz->lists[lru]);
6d12e2d8
KH
760}
761
08e552c6 762void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
66e1707b 763{
08e552c6
KH
764 struct page_cgroup *pc;
765 struct mem_cgroup_per_zone *mz;
6d12e2d8 766
f8d66542 767 if (mem_cgroup_disabled())
08e552c6
KH
768 return;
769 pc = lookup_page_cgroup(page);
4b3bde4c 770 VM_BUG_ON(PageCgroupAcctLRU(pc));
bd112db8
DN
771 /*
772 * Used bit is set without atomic ops but after smp_wmb().
773 * For making pc->mem_cgroup visible, insert smp_rmb() here.
774 */
08e552c6
KH
775 smp_rmb();
776 if (!PageCgroupUsed(pc))
894bc310 777 return;
b69408e8 778
08e552c6 779 mz = page_cgroup_zoneinfo(pc);
b69408e8 780 MEM_CGROUP_ZSTAT(mz, lru) += 1;
4b3bde4c
BS
781 SetPageCgroupAcctLRU(pc);
782 if (mem_cgroup_is_root(pc->mem_cgroup))
783 return;
08e552c6
KH
784 list_add(&pc->lru, &mz->lists[lru]);
785}
544122e5 786
08e552c6 787/*
544122e5
KH
 788 * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
 789 * the LRU, because the page may be reused after it's fully uncharged (because of
 790 * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when charging
 791 * it again. This function is only used to charge SwapCache. It's done under
 792 * lock_page, and it is expected that zone->lru_lock is never held.
08e552c6 793 */
544122e5 794static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
08e552c6 795{
544122e5
KH
796 unsigned long flags;
797 struct zone *zone = page_zone(page);
798 struct page_cgroup *pc = lookup_page_cgroup(page);
799
800 spin_lock_irqsave(&zone->lru_lock, flags);
801 /*
802 * Forget old LRU when this page_cgroup is *not* used. This Used bit
803 * is guarded by lock_page() because the page is SwapCache.
804 */
805 if (!PageCgroupUsed(pc))
806 mem_cgroup_del_lru_list(page, page_lru(page));
807 spin_unlock_irqrestore(&zone->lru_lock, flags);
08e552c6
KH
808}
809
544122e5
KH
810static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
811{
812 unsigned long flags;
813 struct zone *zone = page_zone(page);
814 struct page_cgroup *pc = lookup_page_cgroup(page);
815
816 spin_lock_irqsave(&zone->lru_lock, flags);
817 /* link when the page is linked to LRU but page_cgroup isn't */
4b3bde4c 818 if (PageLRU(page) && !PageCgroupAcctLRU(pc))
544122e5
KH
819 mem_cgroup_add_lru_list(page, page_lru(page));
820 spin_unlock_irqrestore(&zone->lru_lock, flags);
821}
822
823
08e552c6
KH
824void mem_cgroup_move_lists(struct page *page,
825 enum lru_list from, enum lru_list to)
826{
f8d66542 827 if (mem_cgroup_disabled())
08e552c6
KH
828 return;
829 mem_cgroup_del_lru_list(page, from);
830 mem_cgroup_add_lru_list(page, to);
66e1707b
BS
831}
832
4c4a2214
DR
833int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
834{
835 int ret;
0b7f569e 836 struct mem_cgroup *curr = NULL;
4c4a2214
DR
837
838 task_lock(task);
0b7f569e
KH
839 rcu_read_lock();
840 curr = try_get_mem_cgroup_from_mm(task->mm);
841 rcu_read_unlock();
4c4a2214 842 task_unlock(task);
0b7f569e
KH
843 if (!curr)
844 return 0;
d31f56db
DN
845 /*
 846 * We should check use_hierarchy of "mem", not "curr", because checking
 847 * use_hierarchy of "curr" here would make this function return true if hierarchy
 848 * is enabled in "curr" and "curr" is a child of "mem" in the *cgroup*
 849 * hierarchy (even if use_hierarchy is disabled in "mem").
850 */
851 if (mem->use_hierarchy)
0b7f569e
KH
852 ret = css_is_ancestor(&curr->css, &mem->css);
853 else
854 ret = (curr == mem);
855 css_put(&curr->css);
4c4a2214
DR
856 return ret;
857}
858
c772be93 859static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
14797e23
KM
860{
861 unsigned long active;
862 unsigned long inactive;
c772be93
KM
863 unsigned long gb;
864 unsigned long inactive_ratio;
14797e23 865
14067bb3
KH
866 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
867 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
14797e23 868
c772be93
KM
869 gb = (inactive + active) >> (30 - PAGE_SHIFT);
870 if (gb)
871 inactive_ratio = int_sqrt(10 * gb);
872 else
873 inactive_ratio = 1;
874
875 if (present_pages) {
876 present_pages[0] = inactive;
877 present_pages[1] = active;
878 }
879
880 return inactive_ratio;
881}
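/*
 * A worked example of the ratio above (numbers are illustrative only): for a
 * memcg with 3 GB of inactive + 1 GB of active anon, gb == 4 and
 * inactive_ratio == int_sqrt(10 * 4) == 6. mem_cgroup_inactive_anon_is_low()
 * below then reports "low" only while inactive * 6 < active; for memcgs under
 * 1 GB the ratio falls back to 1, i.e. a plain inactive < active check.
 */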
882
883int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
884{
885 unsigned long active;
886 unsigned long inactive;
887 unsigned long present_pages[2];
888 unsigned long inactive_ratio;
889
890 inactive_ratio = calc_inactive_ratio(memcg, present_pages);
891
892 inactive = present_pages[0];
893 active = present_pages[1];
894
895 if (inactive * inactive_ratio < active)
14797e23
KM
896 return 1;
897
898 return 0;
899}
900
56e49d21
RR
901int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
902{
903 unsigned long active;
904 unsigned long inactive;
905
906 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
907 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
908
909 return (active > inactive);
910}
911
a3d8e054
KM
912unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
913 struct zone *zone,
914 enum lru_list lru)
915{
916 int nid = zone->zone_pgdat->node_id;
917 int zid = zone_idx(zone);
918 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
919
920 return MEM_CGROUP_ZSTAT(mz, lru);
921}
922
3e2f41f1
KM
923struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
924 struct zone *zone)
925{
926 int nid = zone->zone_pgdat->node_id;
927 int zid = zone_idx(zone);
928 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
929
930 return &mz->reclaim_stat;
931}
932
933struct zone_reclaim_stat *
934mem_cgroup_get_reclaim_stat_from_page(struct page *page)
935{
936 struct page_cgroup *pc;
937 struct mem_cgroup_per_zone *mz;
938
939 if (mem_cgroup_disabled())
940 return NULL;
941
942 pc = lookup_page_cgroup(page);
bd112db8
DN
943 /*
944 * Used bit is set without atomic ops but after smp_wmb().
945 * For making pc->mem_cgroup visible, insert smp_rmb() here.
946 */
947 smp_rmb();
948 if (!PageCgroupUsed(pc))
949 return NULL;
950
3e2f41f1
KM
951 mz = page_cgroup_zoneinfo(pc);
952 if (!mz)
953 return NULL;
954
955 return &mz->reclaim_stat;
956}
957
66e1707b
BS
958unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
959 struct list_head *dst,
960 unsigned long *scanned, int order,
961 int mode, struct zone *z,
962 struct mem_cgroup *mem_cont,
4f98a2fe 963 int active, int file)
66e1707b
BS
964{
965 unsigned long nr_taken = 0;
966 struct page *page;
967 unsigned long scan;
968 LIST_HEAD(pc_list);
969 struct list_head *src;
ff7283fa 970 struct page_cgroup *pc, *tmp;
1ecaab2b
KH
971 int nid = z->zone_pgdat->node_id;
972 int zid = zone_idx(z);
973 struct mem_cgroup_per_zone *mz;
b7c46d15 974 int lru = LRU_FILE * file + active;
2ffebca6 975 int ret;
66e1707b 976
cf475ad2 977 BUG_ON(!mem_cont);
1ecaab2b 978 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
b69408e8 979 src = &mz->lists[lru];
66e1707b 980
ff7283fa
KH
981 scan = 0;
982 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
436c6541 983 if (scan >= nr_to_scan)
ff7283fa 984 break;
08e552c6
KH
985
986 page = pc->page;
52d4b9ac
KH
987 if (unlikely(!PageCgroupUsed(pc)))
988 continue;
436c6541 989 if (unlikely(!PageLRU(page)))
ff7283fa 990 continue;
ff7283fa 991
436c6541 992 scan++;
2ffebca6
KH
993 ret = __isolate_lru_page(page, mode, file);
994 switch (ret) {
995 case 0:
66e1707b 996 list_move(&page->lru, dst);
2ffebca6 997 mem_cgroup_del_lru(page);
66e1707b 998 nr_taken++;
2ffebca6
KH
999 break;
1000 case -EBUSY:
1001 /* we don't affect global LRU but rotate in our LRU */
1002 mem_cgroup_rotate_lru_list(page, page_lru(page));
1003 break;
1004 default:
1005 break;
66e1707b
BS
1006 }
1007 }
1008
66e1707b
BS
1009 *scanned = scan;
1010 return nr_taken;
1011}
1012
6d61ef40
BS
1013#define mem_cgroup_from_res_counter(counter, member) \
1014 container_of(counter, struct mem_cgroup, member)
1015
b85a96c0
DN
1016static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1017{
1018 if (do_swap_account) {
1019 if (res_counter_check_under_limit(&mem->res) &&
1020 res_counter_check_under_limit(&mem->memsw))
1021 return true;
1022 } else
1023 if (res_counter_check_under_limit(&mem->res))
1024 return true;
1025 return false;
1026}
1027
a7885eb8
KM
1028static unsigned int get_swappiness(struct mem_cgroup *memcg)
1029{
1030 struct cgroup *cgrp = memcg->css.cgroup;
1031 unsigned int swappiness;
1032
1033 /* root ? */
1034 if (cgrp->parent == NULL)
1035 return vm_swappiness;
1036
1037 spin_lock(&memcg->reclaim_param_lock);
1038 swappiness = memcg->swappiness;
1039 spin_unlock(&memcg->reclaim_param_lock);
1040
1041 return swappiness;
1042}
1043
81d39c20
KH
1044static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
1045{
1046 int *val = data;
1047 (*val)++;
1048 return 0;
1049}
e222432b
BS
1050
1051/**
6a6135b6 1052 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
e222432b
BS
1053 * @memcg: The memory cgroup that went over limit
1054 * @p: Task that is going to be killed
1055 *
1056 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1057 * enabled
1058 */
1059void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1060{
1061 struct cgroup *task_cgrp;
1062 struct cgroup *mem_cgrp;
1063 /*
1064 * Need a buffer in BSS, can't rely on allocations. The code relies
1065 * on the assumption that OOM is serialized for memory controller.
1066 * If this assumption is broken, revisit this code.
1067 */
1068 static char memcg_name[PATH_MAX];
1069 int ret;
1070
d31f56db 1071 if (!memcg || !p)
e222432b
BS
1072 return;
1073
1074
1075 rcu_read_lock();
1076
1077 mem_cgrp = memcg->css.cgroup;
1078 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1079
1080 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1081 if (ret < 0) {
1082 /*
1083 * Unfortunately, we are unable to convert to a useful name
1084 * But we'll still print out the usage information
1085 */
1086 rcu_read_unlock();
1087 goto done;
1088 }
1089 rcu_read_unlock();
1090
1091 printk(KERN_INFO "Task in %s killed", memcg_name);
1092
1093 rcu_read_lock();
1094 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1095 if (ret < 0) {
1096 rcu_read_unlock();
1097 goto done;
1098 }
1099 rcu_read_unlock();
1100
1101 /*
 1102 * Continues from above, so we don't need a KERN_ level
1103 */
1104 printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1105done:
1106
1107 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1108 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1109 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1110 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1111 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1112 "failcnt %llu\n",
1113 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1114 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1115 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1116}
1117
81d39c20
KH
1118/*
1119 * This function returns the number of memcg under hierarchy tree. Returns
1120 * 1(self count) if no children.
1121 */
1122static int mem_cgroup_count_children(struct mem_cgroup *mem)
1123{
1124 int num = 0;
1125 mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1126 return num;
1127}
1128
a63d83f4
DR
1129/*
1130 * Return the memory (and swap, if configured) limit for a memcg.
1131 */
1132u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1133{
1134 u64 limit;
1135 u64 memsw;
1136
1137 limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
1138 total_swap_pages;
1139 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1140 /*
1141 * If memsw is finite and limits the amount of swap space available
1142 * to this memcg, return that limit.
1143 */
1144 return min(limit, memsw);
1145}
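/*
 * A conceptual worked example of the limit above (illustrative numbers): with
 * a 512 MB memory limit, 2 GB of swap in the system and a 1 GB mem+swap
 * limit, the function conceptually returns min(512 MB + 2 GB, 1 GB) == 1 GB,
 * i.e. the memory plus swap actually usable by this memcg is bounded by the
 * memsw limit; if memsw is unlimited, the first term is what matters.
 */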
1146
6d61ef40 1147/*
04046e1a
KH
1148 * Visit the first child (need not be the first child as per the ordering
1149 * of the cgroup list, since we track last_scanned_child) of @mem and use
1150 * that to reclaim free pages from.
1151 */
1152static struct mem_cgroup *
1153mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1154{
1155 struct mem_cgroup *ret = NULL;
1156 struct cgroup_subsys_state *css;
1157 int nextid, found;
1158
1159 if (!root_mem->use_hierarchy) {
1160 css_get(&root_mem->css);
1161 ret = root_mem;
1162 }
1163
1164 while (!ret) {
1165 rcu_read_lock();
1166 nextid = root_mem->last_scanned_child + 1;
1167 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1168 &found);
1169 if (css && css_tryget(css))
1170 ret = container_of(css, struct mem_cgroup, css);
1171
1172 rcu_read_unlock();
1173 /* Updates scanning parameter */
1174 spin_lock(&root_mem->reclaim_param_lock);
1175 if (!css) {
1176 /* this means start scan from ID:1 */
1177 root_mem->last_scanned_child = 0;
1178 } else
1179 root_mem->last_scanned_child = found;
1180 spin_unlock(&root_mem->reclaim_param_lock);
1181 }
1182
1183 return ret;
1184}
1185
1186/*
1187 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1188 * we reclaimed from, so that we don't end up penalizing one child extensively
1189 * based on its position in the children list.
6d61ef40
BS
1190 *
 1191 * root_mem is the original ancestor that we've been reclaiming from.
04046e1a
KH
1192 *
1193 * We give up and return to the caller when we visit root_mem twice.
1194 * (other groups can be removed while we're walking....)
81d39c20
KH
1195 *
 1196 * If shrink==true, to avoid freeing too much, this returns immediately.
6d61ef40
BS
1197 */
1198static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
4e416953 1199 struct zone *zone,
75822b44
BS
1200 gfp_t gfp_mask,
1201 unsigned long reclaim_options)
6d61ef40 1202{
04046e1a
KH
1203 struct mem_cgroup *victim;
1204 int ret, total = 0;
1205 int loop = 0;
75822b44
BS
1206 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1207 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
4e416953
BS
1208 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1209 unsigned long excess = mem_cgroup_get_excess(root_mem);
04046e1a 1210
22a668d7
KH
 1211 /* If memsw_is_minimum==1, swap-out is of no use. */
1212 if (root_mem->memsw_is_minimum)
1213 noswap = true;
1214
4e416953 1215 while (1) {
04046e1a 1216 victim = mem_cgroup_select_victim(root_mem);
4e416953 1217 if (victim == root_mem) {
04046e1a 1218 loop++;
cdec2e42
KH
1219 if (loop >= 1)
1220 drain_all_stock_async();
4e416953
BS
1221 if (loop >= 2) {
1222 /*
1223 * If we have not been able to reclaim
 1224 * anything, it might be because there are
 1225 * no reclaimable pages under this hierarchy.
1226 */
1227 if (!check_soft || !total) {
1228 css_put(&victim->css);
1229 break;
1230 }
1231 /*
 1232 * We want to do more targeted reclaim.
 1233 * excess >> 2 is not too excessive, so we don't
 1234 * reclaim too much, nor too little, so we don't keep
 1235 * coming back to reclaim from this cgroup.
1236 */
1237 if (total >= (excess >> 2) ||
1238 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1239 css_put(&victim->css);
1240 break;
1241 }
1242 }
1243 }
c62b1a3b 1244 if (!mem_cgroup_local_usage(victim)) {
04046e1a
KH
1245 /* this cgroup's local usage == 0 */
1246 css_put(&victim->css);
6d61ef40
BS
1247 continue;
1248 }
04046e1a 1249 /* we use swappiness of local cgroup */
4e416953
BS
1250 if (check_soft)
1251 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1252 noswap, get_swappiness(victim), zone,
1253 zone->zone_pgdat->node_id);
1254 else
1255 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1256 noswap, get_swappiness(victim));
04046e1a 1257 css_put(&victim->css);
81d39c20
KH
1258 /*
 1259 * When shrinking usage, we can't check whether we should stop here or
 1260 * reclaim more; it depends on the callers. last_scanned_child
 1261 * works well enough for keeping fairness under the tree.
1262 */
1263 if (shrink)
1264 return ret;
04046e1a 1265 total += ret;
4e416953
BS
1266 if (check_soft) {
1267 if (res_counter_check_under_soft_limit(&root_mem->res))
1268 return total;
1269 } else if (mem_cgroup_check_under_limit(root_mem))
04046e1a 1270 return 1 + total;
6d61ef40 1271 }
04046e1a 1272 return total;
6d61ef40
BS
1273}
1274
867578cb 1275static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
a636b327 1276{
867578cb
KH
1277 int *val = (int *)data;
1278 int x;
1279 /*
1280 * Logically, we can stop scanning immediately when we find
 1281 * a memcg is already locked. But considering unlock ops and
 1282 * creation/removal of memcg, scanning all is the simpler operation.
1283 */
1284 x = atomic_inc_return(&mem->oom_lock);
1285 *val = max(x, *val);
1286 return 0;
1287}
1288/*
1289 * Check OOM-Killer is already running under our hierarchy.
1290 * If someone is running, return false.
1291 */
1292static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1293{
1294 int lock_count = 0;
a636b327 1295
867578cb
KH
1296 mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
1297
1298 if (lock_count == 1)
1299 return true;
1300 return false;
a636b327 1301}
0b7f569e 1302
867578cb 1303static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
0b7f569e 1304{
867578cb
KH
1305 /*
1306 * When a new child is created while the hierarchy is under oom,
1307 * mem_cgroup_oom_lock() may not be called. We have to use
1308 * atomic_add_unless() here.
1309 */
1310 atomic_add_unless(&mem->oom_lock, -1, 0);
0b7f569e
KH
1311 return 0;
1312}
1313
867578cb
KH
1314static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1315{
1316 mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
1317}
1318
1319static DEFINE_MUTEX(memcg_oom_mutex);
1320static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1321
dc98df5a
KH
1322struct oom_wait_info {
1323 struct mem_cgroup *mem;
1324 wait_queue_t wait;
1325};
1326
1327static int memcg_oom_wake_function(wait_queue_t *wait,
1328 unsigned mode, int sync, void *arg)
1329{
1330 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1331 struct oom_wait_info *oom_wait_info;
1332
1333 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1334
1335 if (oom_wait_info->mem == wake_mem)
1336 goto wakeup;
1337 /* if no hierarchy, no match */
1338 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1339 return 0;
1340 /*
1341 * Both of oom_wait_info->mem and wake_mem are stable under us.
1342 * Then we can use css_is_ancestor without taking care of RCU.
1343 */
1344 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1345 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1346 return 0;
1347
1348wakeup:
1349 return autoremove_wake_function(wait, mode, sync, arg);
1350}
1351
1352static void memcg_wakeup_oom(struct mem_cgroup *mem)
1353{
1354 /* for filtering, pass "mem" as argument. */
1355 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1356}
1357
3c11ecf4
KH
1358static void memcg_oom_recover(struct mem_cgroup *mem)
1359{
4d845ebf 1360 if (atomic_read(&mem->oom_lock))
3c11ecf4
KH
1361 memcg_wakeup_oom(mem);
1362}
1363
867578cb
KH
1364/*
 1365 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1366 */
1367bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
0b7f569e 1368{
dc98df5a 1369 struct oom_wait_info owait;
3c11ecf4 1370 bool locked, need_to_kill;
867578cb 1371
dc98df5a
KH
1372 owait.mem = mem;
1373 owait.wait.flags = 0;
1374 owait.wait.func = memcg_oom_wake_function;
1375 owait.wait.private = current;
1376 INIT_LIST_HEAD(&owait.wait.task_list);
3c11ecf4 1377 need_to_kill = true;
867578cb
KH
1378 /* At first, try to OOM lock hierarchy under mem.*/
1379 mutex_lock(&memcg_oom_mutex);
1380 locked = mem_cgroup_oom_lock(mem);
1381 /*
1382 * Even if signal_pending(), we can't quit charge() loop without
1383 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1384 * under OOM is always welcomed, use TASK_KILLABLE here.
1385 */
3c11ecf4
KH
1386 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1387 if (!locked || mem->oom_kill_disable)
1388 need_to_kill = false;
1389 if (locked)
9490ff27 1390 mem_cgroup_oom_notify(mem);
867578cb
KH
1391 mutex_unlock(&memcg_oom_mutex);
1392
3c11ecf4
KH
1393 if (need_to_kill) {
1394 finish_wait(&memcg_oom_waitq, &owait.wait);
867578cb 1395 mem_cgroup_out_of_memory(mem, mask);
3c11ecf4 1396 } else {
867578cb 1397 schedule();
dc98df5a 1398 finish_wait(&memcg_oom_waitq, &owait.wait);
867578cb
KH
1399 }
1400 mutex_lock(&memcg_oom_mutex);
1401 mem_cgroup_oom_unlock(mem);
dc98df5a 1402 memcg_wakeup_oom(mem);
867578cb
KH
1403 mutex_unlock(&memcg_oom_mutex);
1404
1405 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1406 return false;
1407 /* Give chance to dying process */
1408 schedule_timeout(1);
1409 return true;
0b7f569e
KH
1410}
1411
d69b042f
BS
1412/*
1413 * Currently used to update mapped file statistics, but the routine can be
1414 * generalized to update other statistics as well.
1415 */
d8046582 1416void mem_cgroup_update_file_mapped(struct page *page, int val)
d69b042f
BS
1417{
1418 struct mem_cgroup *mem;
d69b042f
BS
1419 struct page_cgroup *pc;
1420
d69b042f
BS
1421 pc = lookup_page_cgroup(page);
1422 if (unlikely(!pc))
1423 return;
1424
1425 lock_page_cgroup(pc);
1426 mem = pc->mem_cgroup;
8725d541 1427 if (!mem || !PageCgroupUsed(pc))
d69b042f
BS
1428 goto done;
1429
1430 /*
c62b1a3b 1431 * Preemption is already disabled. We can use __this_cpu_xxx
d69b042f 1432 */
8725d541
KH
1433 if (val > 0) {
1434 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1435 SetPageCgroupFileMapped(pc);
1436 } else {
1437 __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1438 ClearPageCgroupFileMapped(pc);
1439 }
d69b042f 1440
d69b042f
BS
1441done:
1442 unlock_page_cgroup(pc);
1443}
0b7f569e 1444
cdec2e42
KH
1445/*
1446 * size of first charge trial. "32" comes from vmscan.c's magic value.
1447 * TODO: maybe necessary to use big numbers in big irons.
1448 */
1449#define CHARGE_SIZE (32 * PAGE_SIZE)
1450struct memcg_stock_pcp {
1451 struct mem_cgroup *cached; /* this never be root cgroup */
1452 int charge;
1453 struct work_struct work;
1454};
1455static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1456static atomic_t memcg_drain_count;
1457
1458/*
 1459 * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
 1460 * from the local stock and true is returned. If the stock is 0 or holds charges from
 1461 * a cgroup which is not the current target, returns false. This stock will be
1462 * refilled.
1463 */
1464static bool consume_stock(struct mem_cgroup *mem)
1465{
1466 struct memcg_stock_pcp *stock;
1467 bool ret = true;
1468
1469 stock = &get_cpu_var(memcg_stock);
1470 if (mem == stock->cached && stock->charge)
1471 stock->charge -= PAGE_SIZE;
1472 else /* need to call res_counter_charge */
1473 ret = false;
1474 put_cpu_var(memcg_stock);
1475 return ret;
1476}
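/*
 * A sketch of how the stock is used (flow as implemented later in this file):
 * __mem_cgroup_try_charge() charges CHARGE_SIZE (32 pages) to the res_counter
 * in one go, keeps one page for the current request and parks the remaining
 * 31 pages in this cpu's stock via refill_stock(). The next 31 charges on
 * this cpu for the same memcg are then satisfied by consume_stock() above
 * without touching the (contended) res_counter.
 */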
1477
1478/*
1479 * Returns stocks cached in percpu to res_counter and reset cached information.
1480 */
1481static void drain_stock(struct memcg_stock_pcp *stock)
1482{
1483 struct mem_cgroup *old = stock->cached;
1484
1485 if (stock->charge) {
1486 res_counter_uncharge(&old->res, stock->charge);
1487 if (do_swap_account)
1488 res_counter_uncharge(&old->memsw, stock->charge);
1489 }
1490 stock->cached = NULL;
1491 stock->charge = 0;
1492}
1493
1494/*
1495 * This must be called under preempt disabled or must be called by
1496 * a thread which is pinned to local cpu.
1497 */
1498static void drain_local_stock(struct work_struct *dummy)
1499{
1500 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1501 drain_stock(stock);
1502}
1503
1504/*
1505 * Cache charges(val) which is from res_counter, to local per_cpu area.
320cc51d 1506 * This will be consumed by consume_stock() function, later.
cdec2e42
KH
1507 */
1508static void refill_stock(struct mem_cgroup *mem, int val)
1509{
1510 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1511
1512 if (stock->cached != mem) { /* reset if necessary */
1513 drain_stock(stock);
1514 stock->cached = mem;
1515 }
1516 stock->charge += val;
1517 put_cpu_var(memcg_stock);
1518}
1519
1520/*
1521 * Tries to drain stocked charges in other cpus. This function is asynchronous
 1522 * and just puts a work per cpu for draining locally on each cpu. Callers can
 1523 * expect some charges will be back to the res_counter later but cannot wait for
1524 * it.
1525 */
1526static void drain_all_stock_async(void)
1527{
1528 int cpu;
1529 /* This function is for scheduling "drain" in asynchronous way.
1530 * The result of "drain" is not directly handled by callers. Then,
1531 * if someone is calling drain, we don't have to call drain more.
1532 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
1533 * there is a race. We just do loose check here.
1534 */
1535 if (atomic_read(&memcg_drain_count))
1536 return;
1537 /* Notify other cpus that system-wide "drain" is running */
1538 atomic_inc(&memcg_drain_count);
1539 get_online_cpus();
1540 for_each_online_cpu(cpu) {
1541 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1542 schedule_work_on(cpu, &stock->work);
1543 }
1544 put_online_cpus();
1545 atomic_dec(&memcg_drain_count);
1546 /* We don't wait for flush_work */
1547}
1548
1549/* This is a synchronous drain interface. */
1550static void drain_all_stock_sync(void)
1551{
1552 /* called when force_empty is called */
1553 atomic_inc(&memcg_drain_count);
1554 schedule_on_each_cpu(drain_local_stock);
1555 atomic_dec(&memcg_drain_count);
1556}
1557
1558static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1559 unsigned long action,
1560 void *hcpu)
1561{
1562 int cpu = (unsigned long)hcpu;
1563 struct memcg_stock_pcp *stock;
1564
1565 if (action != CPU_DEAD)
1566 return NOTIFY_OK;
1567 stock = &per_cpu(memcg_stock, cpu);
1568 drain_stock(stock);
1569 return NOTIFY_OK;
1570}
1571
f817ed48
KH
1572/*
 1573 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1574 * oom-killer can be invoked.
8a9f3ccd 1575 */
f817ed48 1576static int __mem_cgroup_try_charge(struct mm_struct *mm,
430e4863 1577 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
8a9f3ccd 1578{
4e649152 1579 struct mem_cgroup *mem, *mem_over_limit;
7a81b88c 1580 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4e649152 1581 struct res_counter *fail_res;
cdec2e42 1582 int csize = CHARGE_SIZE;
a636b327 1583
867578cb
KH
1584 /*
 1585 * Unlike the global VM's OOM kill, we're not in a system-level memory
 1586 * shortage. So, allow a dying process to go ahead in addition to
 1587 * MEMDIE processes.
1588 */
1589 if (unlikely(test_thread_flag(TIF_MEMDIE)
1590 || fatal_signal_pending(current)))
1591 goto bypass;
a636b327 1592
8a9f3ccd 1593 /*
3be91277
HD
1594 * We always charge the cgroup the mm_struct belongs to.
1595 * The mm_struct's mem_cgroup changes on task migration if the
8a9f3ccd
BS
1596 * thread group leader migrates. It's possible that mm is not
1597 * set, if so charge the init_mm (happens for pagecache usage).
1598 */
54595fe2
KH
1599 mem = *memcg;
1600 if (likely(!mem)) {
1601 mem = try_get_mem_cgroup_from_mm(mm);
7a81b88c 1602 *memcg = mem;
e8589cc1 1603 } else {
7a81b88c 1604 css_get(&mem->css);
e8589cc1 1605 }
54595fe2
KH
1606 if (unlikely(!mem))
1607 return 0;
1608
46f7e602 1609 VM_BUG_ON(css_is_removed(&mem->css));
cdec2e42
KH
1610 if (mem_cgroup_is_root(mem))
1611 goto done;
8a9f3ccd 1612
8c7c6e34 1613 while (1) {
0c3e73e8 1614 int ret = 0;
75822b44 1615 unsigned long flags = 0;
7a81b88c 1616
cdec2e42 1617 if (consume_stock(mem))
430e4863 1618 goto done;
cdec2e42
KH
1619
1620 ret = res_counter_charge(&mem->res, csize, &fail_res);
8c7c6e34
KH
1621 if (likely(!ret)) {
1622 if (!do_swap_account)
1623 break;
cdec2e42 1624 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
8c7c6e34
KH
1625 if (likely(!ret))
1626 break;
1627 /* mem+swap counter fails */
cdec2e42 1628 res_counter_uncharge(&mem->res, csize);
75822b44 1629 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
6d61ef40
BS
1630 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1631 memsw);
1632 } else
1633 /* mem counter fails */
1634 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1635 res);
1636
cdec2e42
KH
1637 /* reduce request size and retry */
1638 if (csize > PAGE_SIZE) {
1639 csize = PAGE_SIZE;
1640 continue;
1641 }
3be91277 1642 if (!(gfp_mask & __GFP_WAIT))
7a81b88c 1643 goto nomem;
e1a1cd59 1644
4e416953
BS
1645 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1646 gfp_mask, flags);
4d1c6273
DN
1647 if (ret)
1648 continue;
66e1707b
BS
1649
1650 /*
8869b8f6
HD
1651 * try_to_free_mem_cgroup_pages() might not give us a full
1652 * picture of reclaim. Some pages are reclaimed and might be
1653 * moved to swap cache or just unmapped from the cgroup.
1654 * Check the limit again to see if the reclaim reduced the
1655 * current usage of the cgroup before giving up
8c7c6e34 1656 *
8869b8f6 1657 */
b85a96c0
DN
1658 if (mem_cgroup_check_under_limit(mem_over_limit))
1659 continue;
3be91277 1660
8033b97c
DN
1661 /* try to avoid oom while someone is moving charge */
1662 if (mc.moving_task && current != mc.moving_task) {
1663 struct mem_cgroup *from, *to;
1664 bool do_continue = false;
1665 /*
1666 * There is a small race that "from" or "to" can be
1667 * freed by rmdir, so we use css_tryget().
1668 */
8033b97c
DN
1669 from = mc.from;
1670 to = mc.to;
1671 if (from && css_tryget(&from->css)) {
1672 if (mem_over_limit->use_hierarchy)
1673 do_continue = css_is_ancestor(
1674 &from->css,
1675 &mem_over_limit->css);
1676 else
1677 do_continue = (from == mem_over_limit);
1678 css_put(&from->css);
1679 }
1680 if (!do_continue && to && css_tryget(&to->css)) {
1681 if (mem_over_limit->use_hierarchy)
1682 do_continue = css_is_ancestor(
1683 &to->css,
1684 &mem_over_limit->css);
1685 else
1686 do_continue = (to == mem_over_limit);
1687 css_put(&to->css);
1688 }
8033b97c
DN
1689 if (do_continue) {
1690 DEFINE_WAIT(wait);
1691 prepare_to_wait(&mc.waitq, &wait,
1692 TASK_INTERRUPTIBLE);
1693 /* moving charge context might have finished. */
1694 if (mc.moving_task)
1695 schedule();
1696 finish_wait(&mc.waitq, &wait);
1697 continue;
1698 }
1699 }
1700
3be91277 1701 if (!nr_retries--) {
867578cb
KH
1702 if (!oom)
1703 goto nomem;
1704 if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
1705 nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1706 continue;
a636b327 1707 }
867578cb
KH
 1708 /* When we reach here, the current task is dying. */
1709 css_put(&mem->css);
1710 goto bypass;
66e1707b 1711 }
8a9f3ccd 1712 }
cdec2e42
KH
1713 if (csize > PAGE_SIZE)
1714 refill_stock(mem, csize - PAGE_SIZE);
0c3e73e8 1715done:
7a81b88c
KH
1716 return 0;
1717nomem:
1718 css_put(&mem->css);
1719 return -ENOMEM;
867578cb
KH
1720bypass:
1721 *memcg = NULL;
1722 return 0;
7a81b88c 1723}
8a9f3ccd 1724
a3032a2c
DN
1725/*
 1726 * Sometimes we have to undo a charge we got by try_charge().
 1727 * This function does that: it uncharges and puts the css refcnt
 1728 * gotten by try_charge().
1729 */
854ffa8d
DN
1730static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1731 unsigned long count)
a3032a2c
DN
1732{
1733 if (!mem_cgroup_is_root(mem)) {
854ffa8d 1734 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
a3032a2c 1735 if (do_swap_account)
854ffa8d
DN
1736 res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1737 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1738 WARN_ON_ONCE(count > INT_MAX);
1739 __css_put(&mem->css, (int)count);
a3032a2c 1740 }
854ffa8d
DN
1741 /* we don't need css_put for root */
1742}
1743
1744static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1745{
1746 __mem_cgroup_cancel_charge(mem, 1);
a3032a2c
DN
1747}
1748
a3b2d692
KH
1749/*
 1750 * A helper function to get a mem_cgroup from an ID. Must be called under
 1751 * rcu_read_lock(). The caller must check css_is_removed() or similar if
 1752 * it is a concern. (Dropping a refcnt from swap can be called against a removed
 1753 * memcg.)
1754 */
1755static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1756{
1757 struct cgroup_subsys_state *css;
1758
1759 /* ID 0 is unused ID */
1760 if (!id)
1761 return NULL;
1762 css = css_lookup(&mem_cgroup_subsys, id);
1763 if (!css)
1764 return NULL;
1765 return container_of(css, struct mem_cgroup, css);
1766}
1767
e42d9d5d 1768struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
b5a84319 1769{
e42d9d5d 1770 struct mem_cgroup *mem = NULL;
3c776e64 1771 struct page_cgroup *pc;
a3b2d692 1772 unsigned short id;
b5a84319
KH
1773 swp_entry_t ent;
1774
3c776e64
DN
1775 VM_BUG_ON(!PageLocked(page));
1776
3c776e64 1777 pc = lookup_page_cgroup(page);
c0bd3f63 1778 lock_page_cgroup(pc);
a3b2d692 1779 if (PageCgroupUsed(pc)) {
3c776e64 1780 mem = pc->mem_cgroup;
a3b2d692
KH
1781 if (mem && !css_tryget(&mem->css))
1782 mem = NULL;
e42d9d5d 1783 } else if (PageSwapCache(page)) {
3c776e64 1784 ent.val = page_private(page);
a3b2d692
KH
1785 id = lookup_swap_cgroup(ent);
1786 rcu_read_lock();
1787 mem = mem_cgroup_lookup(id);
1788 if (mem && !css_tryget(&mem->css))
1789 mem = NULL;
1790 rcu_read_unlock();
3c776e64 1791 }
c0bd3f63 1792 unlock_page_cgroup(pc);
b5a84319
KH
1793 return mem;
1794}
1795
7a81b88c 1796/*
a5e924f5 1797 * commit a charge obtained by __mem_cgroup_try_charge() and mark the page_cgroup as
7a81b88c
KH
1798 * USED. If it is already USED, uncharge and return.
1799 */
1800
1801static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1802 struct page_cgroup *pc,
1803 enum charge_type ctype)
1804{
7a81b88c
KH
1805 /* try_charge() can return NULL to *memcg; this check takes care of it. */
1806 if (!mem)
1807 return;
52d4b9ac
KH
1808
1809 lock_page_cgroup(pc);
1810 if (unlikely(PageCgroupUsed(pc))) {
1811 unlock_page_cgroup(pc);
a3032a2c 1812 mem_cgroup_cancel_charge(mem);
7a81b88c 1813 return;
52d4b9ac 1814 }
4b3bde4c 1815
8a9f3ccd 1816 pc->mem_cgroup = mem;
261fb61a
KH
1817 /*
1818 * We access a page_cgroup asynchronously without lock_page_cgroup().
1819 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1820 * is accessed after testing the USED bit. To make pc->mem_cgroup visible
1821 * before the USED bit, we need a memory barrier here.
1822 * See mem_cgroup_add_lru_list(), etc.
1823 */
08e552c6 1824 smp_wmb();
4b3bde4c
BS
1825 switch (ctype) {
1826 case MEM_CGROUP_CHARGE_TYPE_CACHE:
1827 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1828 SetPageCgroupCache(pc);
1829 SetPageCgroupUsed(pc);
1830 break;
1831 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1832 ClearPageCgroupCache(pc);
1833 SetPageCgroupUsed(pc);
1834 break;
1835 default:
1836 break;
1837 }
3be91277 1838
08e552c6 1839 mem_cgroup_charge_statistics(mem, pc, true);
52d4b9ac 1840
52d4b9ac 1841 unlock_page_cgroup(pc);
430e4863
KH
1842 /*
1843 * "charge_statistics" updated event counter. Then, check it.
1844 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1845 * if they exceeds softlimit.
1846 */
d2265e6f 1847 memcg_check_events(mem, pc->page);
7a81b88c 1848}
66e1707b 1849
f817ed48 1850/**
57f9fd7d 1851 * __mem_cgroup_move_account - move account of the page
f817ed48
KH
1852 * @pc: page_cgroup of the page.
1853 * @from: mem_cgroup which the page is moved from.
1854 * @to: mem_cgroup which the page is moved to. @from != @to.
854ffa8d 1855 * @uncharge: whether we should call uncharge and css_put against @from.
f817ed48
KH
1856 *
1857 * The caller must confirm the following.
08e552c6 1858 * - page is not on LRU (isolate_page() is useful.)
57f9fd7d 1859 * - the pc is locked, used, and ->mem_cgroup points to @from.
f817ed48 1860 *
854ffa8d
DN
1861 * This function does neither "charge" nor css_get against the new cgroup; that
1862 * should be done by the caller (__mem_cgroup_try_charge would be useful). If
1863 * @uncharge is true, this function does "uncharge" from the old cgroup; if
1864 * @uncharge is false, the caller must do the "uncharge" itself.
f817ed48
KH
1865 */
1866
57f9fd7d 1867static void __mem_cgroup_move_account(struct page_cgroup *pc,
854ffa8d 1868 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
f817ed48 1869{
f817ed48 1870 VM_BUG_ON(from == to);
08e552c6 1871 VM_BUG_ON(PageLRU(pc->page));
57f9fd7d
DN
1872 VM_BUG_ON(!PageCgroupLocked(pc));
1873 VM_BUG_ON(!PageCgroupUsed(pc));
1874 VM_BUG_ON(pc->mem_cgroup != from);
f817ed48 1875
8725d541 1876 if (PageCgroupFileMapped(pc)) {
c62b1a3b
KH
1877 /* Update mapped_file data for mem_cgroup */
1878 preempt_disable();
1879 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1880 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1881 preempt_enable();
d69b042f 1882 }
854ffa8d
DN
1883 mem_cgroup_charge_statistics(from, pc, false);
1884 if (uncharge)
1885 /* This is not "cancel", but cancel_charge does all we need. */
1886 mem_cgroup_cancel_charge(from);
d69b042f 1887
854ffa8d 1888 /* caller should have done css_get */
08e552c6
KH
1889 pc->mem_cgroup = to;
1890 mem_cgroup_charge_statistics(to, pc, true);
88703267
KH
1891 /*
1892 * We charge against "to", which may not have any tasks. Then "to"
1893 * can be under rmdir(). But in the current implementation, the callers of
4ffef5fe
DN
1894 * this function are only force_empty() and move-charge, so it is
1895 * guaranteed that "to" is never removed. So, we don't check rmdir
1896 * status here.
88703267 1897 */
57f9fd7d
DN
1898}
1899
1900/*
1901 * check whether the @pc is valid for moving account and call
1902 * __mem_cgroup_move_account()
1903 */
1904static int mem_cgroup_move_account(struct page_cgroup *pc,
854ffa8d 1905 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
57f9fd7d
DN
1906{
1907 int ret = -EINVAL;
1908 lock_page_cgroup(pc);
1909 if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
854ffa8d 1910 __mem_cgroup_move_account(pc, from, to, uncharge);
57f9fd7d
DN
1911 ret = 0;
1912 }
1913 unlock_page_cgroup(pc);
d2265e6f
KH
1914 /*
1915 * check events
1916 */
1917 memcg_check_events(to, pc->page);
1918 memcg_check_events(from, pc->page);
f817ed48
KH
1919 return ret;
1920}
1921
1922/*
1923 * move charges to its parent.
1924 */
1925
1926static int mem_cgroup_move_parent(struct page_cgroup *pc,
1927 struct mem_cgroup *child,
1928 gfp_t gfp_mask)
1929{
08e552c6 1930 struct page *page = pc->page;
f817ed48
KH
1931 struct cgroup *cg = child->css.cgroup;
1932 struct cgroup *pcg = cg->parent;
1933 struct mem_cgroup *parent;
f817ed48
KH
1934 int ret;
1935
1936 /* Is ROOT ? */
1937 if (!pcg)
1938 return -EINVAL;
1939
57f9fd7d
DN
1940 ret = -EBUSY;
1941 if (!get_page_unless_zero(page))
1942 goto out;
1943 if (isolate_lru_page(page))
1944 goto put;
08e552c6 1945
f817ed48 1946 parent = mem_cgroup_from_cont(pcg);
430e4863 1947 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
a636b327 1948 if (ret || !parent)
57f9fd7d 1949 goto put_back;
f817ed48 1950
854ffa8d
DN
1951 ret = mem_cgroup_move_account(pc, child, parent, true);
1952 if (ret)
1953 mem_cgroup_cancel_charge(parent);
57f9fd7d 1954put_back:
08e552c6 1955 putback_lru_page(page);
57f9fd7d 1956put:
40d58138 1957 put_page(page);
57f9fd7d 1958out:
f817ed48
KH
1959 return ret;
1960}
1961
7a81b88c
KH
1962/*
1963 * Charge the memory controller for page usage.
1964 * Return
1965 * 0 if the charge was successful
1966 * < 0 if the cgroup is over its limit
1967 */
1968static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1969 gfp_t gfp_mask, enum charge_type ctype,
1970 struct mem_cgroup *memcg)
1971{
1972 struct mem_cgroup *mem;
1973 struct page_cgroup *pc;
1974 int ret;
1975
1976 pc = lookup_page_cgroup(page);
1977 /* can happen at boot */
1978 if (unlikely(!pc))
1979 return 0;
1980 prefetchw(pc);
1981
1982 mem = memcg;
430e4863 1983 ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
a636b327 1984 if (ret || !mem)
7a81b88c
KH
1985 return ret;
1986
1987 __mem_cgroup_commit_charge(mem, pc, ctype);
8a9f3ccd 1988 return 0;
8a9f3ccd
BS
1989}
1990
7a81b88c
KH
1991int mem_cgroup_newpage_charge(struct page *page,
1992 struct mm_struct *mm, gfp_t gfp_mask)
217bc319 1993{
f8d66542 1994 if (mem_cgroup_disabled())
cede86ac 1995 return 0;
52d4b9ac
KH
1996 if (PageCompound(page))
1997 return 0;
69029cd5
KH
1998 /*
1999 * If already mapped, we don't have to account.
2000 * If it is page cache, page->mapping points to an address_space.
2001 * But page->mapping may hold a stale anon_vma pointer;
2002 * detect that with a PageAnon() check: a newly-mapped anon page's
2003 * page->mapping is NULL.
2004 */
2005 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2006 return 0;
2007 if (unlikely(!mm))
2008 mm = &init_mm;
217bc319 2009 return mem_cgroup_charge_common(page, mm, gfp_mask,
e8589cc1 2010 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
217bc319
KH
2011}
2012
83aae4c7
DN
2013static void
2014__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2015 enum charge_type ctype);
2016
e1a1cd59
BS
2017int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2018 gfp_t gfp_mask)
8697d331 2019{
b5a84319
KH
2020 struct mem_cgroup *mem = NULL;
2021 int ret;
2022
f8d66542 2023 if (mem_cgroup_disabled())
cede86ac 2024 return 0;
52d4b9ac
KH
2025 if (PageCompound(page))
2026 return 0;
accf163e
KH
2027 /*
2028 * Corner case handling. This is usually called from add_to_page_cache().
2029 * But some filesystems (shmem) precharge the page before calling it
2030 * and then call add_to_page_cache() with GFP_NOWAIT.
2031 *
2032 * In the GFP_NOWAIT case, the page may already be charged before
2033 * add_to_page_cache() runs (see shmem.c). Check for that here and avoid
2034 * charging twice. (It works, but at a slightly higher cost.)
b5a84319
KH
2035 * And when the page is SwapCache, the swap information should be taken
2036 * into account. This now runs under lock_page().
accf163e
KH
2037 */
2038 if (!(gfp_mask & __GFP_WAIT)) {
2039 struct page_cgroup *pc;
2040
52d4b9ac
KH
2041
2042 pc = lookup_page_cgroup(page);
2043 if (!pc)
2044 return 0;
2045 lock_page_cgroup(pc);
2046 if (PageCgroupUsed(pc)) {
2047 unlock_page_cgroup(pc);
accf163e
KH
2048 return 0;
2049 }
52d4b9ac 2050 unlock_page_cgroup(pc);
accf163e
KH
2051 }
2052
b5a84319 2053 if (unlikely(!mm && !mem))
8697d331 2054 mm = &init_mm;
accf163e 2055
c05555b5
KH
2056 if (page_is_file_cache(page))
2057 return mem_cgroup_charge_common(page, mm, gfp_mask,
e8589cc1 2058 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
b5a84319 2059
83aae4c7
DN
2060 /* shmem */
2061 if (PageSwapCache(page)) {
2062 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2063 if (!ret)
2064 __mem_cgroup_commit_charge_swapin(page, mem,
2065 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2066 } else
2067 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2068 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
b5a84319 2069
b5a84319 2070 return ret;
e8589cc1
KH
2071}
2072
54595fe2
KH
2073/*
2074 * During swap-in (try_charge -> commit or cancel) the page is locked.
2075 * When try_charge() returns successfully, a refcnt on the memcg is acquired
21ae2956 2076 * without a struct page_cgroup. This refcnt will be consumed by
54595fe2
KH
2077 * "commit()" or removed by "cancel()"
2078 */
8c7c6e34
KH
2079int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2080 struct page *page,
2081 gfp_t mask, struct mem_cgroup **ptr)
2082{
2083 struct mem_cgroup *mem;
54595fe2 2084 int ret;
8c7c6e34 2085
f8d66542 2086 if (mem_cgroup_disabled())
8c7c6e34
KH
2087 return 0;
2088
2089 if (!do_swap_account)
2090 goto charge_cur_mm;
8c7c6e34
KH
2091 /*
2092 * A racing thread's fault, or swapoff, may have already updated
407f9c8b
HD
2093 * the pte, and even removed page from swap cache: in those cases
2094 * do_swap_page()'s pte_same() test will fail; but there's also a
2095 * KSM case which does need to charge the page.
8c7c6e34
KH
2096 */
2097 if (!PageSwapCache(page))
407f9c8b 2098 goto charge_cur_mm;
e42d9d5d 2099 mem = try_get_mem_cgroup_from_page(page);
54595fe2
KH
2100 if (!mem)
2101 goto charge_cur_mm;
8c7c6e34 2102 *ptr = mem;
430e4863 2103 ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
54595fe2
KH
2104 /* drop extra refcnt from tryget */
2105 css_put(&mem->css);
2106 return ret;
8c7c6e34
KH
2107charge_cur_mm:
2108 if (unlikely(!mm))
2109 mm = &init_mm;
430e4863 2110 return __mem_cgroup_try_charge(mm, mask, ptr, true);
8c7c6e34
KH
2111}
2112
83aae4c7
DN
2113static void
2114__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2115 enum charge_type ctype)
7a81b88c
KH
2116{
2117 struct page_cgroup *pc;
2118
f8d66542 2119 if (mem_cgroup_disabled())
7a81b88c
KH
2120 return;
2121 if (!ptr)
2122 return;
88703267 2123 cgroup_exclude_rmdir(&ptr->css);
7a81b88c 2124 pc = lookup_page_cgroup(page);
544122e5 2125 mem_cgroup_lru_del_before_commit_swapcache(page);
83aae4c7 2126 __mem_cgroup_commit_charge(ptr, pc, ctype);
544122e5 2127 mem_cgroup_lru_add_after_commit_swapcache(page);
8c7c6e34
KH
2128 /*
2129 * Now the swap entry is in memory. This means this page may be
2130 * counted both as mem and as swap: a double count.
03f3c433
KH
2131 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2132 * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2133 * may call delete_from_swap_cache() before we reach here.
8c7c6e34 2134 */
03f3c433 2135 if (do_swap_account && PageSwapCache(page)) {
8c7c6e34 2136 swp_entry_t ent = {.val = page_private(page)};
a3b2d692 2137 unsigned short id;
8c7c6e34 2138 struct mem_cgroup *memcg;
a3b2d692
KH
2139
2140 id = swap_cgroup_record(ent, 0);
2141 rcu_read_lock();
2142 memcg = mem_cgroup_lookup(id);
8c7c6e34 2143 if (memcg) {
a3b2d692
KH
2144 /*
2145 * The recorded memcg may be an obsolete one, so avoid
2146 * calling css_tryget().
2147 */
0c3e73e8 2148 if (!mem_cgroup_is_root(memcg))
4e649152 2149 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 2150 mem_cgroup_swap_statistics(memcg, false);
8c7c6e34
KH
2151 mem_cgroup_put(memcg);
2152 }
a3b2d692 2153 rcu_read_unlock();
8c7c6e34 2154 }
88703267
KH
2155 /*
2156 * At swap-in we may charge against a cgroup which has no tasks, so
2157 * rmdir()->pre_destroy() can be called while we do this charge.
2158 * In that case, we need to call pre_destroy() again. Check it here.
2159 */
2160 cgroup_release_and_wakeup_rmdir(&ptr->css);
7a81b88c
KH
2161}
2162
83aae4c7
DN
2163void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2164{
2165 __mem_cgroup_commit_charge_swapin(page, ptr,
2166 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2167}
2168
7a81b88c
KH
2169void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2170{
f8d66542 2171 if (mem_cgroup_disabled())
7a81b88c
KH
2172 return;
2173 if (!mem)
2174 return;
a3032a2c 2175 mem_cgroup_cancel_charge(mem);
7a81b88c
KH
2176}
2177
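/*
 * Editor's note -- illustrative sketch, not part of memcontrol.c: how a
 * page-fault path is expected to pair the swap-in charge API above
 * (try_charge, then commit on success or cancel on failure). The helper
 * example_swapin_fault() and map_page_into_pte() are hypothetical names;
 * the real callers live in mm/memory.c (do_swap_page) and mm/shmem.c.
 */
static int example_swapin_fault(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *ptr = NULL;

	/* page is locked; this may take a css refcount and precharge the memcg */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return -ENOMEM;

	if (map_page_into_pte(page)) {			/* hypothetical failure path */
		mem_cgroup_cancel_charge_swapin(ptr);	/* undo the precharge */
		return -EFAULT;
	}

	mem_cgroup_commit_charge_swapin(page, ptr);	/* mark page_cgroup USED */
	return 0;
}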
569b846d
KH
2178static void
2179__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2180{
2181 struct memcg_batch_info *batch = NULL;
2182 bool uncharge_memsw = true;
2183 /* If swapout, usage of swap doesn't decrease */
2184 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2185 uncharge_memsw = false;
569b846d
KH
2186
2187 batch = &current->memcg_batch;
2188 /*
2189 * Usually we do css_get() when we remember a memcg pointer.
2190 * But in this case we keep res->usage until the end of a series of
2191 * uncharges, so it's ok to ignore the memcg's refcnt.
2192 */
2193 if (!batch->memcg)
2194 batch->memcg = mem;
3c11ecf4
KH
2195 /*
2196 * do_batch > 0 when unmapping pages or doing inode invalidate/truncate.
2197 * In those cases, pages freed in succession can be expected to be in
2198 * the same cgroup, and we have a chance to coalesce uncharges.
2199 * But we uncharge one by one if the task was killed by OOM (TIF_MEMDIE)
2200 * because we want to do the uncharge as soon as possible.
2201 */
2202
2203 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2204 goto direct_uncharge;
2205
569b846d
KH
2206 /*
2207 * In the typical case batch->memcg == mem, which means we can
2208 * merge a series of uncharges into a single res_counter uncharge.
2209 * If not, we uncharge the res_counter one by one.
2210 */
2211 if (batch->memcg != mem)
2212 goto direct_uncharge;
2213 /* remember freed charge and uncharge it later */
2214 batch->bytes += PAGE_SIZE;
2215 if (uncharge_memsw)
2216 batch->memsw_bytes += PAGE_SIZE;
2217 return;
2218direct_uncharge:
2219 res_counter_uncharge(&mem->res, PAGE_SIZE);
2220 if (uncharge_memsw)
2221 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
3c11ecf4
KH
2222 if (unlikely(batch->memcg != mem))
2223 memcg_oom_recover(mem);
569b846d
KH
2224 return;
2225}
7a81b88c 2226
8a9f3ccd 2227/*
69029cd5 2228 * uncharge if !page_mapped(page)
8a9f3ccd 2229 */
8c7c6e34 2230static struct mem_cgroup *
69029cd5 2231__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
8a9f3ccd 2232{
8289546e 2233 struct page_cgroup *pc;
8c7c6e34 2234 struct mem_cgroup *mem = NULL;
072c56c1 2235 struct mem_cgroup_per_zone *mz;
8a9f3ccd 2236
f8d66542 2237 if (mem_cgroup_disabled())
8c7c6e34 2238 return NULL;
4077960e 2239
d13d1443 2240 if (PageSwapCache(page))
8c7c6e34 2241 return NULL;
d13d1443 2242
8697d331 2243 /*
3c541e14 2244 * Check if our page_cgroup is valid
8697d331 2245 */
52d4b9ac
KH
2246 pc = lookup_page_cgroup(page);
2247 if (unlikely(!pc || !PageCgroupUsed(pc)))
8c7c6e34 2248 return NULL;
b9c565d5 2249
52d4b9ac 2250 lock_page_cgroup(pc);
d13d1443 2251
8c7c6e34
KH
2252 mem = pc->mem_cgroup;
2253
d13d1443
KH
2254 if (!PageCgroupUsed(pc))
2255 goto unlock_out;
2256
2257 switch (ctype) {
2258 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
8a9478ca 2259 case MEM_CGROUP_CHARGE_TYPE_DROP:
ac39cf8c 2260 /* See mem_cgroup_prepare_migration() */
2261 if (page_mapped(page) || PageCgroupMigration(pc))
d13d1443
KH
2262 goto unlock_out;
2263 break;
2264 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2265 if (!PageAnon(page)) { /* Shared memory */
2266 if (page->mapping && !page_is_file_cache(page))
2267 goto unlock_out;
2268 } else if (page_mapped(page)) /* Anon */
2269 goto unlock_out;
2270 break;
2271 default:
2272 break;
52d4b9ac 2273 }
d13d1443 2274
569b846d
KH
2275 if (!mem_cgroup_is_root(mem))
2276 __do_uncharge(mem, ctype);
0c3e73e8
BS
2277 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2278 mem_cgroup_swap_statistics(mem, true);
08e552c6 2279 mem_cgroup_charge_statistics(mem, pc, false);
04046e1a 2280
52d4b9ac 2281 ClearPageCgroupUsed(pc);
544122e5
KH
2282 /*
2283 * pc->mem_cgroup is not cleared here. It will be accessed when it's
2284 * freed from LRU. This is safe because uncharged page is expected not
2285 * to be reused (freed soon). Exception is SwapCache, it's handled by
2286 * special functions.
2287 */
b9c565d5 2288
69029cd5 2289 mz = page_cgroup_zoneinfo(pc);
52d4b9ac 2290 unlock_page_cgroup(pc);
fb59e9f1 2291
d2265e6f 2292 memcg_check_events(mem, page);
a7fe942e
KH
2293 /* at swapout, this memcg will be accessed to record to swap */
2294 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2295 css_put(&mem->css);
6d12e2d8 2296
8c7c6e34 2297 return mem;
d13d1443
KH
2298
2299unlock_out:
2300 unlock_page_cgroup(pc);
8c7c6e34 2301 return NULL;
3c541e14
BS
2302}
2303
69029cd5
KH
2304void mem_cgroup_uncharge_page(struct page *page)
2305{
52d4b9ac
KH
2306 /* early check. */
2307 if (page_mapped(page))
2308 return;
2309 if (page->mapping && !PageAnon(page))
2310 return;
69029cd5
KH
2311 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2312}
2313
2314void mem_cgroup_uncharge_cache_page(struct page *page)
2315{
2316 VM_BUG_ON(page_mapped(page));
b7abea96 2317 VM_BUG_ON(page->mapping);
69029cd5
KH
2318 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2319}
2320
569b846d
KH
2321/*
2322 * batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
2323 * In those cases, pages are freed continuously and we can expect them to
2324 * be in the same memcg. Each of these callers itself limits the number of
2325 * pages freed at once, so uncharge_start/end() is called appropriately.
2326 * This may be called several (nested) times in one context.
2327 */
2328
2329void mem_cgroup_uncharge_start(void)
2330{
2331 current->memcg_batch.do_batch++;
2332 /* We can do nest. */
2333 if (current->memcg_batch.do_batch == 1) {
2334 current->memcg_batch.memcg = NULL;
2335 current->memcg_batch.bytes = 0;
2336 current->memcg_batch.memsw_bytes = 0;
2337 }
2338}
2339
2340void mem_cgroup_uncharge_end(void)
2341{
2342 struct memcg_batch_info *batch = &current->memcg_batch;
2343
2344 if (!batch->do_batch)
2345 return;
2346
2347 batch->do_batch--;
2348 if (batch->do_batch) /* If stacked, do nothing. */
2349 return;
2350
2351 if (!batch->memcg)
2352 return;
2353 /*
2354 * This "batch->memcg" is valid without any css_get/put etc...
2355 * bacause we hide charges behind us.
2356 */
2357 if (batch->bytes)
2358 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2359 if (batch->memsw_bytes)
2360 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
3c11ecf4 2361 memcg_oom_recover(batch->memcg);
569b846d
KH
2362 /* forget this pointer (for sanity check) */
2363 batch->memcg = NULL;
2364}
2365
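/*
 * Editor's note -- illustrative sketch, not part of memcontrol.c: how a
 * caller that frees a run of pages from one mapping is expected to bracket
 * its uncharges with the batching API above, so res_counter is touched once
 * per batch instead of once per page. free_run_of_pages() and its arguments
 * are hypothetical; the real callers are the unmap/truncate paths named in
 * the comment above.
 */
static void free_run_of_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();			/* open a batch in current->memcg_batch */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* coalesced while the batch is open */
	mem_cgroup_uncharge_end();			/* one res_counter_uncharge() per batch */
}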
e767e056 2366#ifdef CONFIG_SWAP
8c7c6e34 2367/*
e767e056 2368 * Called after __delete_from_swap_cache(); drops the "page" account.
8c7c6e34
KH
2369 * memcg information is recorded in the swap_cgroup of "ent".
2370 */
8a9478ca
KH
2371void
2372mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
8c7c6e34
KH
2373{
2374 struct mem_cgroup *memcg;
8a9478ca
KH
2375 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2376
2377 if (!swapout) /* this was a swap cache but the swap is unused ! */
2378 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2379
2380 memcg = __mem_cgroup_uncharge_common(page, ctype);
8c7c6e34 2381
8c7c6e34 2382 /* record memcg information */
8a9478ca 2383 if (do_swap_account && swapout && memcg) {
a3b2d692 2384 swap_cgroup_record(ent, css_id(&memcg->css));
8c7c6e34
KH
2385 mem_cgroup_get(memcg);
2386 }
8a9478ca 2387 if (swapout && memcg)
a7fe942e 2388 css_put(&memcg->css);
8c7c6e34 2389}
e767e056 2390#endif
8c7c6e34
KH
2391
2392#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2393/*
2394 * Called from swap_entry_free(). Removes the record in swap_cgroup and
2395 * uncharges the "memsw" account.
2396 */
2397void mem_cgroup_uncharge_swap(swp_entry_t ent)
d13d1443 2398{
8c7c6e34 2399 struct mem_cgroup *memcg;
a3b2d692 2400 unsigned short id;
8c7c6e34
KH
2401
2402 if (!do_swap_account)
2403 return;
2404
a3b2d692
KH
2405 id = swap_cgroup_record(ent, 0);
2406 rcu_read_lock();
2407 memcg = mem_cgroup_lookup(id);
8c7c6e34 2408 if (memcg) {
a3b2d692
KH
2409 /*
2410 * We uncharge this because swap is freed.
2411 * This memcg may be an obsolete one. We avoid calling css_tryget().
2412 */
0c3e73e8 2413 if (!mem_cgroup_is_root(memcg))
4e649152 2414 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 2415 mem_cgroup_swap_statistics(memcg, false);
8c7c6e34
KH
2416 mem_cgroup_put(memcg);
2417 }
a3b2d692 2418 rcu_read_unlock();
d13d1443 2419}
02491447
DN
2420
2421/**
2422 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2423 * @entry: swap entry to be moved
2424 * @from: mem_cgroup which the entry is moved from
2425 * @to: mem_cgroup which the entry is moved to
483c30b5 2426 * @need_fixup: whether we should fixup res_counters and refcounts.
02491447
DN
2427 *
2428 * It succeeds only when the swap_cgroup's record for this entry is the same
2429 * as the mem_cgroup's id of @from.
2430 *
2431 * Returns 0 on success, -EINVAL on failure.
2432 *
2433 * The caller must have charged to @to, IOW, called res_counter_charge() about
2434 * both res and memsw, and called css_get().
2435 */
2436static int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b5 2437 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
02491447
DN
2438{
2439 unsigned short old_id, new_id;
2440
2441 old_id = css_id(&from->css);
2442 new_id = css_id(&to->css);
2443
2444 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 2445 mem_cgroup_swap_statistics(from, false);
483c30b5 2446 mem_cgroup_swap_statistics(to, true);
02491447 2447 /*
483c30b5
DN
2448 * This function is only called from task migration context now.
2449 * It postpones res_counter and refcount handling till the end
2450 * of task migration(mem_cgroup_clear_mc()) for performance
2451 * improvement. But we cannot postpone mem_cgroup_get(to)
2452 * because if the process that has been moved to @to does
2453 * swap-in, the refcount of @to might be decreased to 0.
02491447 2454 */
02491447 2455 mem_cgroup_get(to);
483c30b5
DN
2456 if (need_fixup) {
2457 if (!mem_cgroup_is_root(from))
2458 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2459 mem_cgroup_put(from);
2460 /*
2461 * we charged both to->res and to->memsw, so we should
2462 * uncharge to->res.
2463 */
2464 if (!mem_cgroup_is_root(to))
2465 res_counter_uncharge(&to->res, PAGE_SIZE);
2466 css_put(&to->css);
2467 }
02491447
DN
2468 return 0;
2469 }
2470 return -EINVAL;
2471}
2472#else
2473static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b5 2474 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
02491447
DN
2475{
2476 return -EINVAL;
2477}
8c7c6e34 2478#endif
d13d1443 2479
ae41be37 2480/*
01b1ae63
KH
2481 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2482 * page belongs to.
ae41be37 2483 */
ac39cf8c 2484int mem_cgroup_prepare_migration(struct page *page,
2485 struct page *newpage, struct mem_cgroup **ptr)
ae41be37
KH
2486{
2487 struct page_cgroup *pc;
e8589cc1 2488 struct mem_cgroup *mem = NULL;
ac39cf8c 2489 enum charge_type ctype;
e8589cc1 2490 int ret = 0;
8869b8f6 2491
f8d66542 2492 if (mem_cgroup_disabled())
4077960e
BS
2493 return 0;
2494
52d4b9ac
KH
2495 pc = lookup_page_cgroup(page);
2496 lock_page_cgroup(pc);
2497 if (PageCgroupUsed(pc)) {
e8589cc1
KH
2498 mem = pc->mem_cgroup;
2499 css_get(&mem->css);
ac39cf8c 2500 /*
2501 * When migrating an anonymous page, its mapcount goes down
2502 * to 0 and uncharge() will be called. But even if it's fully
2503 * unmapped, migration may fail and the page would have to be
2504 * charged again. We set the MIGRATION flag here and delay uncharge
2505 * until end_migration() is called.
2506 *
2507 * Corner-case thinking:
2508 * A)
2509 * The old page was mapped as Anon and was unmapped-and-freed
2510 * while migration was ongoing.
2511 * If unmap finds the old page, its uncharge() will be delayed
2512 * until end_migration(). If unmap finds the new page, it is
2513 * uncharged when its mapcount drops from 1 to 0. If the unmap code
2514 * finds a swap migration entry, the new page will not be mapped
2515 * and end_migration() will find it (mapcount == 0).
2516 *
2517 * B)
2518 * The old page was mapped but migration fails, so the kernel
2519 * remaps it. The charge for it is kept by the MIGRATION flag even
2520 * if its mapcount goes down to 0, so the remap can succeed
2521 * without charging it again.
2522 *
2523 * C)
2524 * The "old" page is under lock_page() until the end of
2525 * migration, so the old page itself will not be swapped out.
2526 * If the new page is swapped out before end_migration(), our
2527 * hook into the usual swap-out path will catch the event.
2528 */
2529 if (PageAnon(page))
2530 SetPageCgroupMigration(pc);
e8589cc1 2531 }
52d4b9ac 2532 unlock_page_cgroup(pc);
ac39cf8c 2533 /*
2534 * If the page is not charged at this point,
2535 * we return here.
2536 */
2537 if (!mem)
2538 return 0;
01b1ae63 2539
93d5c9be 2540 *ptr = mem;
ac39cf8c 2541 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2542 css_put(&mem->css);/* drop extra refcnt */
2543 if (ret || *ptr == NULL) {
2544 if (PageAnon(page)) {
2545 lock_page_cgroup(pc);
2546 ClearPageCgroupMigration(pc);
2547 unlock_page_cgroup(pc);
2548 /*
2549 * The old page may be fully unmapped while we kept it.
2550 */
2551 mem_cgroup_uncharge_page(page);
2552 }
2553 return -ENOMEM;
e8589cc1 2554 }
ac39cf8c 2555 /*
2556 * We charge the new page before it's used/mapped, so even if unlock_page()
2557 * is called before end_migration we can catch all events on this new
2558 * page. In case the new page is migrated but not remapped, its
2559 * mapcount will finally be 0 and we call uncharge in end_migration().
2560 */
2561 pc = lookup_page_cgroup(newpage);
2562 if (PageAnon(page))
2563 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2564 else if (page_is_file_cache(page))
2565 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2566 else
2567 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2568 __mem_cgroup_commit_charge(mem, pc, ctype);
e8589cc1 2569 return ret;
ae41be37 2570}
8869b8f6 2571
69029cd5 2572/* remove redundant charge if migration failed*/
01b1ae63 2573void mem_cgroup_end_migration(struct mem_cgroup *mem,
ac39cf8c 2574 struct page *oldpage, struct page *newpage)
ae41be37 2575{
ac39cf8c 2576 struct page *used, *unused;
01b1ae63 2577 struct page_cgroup *pc;
01b1ae63
KH
2578
2579 if (!mem)
2580 return;
ac39cf8c 2581 /* blocks rmdir() */
88703267 2582 cgroup_exclude_rmdir(&mem->css);
01b1ae63
KH
2583 /* at migration success, oldpage->mapping is NULL. */
2584 if (oldpage->mapping) {
ac39cf8c 2585 used = oldpage;
2586 unused = newpage;
01b1ae63 2587 } else {
ac39cf8c 2588 used = newpage;
01b1ae63
KH
2589 unused = oldpage;
2590 }
69029cd5 2591 /*
ac39cf8c 2592 * We disallowed uncharging of pages under migration because the page's
2593 * mapcount goes down to zero only temporarily.
2594 * Clear the flag and check whether the page should be charged.
01b1ae63 2595 */
ac39cf8c 2596 pc = lookup_page_cgroup(oldpage);
2597 lock_page_cgroup(pc);
2598 ClearPageCgroupMigration(pc);
2599 unlock_page_cgroup(pc);
01b1ae63 2600
ac39cf8c 2601 if (unused != oldpage)
2602 pc = lookup_page_cgroup(unused);
2603 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2604
2605 pc = lookup_page_cgroup(used);
01b1ae63 2606 /*
ac39cf8c 2607 * If the page is file cache, the radix-tree replacement is atomic
2608 * and we can skip this check. If it was an Anon page, its mapcount
2609 * goes down to 0, but because we added the MIGRATION flag it is not
2610 * uncharged yet. There are several cases, but the page->mapcount check
2611 * and the USED bit check in mem_cgroup_uncharge_page() do enough
2612 * checking. (See prepare_charge() as well.)
69029cd5 2613 */
ac39cf8c 2614 if (PageAnon(used))
2615 mem_cgroup_uncharge_page(used);
88703267 2616 /*
ac39cf8c 2617 * At migration, we may charge against a cgroup which has no
2618 * tasks.
88703267
KH
2619 * So rmdir()->pre_destroy() can be called while we do this charge.
2620 * In that case, we need to call pre_destroy() again. Check it here.
2621 */
2622 cgroup_release_and_wakeup_rmdir(&mem->css);
ae41be37 2623}
78fb7466 2624
c9b0ed51 2625/*
ae3abae6
DN
2626 * A call to try to shrink memory usage on charge failure at shmem's swap-in.
2627 * Calling hierarchical_reclaim is not enough because we should update
2628 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2629 * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
2630 * not from the memcg which this page would be charged to.
2631 * try_charge_swapin does all of this properly.
c9b0ed51 2632 */
ae3abae6 2633int mem_cgroup_shmem_charge_fallback(struct page *page,
b5a84319
KH
2634 struct mm_struct *mm,
2635 gfp_t gfp_mask)
c9b0ed51 2636{
b5a84319 2637 struct mem_cgroup *mem = NULL;
ae3abae6 2638 int ret;
c9b0ed51 2639
f8d66542 2640 if (mem_cgroup_disabled())
cede86ac 2641 return 0;
c9b0ed51 2642
ae3abae6
DN
2643 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2644 if (!ret)
2645 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
c9b0ed51 2646
ae3abae6 2647 return ret;
c9b0ed51
KH
2648}
2649
8c7c6e34
KH
2650static DEFINE_MUTEX(set_limit_mutex);
2651
d38d2a75 2652static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
8c7c6e34 2653 unsigned long long val)
628f4235 2654{
81d39c20 2655 int retry_count;
3c11ecf4 2656 u64 memswlimit, memlimit;
628f4235 2657 int ret = 0;
81d39c20
KH
2658 int children = mem_cgroup_count_children(memcg);
2659 u64 curusage, oldusage;
3c11ecf4 2660 int enlarge;
81d39c20
KH
2661
2662 /*
2663 * To keep hierarchical_reclaim simple, how long we should retry
2664 * depends on the callers. We set our retry count to be a function
2665 * of the number of children we should visit in this loop.
2666 */
2667 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2668
2669 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
628f4235 2670
3c11ecf4 2671 enlarge = 0;
8c7c6e34 2672 while (retry_count) {
628f4235
KH
2673 if (signal_pending(current)) {
2674 ret = -EINTR;
2675 break;
2676 }
8c7c6e34
KH
2677 /*
2678 * Rather than hide all this in some function, I do it in an
2679 * open-coded manner so you can see what it really does.
2680 * We have to guarantee mem->res.limit < mem->memsw.limit.
2681 */
2682 mutex_lock(&set_limit_mutex);
2683 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2684 if (memswlimit < val) {
2685 ret = -EINVAL;
2686 mutex_unlock(&set_limit_mutex);
628f4235
KH
2687 break;
2688 }
3c11ecf4
KH
2689
2690 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2691 if (memlimit < val)
2692 enlarge = 1;
2693
8c7c6e34 2694 ret = res_counter_set_limit(&memcg->res, val);
22a668d7
KH
2695 if (!ret) {
2696 if (memswlimit == val)
2697 memcg->memsw_is_minimum = true;
2698 else
2699 memcg->memsw_is_minimum = false;
2700 }
8c7c6e34
KH
2701 mutex_unlock(&set_limit_mutex);
2702
2703 if (!ret)
2704 break;
2705
aa20d489 2706 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
4e416953 2707 MEM_CGROUP_RECLAIM_SHRINK);
81d39c20
KH
2708 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2709 /* Usage is reduced ? */
2710 if (curusage >= oldusage)
2711 retry_count--;
2712 else
2713 oldusage = curusage;
8c7c6e34 2714 }
3c11ecf4
KH
2715 if (!ret && enlarge)
2716 memcg_oom_recover(memcg);
14797e23 2717
8c7c6e34
KH
2718 return ret;
2719}
2720
338c8431
LZ
2721static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2722 unsigned long long val)
8c7c6e34 2723{
81d39c20 2724 int retry_count;
3c11ecf4 2725 u64 memlimit, memswlimit, oldusage, curusage;
81d39c20
KH
2726 int children = mem_cgroup_count_children(memcg);
2727 int ret = -EBUSY;
3c11ecf4 2728 int enlarge = 0;
8c7c6e34 2729
81d39c20
KH
2730 /* see mem_cgroup_resize_res_limit */
2731 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2732 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
8c7c6e34
KH
2733 while (retry_count) {
2734 if (signal_pending(current)) {
2735 ret = -EINTR;
2736 break;
2737 }
2738 /*
2739 * Rather than hide all this in some function, I do it in an
2740 * open-coded manner so you can see what it really does.
2741 * We have to guarantee mem->res.limit < mem->memsw.limit.
2742 */
2743 mutex_lock(&set_limit_mutex);
2744 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2745 if (memlimit > val) {
2746 ret = -EINVAL;
2747 mutex_unlock(&set_limit_mutex);
2748 break;
2749 }
3c11ecf4
KH
2750 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2751 if (memswlimit < val)
2752 enlarge = 1;
8c7c6e34 2753 ret = res_counter_set_limit(&memcg->memsw, val);
22a668d7
KH
2754 if (!ret) {
2755 if (memlimit == val)
2756 memcg->memsw_is_minimum = true;
2757 else
2758 memcg->memsw_is_minimum = false;
2759 }
8c7c6e34
KH
2760 mutex_unlock(&set_limit_mutex);
2761
2762 if (!ret)
2763 break;
2764
4e416953 2765 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
75822b44
BS
2766 MEM_CGROUP_RECLAIM_NOSWAP |
2767 MEM_CGROUP_RECLAIM_SHRINK);
8c7c6e34 2768 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
81d39c20 2769 /* Usage is reduced ? */
8c7c6e34 2770 if (curusage >= oldusage)
628f4235 2771 retry_count--;
81d39c20
KH
2772 else
2773 oldusage = curusage;
628f4235 2774 }
3c11ecf4
KH
2775 if (!ret && enlarge)
2776 memcg_oom_recover(memcg);
628f4235
KH
2777 return ret;
2778}
2779
4e416953
BS
2780unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2781 gfp_t gfp_mask, int nid,
2782 int zid)
2783{
2784 unsigned long nr_reclaimed = 0;
2785 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2786 unsigned long reclaimed;
2787 int loop = 0;
2788 struct mem_cgroup_tree_per_zone *mctz;
ef8745c1 2789 unsigned long long excess;
4e416953
BS
2790
2791 if (order > 0)
2792 return 0;
2793
2794 mctz = soft_limit_tree_node_zone(nid, zid);
2795 /*
2796 * This loop can run for a while, especially if mem_cgroups continuously
2797 * keep exceeding their soft limit and putting the system under
2798 * pressure.
2799 */
2800 do {
2801 if (next_mz)
2802 mz = next_mz;
2803 else
2804 mz = mem_cgroup_largest_soft_limit_node(mctz);
2805 if (!mz)
2806 break;
2807
2808 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2809 gfp_mask,
2810 MEM_CGROUP_RECLAIM_SOFT);
2811 nr_reclaimed += reclaimed;
2812 spin_lock(&mctz->lock);
2813
2814 /*
2815 * If we failed to reclaim anything from this memory cgroup
2816 * it is time to move on to the next cgroup
2817 */
2818 next_mz = NULL;
2819 if (!reclaimed) {
2820 do {
2821 /*
2822 * Loop until we find yet another one.
2823 *
2824 * By the time we get the soft_limit lock
2825 * again, someone might have added the
2826 * group back on the RB tree. Iterate to
2827 * make sure we get a different mem.
2828 * mem_cgroup_largest_soft_limit_node returns
2829 * NULL if no other cgroup is present on
2830 * the tree
2831 */
2832 next_mz =
2833 __mem_cgroup_largest_soft_limit_node(mctz);
2834 if (next_mz == mz) {
2835 css_put(&next_mz->mem->css);
2836 next_mz = NULL;
2837 } else /* next_mz == NULL or other memcg */
2838 break;
2839 } while (1);
2840 }
4e416953 2841 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
ef8745c1 2842 excess = res_counter_soft_limit_excess(&mz->mem->res);
4e416953
BS
2843 /*
2844 * One school of thought says that we should not add
2845 * back the node to the tree if reclaim returns 0.
2846 * But our reclaim could return 0, simply because due
2847 * to priority we are exposing a smaller subset of
2848 * memory to reclaim from. Consider this as a longer
2849 * term TODO.
2850 */
ef8745c1
KH
2851 /* If excess == 0, no tree ops */
2852 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
4e416953
BS
2853 spin_unlock(&mctz->lock);
2854 css_put(&mz->mem->css);
2855 loop++;
2856 /*
2857 * Could not reclaim anything and there are no more
2858 * mem cgroups to try or we seem to be looping without
2859 * reclaiming anything.
2860 */
2861 if (!nr_reclaimed &&
2862 (next_mz == NULL ||
2863 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2864 break;
2865 } while (!nr_reclaimed);
2866 if (next_mz)
2867 css_put(&next_mz->mem->css);
2868 return nr_reclaimed;
2869}
2870
cc847582
KH
2871/*
2872 * This routine traverses the page_cgroups in the given list and drops them all.
cc847582
KH
2873 * *And* this routine doesn't reclaim the page itself, it just removes the page_cgroup.
2874 */
f817ed48 2875static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
08e552c6 2876 int node, int zid, enum lru_list lru)
cc847582 2877{
08e552c6
KH
2878 struct zone *zone;
2879 struct mem_cgroup_per_zone *mz;
f817ed48 2880 struct page_cgroup *pc, *busy;
08e552c6 2881 unsigned long flags, loop;
072c56c1 2882 struct list_head *list;
f817ed48 2883 int ret = 0;
072c56c1 2884
08e552c6
KH
2885 zone = &NODE_DATA(node)->node_zones[zid];
2886 mz = mem_cgroup_zoneinfo(mem, node, zid);
b69408e8 2887 list = &mz->lists[lru];
cc847582 2888
f817ed48
KH
2889 loop = MEM_CGROUP_ZSTAT(mz, lru);
2890 /* give some margin against EBUSY etc...*/
2891 loop += 256;
2892 busy = NULL;
2893 while (loop--) {
2894 ret = 0;
08e552c6 2895 spin_lock_irqsave(&zone->lru_lock, flags);
f817ed48 2896 if (list_empty(list)) {
08e552c6 2897 spin_unlock_irqrestore(&zone->lru_lock, flags);
52d4b9ac 2898 break;
f817ed48
KH
2899 }
2900 pc = list_entry(list->prev, struct page_cgroup, lru);
2901 if (busy == pc) {
2902 list_move(&pc->lru, list);
648bcc77 2903 busy = NULL;
08e552c6 2904 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48
KH
2905 continue;
2906 }
08e552c6 2907 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48 2908
2c26fdd7 2909 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
f817ed48 2910 if (ret == -ENOMEM)
52d4b9ac 2911 break;
f817ed48
KH
2912
2913 if (ret == -EBUSY || ret == -EINVAL) {
2914 /* found lock contention or "pc" is obsolete. */
2915 busy = pc;
2916 cond_resched();
2917 } else
2918 busy = NULL;
cc847582 2919 }
08e552c6 2920
f817ed48
KH
2921 if (!ret && !list_empty(list))
2922 return -EBUSY;
2923 return ret;
cc847582
KH
2924}
2925
2926/*
2927 * Make the mem_cgroup's charge 0 if there is no task.
2928 * This enables deleting this mem_cgroup.
2929 */
c1e862c1 2930static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
cc847582 2931{
f817ed48
KH
2932 int ret;
2933 int node, zid, shrink;
2934 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
c1e862c1 2935 struct cgroup *cgrp = mem->css.cgroup;
8869b8f6 2936
cc847582 2937 css_get(&mem->css);
f817ed48
KH
2938
2939 shrink = 0;
c1e862c1
KH
2940 /* should free all ? */
2941 if (free_all)
2942 goto try_to_free;
f817ed48 2943move_account:
fce66477 2944 do {
f817ed48 2945 ret = -EBUSY;
c1e862c1
KH
2946 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2947 goto out;
2948 ret = -EINTR;
2949 if (signal_pending(current))
cc847582 2950 goto out;
52d4b9ac
KH
2951 /* This is for putting all *used* pages on the LRU. */
2952 lru_add_drain_all();
cdec2e42 2953 drain_all_stock_sync();
f817ed48 2954 ret = 0;
299b4eaa 2955 for_each_node_state(node, N_HIGH_MEMORY) {
f817ed48 2956 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
b69408e8 2957 enum lru_list l;
f817ed48
KH
2958 for_each_lru(l) {
2959 ret = mem_cgroup_force_empty_list(mem,
08e552c6 2960 node, zid, l);
f817ed48
KH
2961 if (ret)
2962 break;
2963 }
1ecaab2b 2964 }
f817ed48
KH
2965 if (ret)
2966 break;
2967 }
3c11ecf4 2968 memcg_oom_recover(mem);
f817ed48
KH
2969 /* it seems parent cgroup doesn't have enough mem */
2970 if (ret == -ENOMEM)
2971 goto try_to_free;
52d4b9ac 2972 cond_resched();
fce66477
DN
2973 /* "ret" should also be checked to ensure all lists are empty. */
2974 } while (mem->res.usage > 0 || ret);
cc847582
KH
2975out:
2976 css_put(&mem->css);
2977 return ret;
f817ed48
KH
2978
2979try_to_free:
c1e862c1
KH
2980 /* returns EBUSY if there is a task or if we come here twice. */
2981 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
f817ed48
KH
2982 ret = -EBUSY;
2983 goto out;
2984 }
c1e862c1
KH
2985 /* we call try-to-free pages to make this cgroup empty */
2986 lru_add_drain_all();
f817ed48
KH
2987 /* try to free all pages in this cgroup */
2988 shrink = 1;
2989 while (nr_retries && mem->res.usage > 0) {
2990 int progress;
c1e862c1
KH
2991
2992 if (signal_pending(current)) {
2993 ret = -EINTR;
2994 goto out;
2995 }
a7885eb8
KM
2996 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2997 false, get_swappiness(mem));
c1e862c1 2998 if (!progress) {
f817ed48 2999 nr_retries--;
c1e862c1 3000 /* maybe some writeback is necessary */
8aa7e847 3001 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 3002 }
f817ed48
KH
3003
3004 }
08e552c6 3005 lru_add_drain();
f817ed48 3006 /* try move_account...there may be some *locked* pages. */
fce66477 3007 goto move_account;
cc847582
KH
3008}
3009
c1e862c1
KH
3010int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3011{
3012 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3013}
3014
3015
18f59ea7
BS
3016static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3017{
3018 return mem_cgroup_from_cont(cont)->use_hierarchy;
3019}
3020
3021static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3022 u64 val)
3023{
3024 int retval = 0;
3025 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3026 struct cgroup *parent = cont->parent;
3027 struct mem_cgroup *parent_mem = NULL;
3028
3029 if (parent)
3030 parent_mem = mem_cgroup_from_cont(parent);
3031
3032 cgroup_lock();
3033 /*
af901ca1 3034 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
3035 * in the child subtrees. If it is unset, then the change can
3036 * occur, provided the current cgroup has no children.
3037 *
3038 * For the root cgroup, parent_mem is NULL; we allow the value to be
3039 * set if there are no children.
3040 */
3041 if ((!parent_mem || !parent_mem->use_hierarchy) &&
3042 (val == 1 || val == 0)) {
3043 if (list_empty(&cont->children))
3044 mem->use_hierarchy = val;
3045 else
3046 retval = -EBUSY;
3047 } else
3048 retval = -EINVAL;
3049 cgroup_unlock();
3050
3051 return retval;
3052}
3053
0c3e73e8
BS
3054struct mem_cgroup_idx_data {
3055 s64 val;
3056 enum mem_cgroup_stat_index idx;
3057};
3058
3059static int
3060mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
3061{
3062 struct mem_cgroup_idx_data *d = data;
c62b1a3b 3063 d->val += mem_cgroup_read_stat(mem, d->idx);
0c3e73e8
BS
3064 return 0;
3065}
3066
3067static void
3068mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3069 enum mem_cgroup_stat_index idx, s64 *val)
3070{
3071 struct mem_cgroup_idx_data d;
3072 d.idx = idx;
3073 d.val = 0;
3074 mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
3075 *val = d.val;
3076}
3077
104f3928
KS
3078static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3079{
3080 u64 idx_val, val;
3081
3082 if (!mem_cgroup_is_root(mem)) {
3083 if (!swap)
3084 return res_counter_read_u64(&mem->res, RES_USAGE);
3085 else
3086 return res_counter_read_u64(&mem->memsw, RES_USAGE);
3087 }
3088
3089 mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
3090 val = idx_val;
3091 mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
3092 val += idx_val;
3093
3094 if (swap) {
3095 mem_cgroup_get_recursive_idx_stat(mem,
3096 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
3097 val += idx_val;
3098 }
3099
3100 return val << PAGE_SHIFT;
3101}
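/*
 * Editor's note -- worked example for mem_cgroup_usage() above, assuming
 * 4KiB pages and hypothetical counts: for the root memcg with
 * CACHE = 300 pages, RSS = 200 pages and (with swap accounting enabled)
 * SWAPOUT = 50 pages, the function returns
 *	(300 + 200) << PAGE_SHIFT      = 500 * 4096 bytes  when swap == false,
 *	(300 + 200 + 50) << PAGE_SHIFT = 550 * 4096 bytes  when swap == true,
 * whereas a non-root memcg simply reads RES_USAGE from res or memsw.
 */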
3102
2c3daa72 3103static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
8cdea7c0 3104{
8c7c6e34 3105 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
104f3928 3106 u64 val;
8c7c6e34
KH
3107 int type, name;
3108
3109 type = MEMFILE_TYPE(cft->private);
3110 name = MEMFILE_ATTR(cft->private);
3111 switch (type) {
3112 case _MEM:
104f3928
KS
3113 if (name == RES_USAGE)
3114 val = mem_cgroup_usage(mem, false);
3115 else
0c3e73e8 3116 val = res_counter_read_u64(&mem->res, name);
8c7c6e34
KH
3117 break;
3118 case _MEMSWAP:
104f3928
KS
3119 if (name == RES_USAGE)
3120 val = mem_cgroup_usage(mem, true);
3121 else
0c3e73e8 3122 val = res_counter_read_u64(&mem->memsw, name);
8c7c6e34
KH
3123 break;
3124 default:
3125 BUG();
3126 break;
3127 }
3128 return val;
8cdea7c0 3129}
628f4235
KH
3130/*
3131 * The user of this function is...
3132 * RES_LIMIT.
3133 */
856c13aa
PM
3134static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3135 const char *buffer)
8cdea7c0 3136{
628f4235 3137 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
8c7c6e34 3138 int type, name;
628f4235
KH
3139 unsigned long long val;
3140 int ret;
3141
8c7c6e34
KH
3142 type = MEMFILE_TYPE(cft->private);
3143 name = MEMFILE_ATTR(cft->private);
3144 switch (name) {
628f4235 3145 case RES_LIMIT:
4b3bde4c
BS
3146 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3147 ret = -EINVAL;
3148 break;
3149 }
628f4235
KH
3150 /* This function does all the necessary parsing...reuse it */
3151 ret = res_counter_memparse_write_strategy(buffer, &val);
8c7c6e34
KH
3152 if (ret)
3153 break;
3154 if (type == _MEM)
628f4235 3155 ret = mem_cgroup_resize_limit(memcg, val);
8c7c6e34
KH
3156 else
3157 ret = mem_cgroup_resize_memsw_limit(memcg, val);
628f4235 3158 break;
296c81d8
BS
3159 case RES_SOFT_LIMIT:
3160 ret = res_counter_memparse_write_strategy(buffer, &val);
3161 if (ret)
3162 break;
3163 /*
3164 * For memsw, soft limits are hard to implement in terms
3165 * of semantics. For now, we support soft limits only for
3166 * memory control without swap.
3167 */
3168 if (type == _MEM)
3169 ret = res_counter_set_soft_limit(&memcg->res, val);
3170 else
3171 ret = -EINVAL;
3172 break;
628f4235
KH
3173 default:
3174 ret = -EINVAL; /* should be BUG() ? */
3175 break;
3176 }
3177 return ret;
8cdea7c0
BS
3178}
3179
fee7b548
KH
3180static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3181 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3182{
3183 struct cgroup *cgroup;
3184 unsigned long long min_limit, min_memsw_limit, tmp;
3185
3186 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3187 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3188 cgroup = memcg->css.cgroup;
3189 if (!memcg->use_hierarchy)
3190 goto out;
3191
3192 while (cgroup->parent) {
3193 cgroup = cgroup->parent;
3194 memcg = mem_cgroup_from_cont(cgroup);
3195 if (!memcg->use_hierarchy)
3196 break;
3197 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3198 min_limit = min(min_limit, tmp);
3199 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3200 min_memsw_limit = min(min_memsw_limit, tmp);
3201 }
3202out:
3203 *mem_limit = min_limit;
3204 *memsw_limit = min_memsw_limit;
3205 return;
3206}
3207
29f2a4da 3208static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
c84872e1
PE
3209{
3210 struct mem_cgroup *mem;
8c7c6e34 3211 int type, name;
c84872e1
PE
3212
3213 mem = mem_cgroup_from_cont(cont);
8c7c6e34
KH
3214 type = MEMFILE_TYPE(event);
3215 name = MEMFILE_ATTR(event);
3216 switch (name) {
29f2a4da 3217 case RES_MAX_USAGE:
8c7c6e34
KH
3218 if (type == _MEM)
3219 res_counter_reset_max(&mem->res);
3220 else
3221 res_counter_reset_max(&mem->memsw);
29f2a4da
PE
3222 break;
3223 case RES_FAILCNT:
8c7c6e34
KH
3224 if (type == _MEM)
3225 res_counter_reset_failcnt(&mem->res);
3226 else
3227 res_counter_reset_failcnt(&mem->memsw);
29f2a4da
PE
3228 break;
3229 }
f64c3f54 3230
85cc59db 3231 return 0;
c84872e1
PE
3232}
3233
7dc74be0
DN
3234static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3235 struct cftype *cft)
3236{
3237 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3238}
3239
02491447 3240#ifdef CONFIG_MMU
7dc74be0
DN
3241static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3242 struct cftype *cft, u64 val)
3243{
3244 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3245
3246 if (val >= (1 << NR_MOVE_TYPE))
3247 return -EINVAL;
3248 /*
3249 * We check this value several times in both can_attach() and
3250 * attach(), so we need cgroup lock to prevent this value from being
3251 * inconsistent.
3252 */
3253 cgroup_lock();
3254 mem->move_charge_at_immigrate = val;
3255 cgroup_unlock();
3256
3257 return 0;
3258}
02491447
DN
3259#else
3260static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3261 struct cftype *cft, u64 val)
3262{
3263 return -ENOSYS;
3264}
3265#endif
7dc74be0 3266
14067bb3
KH
3267
3268/* For read statistics */
3269enum {
3270 MCS_CACHE,
3271 MCS_RSS,
d8046582 3272 MCS_FILE_MAPPED,
14067bb3
KH
3273 MCS_PGPGIN,
3274 MCS_PGPGOUT,
1dd3a273 3275 MCS_SWAP,
14067bb3
KH
3276 MCS_INACTIVE_ANON,
3277 MCS_ACTIVE_ANON,
3278 MCS_INACTIVE_FILE,
3279 MCS_ACTIVE_FILE,
3280 MCS_UNEVICTABLE,
3281 NR_MCS_STAT,
3282};
3283
3284struct mcs_total_stat {
3285 s64 stat[NR_MCS_STAT];
d2ceb9b7
KH
3286};
3287
14067bb3
KH
3288struct {
3289 char *local_name;
3290 char *total_name;
3291} memcg_stat_strings[NR_MCS_STAT] = {
3292 {"cache", "total_cache"},
3293 {"rss", "total_rss"},
d69b042f 3294 {"mapped_file", "total_mapped_file"},
14067bb3
KH
3295 {"pgpgin", "total_pgpgin"},
3296 {"pgpgout", "total_pgpgout"},
1dd3a273 3297 {"swap", "total_swap"},
14067bb3
KH
3298 {"inactive_anon", "total_inactive_anon"},
3299 {"active_anon", "total_active_anon"},
3300 {"inactive_file", "total_inactive_file"},
3301 {"active_file", "total_active_file"},
3302 {"unevictable", "total_unevictable"}
3303};
3304
3305
3306static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3307{
3308 struct mcs_total_stat *s = data;
3309 s64 val;
3310
3311 /* per cpu stat */
c62b1a3b 3312 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
14067bb3 3313 s->stat[MCS_CACHE] += val * PAGE_SIZE;
c62b1a3b 3314 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
14067bb3 3315 s->stat[MCS_RSS] += val * PAGE_SIZE;
c62b1a3b 3316 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
d8046582 3317 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
c62b1a3b 3318 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
14067bb3 3319 s->stat[MCS_PGPGIN] += val;
c62b1a3b 3320 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
14067bb3 3321 s->stat[MCS_PGPGOUT] += val;
1dd3a273 3322 if (do_swap_account) {
c62b1a3b 3323 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
1dd3a273
DN
3324 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3325 }
14067bb3
KH
3326
3327 /* per zone stat */
3328 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3329 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3330 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3331 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3332 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3333 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3334 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3335 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3336 val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3337 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3338 return 0;
3339}
3340
3341static void
3342mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3343{
3344 mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3345}
3346
c64745cf
PM
3347static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3348 struct cgroup_map_cb *cb)
d2ceb9b7 3349{
d2ceb9b7 3350 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
14067bb3 3351 struct mcs_total_stat mystat;
d2ceb9b7
KH
3352 int i;
3353
14067bb3
KH
3354 memset(&mystat, 0, sizeof(mystat));
3355 mem_cgroup_get_local_stat(mem_cont, &mystat);
d2ceb9b7 3356
1dd3a273
DN
3357 for (i = 0; i < NR_MCS_STAT; i++) {
3358 if (i == MCS_SWAP && !do_swap_account)
3359 continue;
14067bb3 3360 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
1dd3a273 3361 }
7b854121 3362
14067bb3 3363 /* Hierarchical information */
fee7b548
KH
3364 {
3365 unsigned long long limit, memsw_limit;
3366 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3367 cb->fill(cb, "hierarchical_memory_limit", limit);
3368 if (do_swap_account)
3369 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3370 }
7f016ee8 3371
14067bb3
KH
3372 memset(&mystat, 0, sizeof(mystat));
3373 mem_cgroup_get_total_stat(mem_cont, &mystat);
1dd3a273
DN
3374 for (i = 0; i < NR_MCS_STAT; i++) {
3375 if (i == MCS_SWAP && !do_swap_account)
3376 continue;
14067bb3 3377 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
1dd3a273 3378 }
14067bb3 3379
7f016ee8 3380#ifdef CONFIG_DEBUG_VM
c772be93 3381 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
7f016ee8
KM
3382
3383 {
3384 int nid, zid;
3385 struct mem_cgroup_per_zone *mz;
3386 unsigned long recent_rotated[2] = {0, 0};
3387 unsigned long recent_scanned[2] = {0, 0};
3388
3389 for_each_online_node(nid)
3390 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3391 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3392
3393 recent_rotated[0] +=
3394 mz->reclaim_stat.recent_rotated[0];
3395 recent_rotated[1] +=
3396 mz->reclaim_stat.recent_rotated[1];
3397 recent_scanned[0] +=
3398 mz->reclaim_stat.recent_scanned[0];
3399 recent_scanned[1] +=
3400 mz->reclaim_stat.recent_scanned[1];
3401 }
3402 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3403 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3404 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3405 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3406 }
3407#endif
3408
d2ceb9b7
KH
3409 return 0;
3410}
3411
a7885eb8
KM
3412static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3413{
3414 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3415
3416 return get_swappiness(memcg);
3417}
3418
3419static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3420 u64 val)
3421{
3422 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3423 struct mem_cgroup *parent;
068b38c1 3424
a7885eb8
KM
3425 if (val > 100)
3426 return -EINVAL;
3427
3428 if (cgrp->parent == NULL)
3429 return -EINVAL;
3430
3431 parent = mem_cgroup_from_cont(cgrp->parent);
068b38c1
LZ
3432
3433 cgroup_lock();
3434
a7885eb8
KM
3435 /* If under hierarchy, only empty-root can set this value */
3436 if ((parent->use_hierarchy) ||
068b38c1
LZ
3437 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3438 cgroup_unlock();
a7885eb8 3439 return -EINVAL;
068b38c1 3440 }
a7885eb8
KM
3441
3442 spin_lock(&memcg->reclaim_param_lock);
3443 memcg->swappiness = val;
3444 spin_unlock(&memcg->reclaim_param_lock);
3445
068b38c1
LZ
3446 cgroup_unlock();
3447
a7885eb8
KM
3448 return 0;
3449}
3450
2e72b634
KS
3451static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3452{
3453 struct mem_cgroup_threshold_ary *t;
3454 u64 usage;
3455 int i;
3456
3457 rcu_read_lock();
3458 if (!swap)
2c488db2 3459 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 3460 else
2c488db2 3461 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
3462
3463 if (!t)
3464 goto unlock;
3465
3466 usage = mem_cgroup_usage(memcg, swap);
3467
3468 /*
3469 * current_threshold is the index of the last threshold at or below
3470 * usage.  If that is no longer true, one or more thresholds were
3471 * crossed after the last call of __mem_cgroup_threshold().
3472 */
5407a562 3473 i = t->current_threshold;
2e72b634
KS
3474
3475 /*
3476 * Iterate backward over array of thresholds starting from
3477 * current_threshold and check if a threshold is crossed.
3478 * If none of thresholds below usage is crossed, we read
3479 * only one element of the array here.
3480 */
3481 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3482 eventfd_signal(t->entries[i].eventfd, 1);
3483
3484 /* i = current_threshold + 1 */
3485 i++;
3486
3487 /*
3488 * Iterate forward over array of thresholds starting from
3489 * current_threshold+1 and check if a threshold is crossed.
3490 * If none of thresholds above usage is crossed, we read
3491 * only one element of the array here.
3492 */
3493 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3494 eventfd_signal(t->entries[i].eventfd, 1);
3495
3496 /* Update current_threshold */
5407a562 3497 t->current_threshold = i - 1;
2e72b634
KS
3498unlock:
3499 rcu_read_unlock();
3500}
3501
3502static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3503{
3504 __mem_cgroup_threshold(memcg, false);
3505 if (do_swap_account)
3506 __mem_cgroup_threshold(memcg, true);
3507}
3508
3509static int compare_thresholds(const void *a, const void *b)
3510{
3511 const struct mem_cgroup_threshold *_a = a;
3512 const struct mem_cgroup_threshold *_b = b;
3513
3514 /* explicit compare: the u64 difference would be truncated to int */
 if (_a->threshold > _b->threshold)
 return 1;
 if (_a->threshold < _b->threshold)
 return -1;
 return 0;
3515}
3516
9490ff27
KH
3517static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3518{
3519 struct mem_cgroup_eventfd_list *ev;
3520
3521 list_for_each_entry(ev, &mem->oom_notify, list)
3522 eventfd_signal(ev->eventfd, 1);
3523 return 0;
3524}
3525
3526static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3527{
3528 mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3529}
3530
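/*
 * Usage thresholds are registered from userspace through the cgroup
 * "cgroup.event_control" file, roughly (see Documentation/cgroups/memory.txt
 * for the authoritative description):
 *
 *	echo "<eventfd> <fd of memory.usage_in_bytes> <threshold in bytes>" \
 *		> cgroup.event_control
 *
 * The cgroup core resolves the file descriptors and calls the
 * ->register_event() handler below with the eventfd context and the
 * remaining argument string.
 */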
3531static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3532 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
2e72b634
KS
3533{
3534 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
3535 struct mem_cgroup_thresholds *thresholds;
3536 struct mem_cgroup_threshold_ary *new;
2e72b634
KS
3537 int type = MEMFILE_TYPE(cft->private);
3538 u64 threshold, usage;
2c488db2 3539 int i, size, ret;
2e72b634
KS
3540
3541 ret = res_counter_memparse_write_strategy(args, &threshold);
3542 if (ret)
3543 return ret;
3544
3545 mutex_lock(&memcg->thresholds_lock);
2c488db2 3546
2e72b634 3547 if (type == _MEM)
2c488db2 3548 thresholds = &memcg->thresholds;
2e72b634 3549 else if (type == _MEMSWAP)
2c488db2 3550 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
3551 else
3552 BUG();
3553
3554 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3555
3556 /* Check if a threshold crossed before adding a new one */
2c488db2 3557 if (thresholds->primary)
2e72b634
KS
3558 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3559
2c488db2 3560 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
3561
3562 /* Allocate memory for new array of thresholds */
2c488db2 3563 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 3564 GFP_KERNEL);
2c488db2 3565 if (!new) {
2e72b634
KS
3566 ret = -ENOMEM;
3567 goto unlock;
3568 }
2c488db2 3569 new->size = size;
2e72b634
KS
3570
3571 /* Copy thresholds (if any) to new array */
2c488db2
KS
3572 if (thresholds->primary) {
3573 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 3574 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
3575 }
3576
2e72b634 3577 /* Add new threshold */
2c488db2
KS
3578 new->entries[size - 1].eventfd = eventfd;
3579 new->entries[size - 1].threshold = threshold;
2e72b634
KS
3580
3581 /* Sort thresholds. Registration of a new threshold isn't time-critical */
2c488db2 3582 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
3583 compare_thresholds, NULL);
3584
3585 /* Find current threshold */
2c488db2 3586 new->current_threshold = -1;
2e72b634 3587 for (i = 0; i < size; i++) {
2c488db2 3588 if (new->entries[i].threshold < usage) {
2e72b634 3589 /*
2c488db2
KS
3590 * new->current_threshold will not be used until
3591 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
3592 * it here.
3593 */
2c488db2 3594 ++new->current_threshold;
2e72b634
KS
3595 }
3596 }
3597
2c488db2
KS
3598 /* Free old spare buffer and save old primary buffer as spare */
3599 kfree(thresholds->spare);
3600 thresholds->spare = thresholds->primary;
3601
3602 rcu_assign_pointer(thresholds->primary, new);
2e72b634 3603
907860ed 3604 /* To be sure that nobody uses thresholds */
2e72b634
KS
3605 synchronize_rcu();
3606
2e72b634
KS
3607unlock:
3608 mutex_unlock(&memcg->thresholds_lock);
3609
3610 return ret;
3611}
3612
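/*
 * Note on the primary/spare threshold arrays: registration above allocates
 * a new array, publishes it with rcu_assign_pointer() and keeps the old
 * primary as ->spare.  Unregistration below reuses that spare buffer
 * instead of allocating, so removing a threshold cannot fail for lack of
 * memory.  Both paths wait with synchronize_rcu() before the retired array
 * may be touched again.
 */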
907860ed 3613static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
9490ff27 3614 struct cftype *cft, struct eventfd_ctx *eventfd)
2e72b634
KS
3615{
3616 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
3617 struct mem_cgroup_thresholds *thresholds;
3618 struct mem_cgroup_threshold_ary *new;
2e72b634
KS
3619 int type = MEMFILE_TYPE(cft->private);
3620 u64 usage;
2c488db2 3621 int i, j, size;
2e72b634
KS
3622
3623 mutex_lock(&memcg->thresholds_lock);
3624 if (type == _MEM)
2c488db2 3625 thresholds = &memcg->thresholds;
2e72b634 3626 else if (type == _MEMSWAP)
2c488db2 3627 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
3628 else
3629 BUG();
3630
3631 /*
3632 * Something went wrong if we are trying to unregister a threshold
3633 * while we don't have any thresholds registered.
3634 */
3635 BUG_ON(!thresholds);
3636
3637 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3638
3639 /* Check if a threshold crossed before removing */
3640 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3641
3642 /* Calculate new number of threshold */
2c488db2
KS
3643 size = 0;
3644 for (i = 0; i < thresholds->primary->size; i++) {
3645 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
3646 size++;
3647 }
3648
2c488db2 3649 new = thresholds->spare;
907860ed 3650
2e72b634
KS
3651 /* Set thresholds array to NULL if we don't have thresholds */
3652 if (!size) {
2c488db2
KS
3653 kfree(new);
3654 new = NULL;
907860ed 3655 goto swap_buffers;
2e72b634
KS
3656 }
3657
2c488db2 3658 new->size = size;
2e72b634
KS
3659
3660 /* Copy thresholds and find current threshold */
2c488db2
KS
3661 new->current_threshold = -1;
3662 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3663 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
3664 continue;
3665
2c488db2
KS
3666 new->entries[j] = thresholds->primary->entries[i];
3667 if (new->entries[j].threshold < usage) {
2e72b634 3668 /*
2c488db2 3669 * new->current_threshold will not be used
2e72b634
KS
3670 * until rcu_assign_pointer(), so it's safe to increment
3671 * it here.
3672 */
2c488db2 3673 ++new->current_threshold;
2e72b634
KS
3674 }
3675 j++;
3676 }
3677
907860ed 3678swap_buffers:
2c488db2
KS
3679 /* Swap primary and spare array */
3680 thresholds->spare = thresholds->primary;
3681 rcu_assign_pointer(thresholds->primary, new);
2e72b634 3682
907860ed 3683 /* To be sure that nobody uses thresholds */
2e72b634
KS
3684 synchronize_rcu();
3685
2e72b634 3686 mutex_unlock(&memcg->thresholds_lock);
2e72b634 3687}
c1e862c1 3688
9490ff27
KH
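/*
 * OOM notification: an eventfd registered on memory.oom_control is linked
 * into memcg->oom_notify and signalled by mem_cgroup_oom_notify() when the
 * group goes under OOM.  If the group is already under OOM at registration
 * time, the eventfd is signalled immediately.
 */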
3689static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3690 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3691{
3692 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3693 struct mem_cgroup_eventfd_list *event;
3694 int type = MEMFILE_TYPE(cft->private);
3695
3696 BUG_ON(type != _OOM_TYPE);
3697 event = kmalloc(sizeof(*event), GFP_KERNEL);
3698 if (!event)
3699 return -ENOMEM;
3700
3701 mutex_lock(&memcg_oom_mutex);
3702
3703 event->eventfd = eventfd;
3704 list_add(&event->list, &memcg->oom_notify);
3705
3706 /* already in OOM ? */
3707 if (atomic_read(&memcg->oom_lock))
3708 eventfd_signal(eventfd, 1);
3709 mutex_unlock(&memcg_oom_mutex);
3710
3711 return 0;
3712}
3713
907860ed 3714static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
9490ff27
KH
3715 struct cftype *cft, struct eventfd_ctx *eventfd)
3716{
3717 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3718 struct mem_cgroup_eventfd_list *ev, *tmp;
3719 int type = MEMFILE_TYPE(cft->private);
3720
3721 BUG_ON(type != _OOM_TYPE);
3722
3723 mutex_lock(&memcg_oom_mutex);
3724
3725 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3726 if (ev->eventfd == eventfd) {
3727 list_del(&ev->list);
3728 kfree(ev);
3729 }
3730 }
3731
3732 mutex_unlock(&memcg_oom_mutex);
9490ff27
KH
3733}
3734
3c11ecf4
KH
3735static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3736 struct cftype *cft, struct cgroup_map_cb *cb)
3737{
3738 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3739
3740 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3741
3742 if (atomic_read(&mem->oom_lock))
3743 cb->fill(cb, "under_oom", 1);
3744 else
3745 cb->fill(cb, "under_oom", 0);
3746 return 0;
3747}
3748
3749/*
 * memory.oom_control write handler: val == 1 disables the OOM killer for
 * this memcg, val == 0 re-enables it and wakes any waiters via
 * memcg_oom_recover().  The value cannot be set on the root cgroup, nor
 * under a hierarchical parent, nor once the group has children while
 * use_hierarchy is enabled.
3750 */
3751static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3752 struct cftype *cft, u64 val)
3753{
3754 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3755 struct mem_cgroup *parent;
3756
3757 /* cannot set to root cgroup and only 0 and 1 are allowed */
3758 if (!cgrp->parent || !((val == 0) || (val == 1)))
3759 return -EINVAL;
3760
3761 parent = mem_cgroup_from_cont(cgrp->parent);
3762
3763 cgroup_lock();
3764 /* oom-kill-disable is a flag for subhierarchy. */
3765 if ((parent->use_hierarchy) ||
3766 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3767 cgroup_unlock();
3768 return -EINVAL;
3769 }
3770 mem->oom_kill_disable = val;
4d845ebf
KH
3771 if (!val)
3772 memcg_oom_recover(mem);
3c11ecf4
KH
3773 cgroup_unlock();
3774 return 0;
3775}
3776
8cdea7c0
BS
3777static struct cftype mem_cgroup_files[] = {
3778 {
0eea1030 3779 .name = "usage_in_bytes",
8c7c6e34 3780 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2c3daa72 3781 .read_u64 = mem_cgroup_read,
9490ff27
KH
3782 .register_event = mem_cgroup_usage_register_event,
3783 .unregister_event = mem_cgroup_usage_unregister_event,
8cdea7c0 3784 },
c84872e1
PE
3785 {
3786 .name = "max_usage_in_bytes",
8c7c6e34 3787 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4da 3788 .trigger = mem_cgroup_reset,
c84872e1
PE
3789 .read_u64 = mem_cgroup_read,
3790 },
8cdea7c0 3791 {
0eea1030 3792 .name = "limit_in_bytes",
8c7c6e34 3793 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa 3794 .write_string = mem_cgroup_write,
2c3daa72 3795 .read_u64 = mem_cgroup_read,
8cdea7c0 3796 },
296c81d8
BS
3797 {
3798 .name = "soft_limit_in_bytes",
3799 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3800 .write_string = mem_cgroup_write,
3801 .read_u64 = mem_cgroup_read,
3802 },
8cdea7c0
BS
3803 {
3804 .name = "failcnt",
8c7c6e34 3805 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4da 3806 .trigger = mem_cgroup_reset,
2c3daa72 3807 .read_u64 = mem_cgroup_read,
8cdea7c0 3808 },
d2ceb9b7
KH
3809 {
3810 .name = "stat",
c64745cf 3811 .read_map = mem_control_stat_show,
d2ceb9b7 3812 },
c1e862c1
KH
3813 {
3814 .name = "force_empty",
3815 .trigger = mem_cgroup_force_empty_write,
3816 },
18f59ea7
BS
3817 {
3818 .name = "use_hierarchy",
3819 .write_u64 = mem_cgroup_hierarchy_write,
3820 .read_u64 = mem_cgroup_hierarchy_read,
3821 },
a7885eb8
KM
3822 {
3823 .name = "swappiness",
3824 .read_u64 = mem_cgroup_swappiness_read,
3825 .write_u64 = mem_cgroup_swappiness_write,
3826 },
7dc74be0
DN
3827 {
3828 .name = "move_charge_at_immigrate",
3829 .read_u64 = mem_cgroup_move_charge_read,
3830 .write_u64 = mem_cgroup_move_charge_write,
3831 },
9490ff27
KH
3832 {
3833 .name = "oom_control",
3c11ecf4
KH
3834 .read_map = mem_cgroup_oom_control_read,
3835 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
3836 .register_event = mem_cgroup_oom_register_event,
3837 .unregister_event = mem_cgroup_oom_unregister_event,
3838 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3839 },
8cdea7c0
BS
3840};
3841
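/*
 * Each cftype above becomes a per-cgroup control file named with the
 * subsystem prefix, e.g. memory.usage_in_bytes, memory.limit_in_bytes,
 * memory.stat and memory.oom_control.  The files are created from
 * mem_cgroup_populate() further below via cgroup_add_files(); the
 * swap-extended memory.memsw.* files that follow are only added when
 * swap accounting is active (see register_memsw_files()).
 */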
8c7c6e34
KH
3842#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3843static struct cftype memsw_cgroup_files[] = {
3844 {
3845 .name = "memsw.usage_in_bytes",
3846 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3847 .read_u64 = mem_cgroup_read,
9490ff27
KH
3848 .register_event = mem_cgroup_usage_register_event,
3849 .unregister_event = mem_cgroup_usage_unregister_event,
8c7c6e34
KH
3850 },
3851 {
3852 .name = "memsw.max_usage_in_bytes",
3853 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3854 .trigger = mem_cgroup_reset,
3855 .read_u64 = mem_cgroup_read,
3856 },
3857 {
3858 .name = "memsw.limit_in_bytes",
3859 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3860 .write_string = mem_cgroup_write,
3861 .read_u64 = mem_cgroup_read,
3862 },
3863 {
3864 .name = "memsw.failcnt",
3865 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3866 .trigger = mem_cgroup_reset,
3867 .read_u64 = mem_cgroup_read,
3868 },
3869};
3870
3871static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3872{
3873 if (!do_swap_account)
3874 return 0;
3875 return cgroup_add_files(cont, ss, memsw_cgroup_files,
3876 ARRAY_SIZE(memsw_cgroup_files));
3877};
3878#else
3879static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3880{
3881 return 0;
3882}
3883#endif
3884
6d12e2d8
KH
3885static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3886{
3887 struct mem_cgroup_per_node *pn;
1ecaab2b 3888 struct mem_cgroup_per_zone *mz;
b69408e8 3889 enum lru_list l;
41e3355d 3890 int zone, tmp = node;
1ecaab2b
KH
3891 /*
3892 * This routine is called for every possible node.
3893 * But it is a BUG to call kmalloc() against an offline node.
3894 *
3895 * TODO: this routine can waste a lot of memory for nodes which will
3896 * never be onlined. It would be better to use a memory hotplug
3897 * callback function.
3898 */
41e3355d
KH
3899 if (!node_state(node, N_NORMAL_MEMORY))
3900 tmp = -1;
3901 pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
3902 if (!pn)
3903 return 1;
1ecaab2b 3904
6d12e2d8
KH
3905 mem->info.nodeinfo[node] = pn;
3906 memset(pn, 0, sizeof(*pn));
1ecaab2b
KH
3907
3908 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3909 mz = &pn->zoneinfo[zone];
b69408e8
CL
3910 for_each_lru(l)
3911 INIT_LIST_HEAD(&mz->lists[l]);
f64c3f54 3912 mz->usage_in_excess = 0;
4e416953
BS
3913 mz->on_tree = false;
3914 mz->mem = mem;
1ecaab2b 3915 }
6d12e2d8
KH
3916 return 0;
3917}
3918
1ecaab2b
KH
3919static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3920{
3921 kfree(mem->info.nodeinfo[node]);
3922}
3923
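/*
 * struct mem_cgroup scales with MAX_NUMNODES and may well exceed a page
 * (see the comment below), hence the kmalloc()/vmalloc() split in
 * mem_cgroup_alloc(); the per-cpu statistics are allocated separately
 * with alloc_percpu().
 */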
33327948
KH
3924static struct mem_cgroup *mem_cgroup_alloc(void)
3925{
3926 struct mem_cgroup *mem;
c62b1a3b 3927 int size = sizeof(struct mem_cgroup);
33327948 3928
c62b1a3b 3929 /* Can be very big if MAX_NUMNODES is very big */
c8dad2bb
JB
3930 if (size < PAGE_SIZE)
3931 mem = kmalloc(size, GFP_KERNEL);
33327948 3932 else
c8dad2bb 3933 mem = vmalloc(size);
33327948 3934
e7bbcdf3
DC
3935 if (!mem)
3936 return NULL;
3937
3938 memset(mem, 0, size);
c62b1a3b
KH
3939 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3940 if (!mem->stat) {
3941 if (size < PAGE_SIZE)
3942 kfree(mem);
3943 else
3944 vfree(mem);
3945 mem = NULL;
3946 }
33327948
KH
3947 return mem;
3948}
3949
8c7c6e34
KH
3950/*
3951 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
3952 * (Scanning them all at force_empty would be too costly...)
3953 *
3954 * Instead of clearing all references at force_empty, we remember
3955 * the number of references from swap_cgroup and free the mem_cgroup
3956 * when it goes down to 0.
3957 *
8c7c6e34
KH
3958 * Removal of cgroup itself succeeds regardless of refs from swap.
3959 */
3960
a7ba0eef 3961static void __mem_cgroup_free(struct mem_cgroup *mem)
33327948 3962{
08e552c6
KH
3963 int node;
3964
f64c3f54 3965 mem_cgroup_remove_from_trees(mem);
04046e1a
KH
3966 free_css_id(&mem_cgroup_subsys, &mem->css);
3967
08e552c6
KH
3968 for_each_node_state(node, N_POSSIBLE)
3969 free_mem_cgroup_per_zone_info(mem, node);
3970
c62b1a3b
KH
3971 free_percpu(mem->stat);
3972 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
33327948
KH
3973 kfree(mem);
3974 else
3975 vfree(mem);
3976}
3977
8c7c6e34
KH
3978static void mem_cgroup_get(struct mem_cgroup *mem)
3979{
3980 atomic_inc(&mem->refcnt);
3981}
3982
483c30b5 3983static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
8c7c6e34 3984{
483c30b5 3985 if (atomic_sub_and_test(count, &mem->refcnt)) {
7bcc1bb1 3986 struct mem_cgroup *parent = parent_mem_cgroup(mem);
a7ba0eef 3987 __mem_cgroup_free(mem);
7bcc1bb1
DN
3988 if (parent)
3989 mem_cgroup_put(parent);
3990 }
8c7c6e34
KH
3991}
3992
483c30b5
DN
3993static void mem_cgroup_put(struct mem_cgroup *mem)
3994{
3995 __mem_cgroup_put(mem, 1);
3996}
3997
7bcc1bb1
DN
3998/*
3999 * Returns the parent mem_cgroup in the memcg hierarchy (set up when
 * use_hierarchy is enabled), or NULL if @mem has no hierarchical parent.
4000 */
4001static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4002{
4003 if (!mem->res.parent)
4004 return NULL;
4005 return mem_cgroup_from_res_counter(mem->res.parent, res);
4006}
33327948 4007
c077719b
KH
4008#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4009static void __init enable_swap_cgroup(void)
4010{
f8d66542 4011 if (!mem_cgroup_disabled() && really_do_swap_account)
c077719b
KH
4012 do_swap_account = 1;
4013}
4014#else
4015static void __init enable_swap_cgroup(void)
4016{
4017}
4018#endif
4019
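/*
 * The soft limit tree is a per-node, per-zone set of red-black trees in
 * which memcgs exceeding their soft limit are kept, ordered by how far
 * they exceed it (usage_in_excess), so that reclaim can pick the worst
 * offender first.  The init function below only sets up the empty roots
 * and their spinlocks.
 */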
f64c3f54
BS
4020static int mem_cgroup_soft_limit_tree_init(void)
4021{
4022 struct mem_cgroup_tree_per_node *rtpn;
4023 struct mem_cgroup_tree_per_zone *rtpz;
4024 int tmp, node, zone;
4025
4026 for_each_node_state(node, N_POSSIBLE) {
4027 tmp = node;
4028 if (!node_state(node, N_NORMAL_MEMORY))
4029 tmp = -1;
4030 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4031 if (!rtpn)
4032 return 1;
4033
4034 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4035
4036 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4037 rtpz = &rtpn->rb_tree_per_zone[zone];
4038 rtpz->rb_root = RB_ROOT;
4039 spin_lock_init(&rtpz->lock);
4040 }
4041 }
4042 return 0;
4043}
4044
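/*
 * Cgroup creation callback.  The root memcg additionally enables swap
 * accounting, initializes the soft limit tree and the per-cpu charge
 * stocks.  A child inherits use_hierarchy and oom_kill_disable from its
 * parent; with use_hierarchy the child's res_counters are chained to the
 * parent's and a reference on the parent is taken (dropped again via
 * mem_cgroup_put() when the child is freed).
 */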
0eb253e2 4045static struct cgroup_subsys_state * __ref
8cdea7c0
BS
4046mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4047{
28dbc4b6 4048 struct mem_cgroup *mem, *parent;
04046e1a 4049 long error = -ENOMEM;
6d12e2d8 4050 int node;
8cdea7c0 4051
c8dad2bb
JB
4052 mem = mem_cgroup_alloc();
4053 if (!mem)
04046e1a 4054 return ERR_PTR(error);
78fb7466 4055
6d12e2d8
KH
4056 for_each_node_state(node, N_POSSIBLE)
4057 if (alloc_mem_cgroup_per_zone_info(mem, node))
4058 goto free_out;
f64c3f54 4059
c077719b 4060 /* root ? */
28dbc4b6 4061 if (cont->parent == NULL) {
cdec2e42 4062 int cpu;
c077719b 4063 enable_swap_cgroup();
28dbc4b6 4064 parent = NULL;
4b3bde4c 4065 root_mem_cgroup = mem;
f64c3f54
BS
4066 if (mem_cgroup_soft_limit_tree_init())
4067 goto free_out;
cdec2e42
KH
4068 for_each_possible_cpu(cpu) {
4069 struct memcg_stock_pcp *stock =
4070 &per_cpu(memcg_stock, cpu);
4071 INIT_WORK(&stock->work, drain_local_stock);
4072 }
4073 hotcpu_notifier(memcg_stock_cpu_callback, 0);
18f59ea7 4074 } else {
28dbc4b6 4075 parent = mem_cgroup_from_cont(cont->parent);
18f59ea7 4076 mem->use_hierarchy = parent->use_hierarchy;
3c11ecf4 4077 mem->oom_kill_disable = parent->oom_kill_disable;
18f59ea7 4078 }
28dbc4b6 4079
18f59ea7
BS
4080 if (parent && parent->use_hierarchy) {
4081 res_counter_init(&mem->res, &parent->res);
4082 res_counter_init(&mem->memsw, &parent->memsw);
7bcc1bb1
DN
4083 /*
4084 * We increment refcnt of the parent to ensure that we can
4085 * safely access it on res_counter_charge/uncharge.
4086 * This refcnt will be decremented when freeing this
4087 * mem_cgroup(see mem_cgroup_put).
4088 */
4089 mem_cgroup_get(parent);
18f59ea7
BS
4090 } else {
4091 res_counter_init(&mem->res, NULL);
4092 res_counter_init(&mem->memsw, NULL);
4093 }
04046e1a 4094 mem->last_scanned_child = 0;
2733c06a 4095 spin_lock_init(&mem->reclaim_param_lock);
9490ff27 4096 INIT_LIST_HEAD(&mem->oom_notify);
6d61ef40 4097
a7885eb8
KM
4098 if (parent)
4099 mem->swappiness = get_swappiness(parent);
a7ba0eef 4100 atomic_set(&mem->refcnt, 1);
7dc74be0 4101 mem->move_charge_at_immigrate = 0;
2e72b634 4102 mutex_init(&mem->thresholds_lock);
8cdea7c0 4103 return &mem->css;
6d12e2d8 4104free_out:
a7ba0eef 4105 __mem_cgroup_free(mem);
4b3bde4c 4106 root_mem_cgroup = NULL;
04046e1a 4107 return ERR_PTR(error);
8cdea7c0
BS
4108}
4109
ec64f515 4110static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
df878fb0
KH
4111 struct cgroup *cont)
4112{
4113 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
ec64f515
KH
4114
4115 return mem_cgroup_force_empty(mem, false);
df878fb0
KH
4116}
4117
8cdea7c0
BS
4118static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4119 struct cgroup *cont)
4120{
c268e994 4121 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
c268e994 4122
c268e994 4123 mem_cgroup_put(mem);
8cdea7c0
BS
4124}
4125
4126static int mem_cgroup_populate(struct cgroup_subsys *ss,
4127 struct cgroup *cont)
4128{
8c7c6e34
KH
4129 int ret;
4130
4131 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4132 ARRAY_SIZE(mem_cgroup_files));
4133
4134 if (!ret)
4135 ret = register_memsw_files(cont, ss);
4136 return ret;
8cdea7c0
BS
4137}
4138
02491447 4139#ifdef CONFIG_MMU
7dc74be0 4140/* Handlers for move charge at task migration. */
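/*
 * mem_cgroup_do_precharge() below first tries to charge the whole batch
 * against mc.to with a single res_counter_charge() (plus the memsw counter
 * when swap accounting is enabled).  If that fails, it falls back to
 * charging page by page, rescheduling every PRECHARGE_COUNT_AT_ONCE pages
 * and aborting on a pending signal; leftover precharges are undone later
 * by mem_cgroup_clear_mc().
 */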
854ffa8d
DN
4141#define PRECHARGE_COUNT_AT_ONCE 256
4142static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 4143{
854ffa8d
DN
4144 int ret = 0;
4145 int batch_count = PRECHARGE_COUNT_AT_ONCE;
4ffef5fe
DN
4146 struct mem_cgroup *mem = mc.to;
4147
854ffa8d
DN
4148 if (mem_cgroup_is_root(mem)) {
4149 mc.precharge += count;
4150 /* we don't need css_get for root */
4151 return ret;
4152 }
4153 /* try to charge at once */
4154 if (count > 1) {
4155 struct res_counter *dummy;
4156 /*
4157 * "mem" cannot be under rmdir() because we've already checked
4158 * by cgroup_lock_live_cgroup() that it is not removed and we
4159 * are still under the same cgroup_mutex. So we can postpone
4160 * css_get().
4161 */
4162 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4163 goto one_by_one;
4164 if (do_swap_account && res_counter_charge(&mem->memsw,
4165 PAGE_SIZE * count, &dummy)) {
4166 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4167 goto one_by_one;
4168 }
4169 mc.precharge += count;
4170 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
4171 WARN_ON_ONCE(count > INT_MAX);
4172 __css_get(&mem->css, (int)count);
4173 return ret;
4174 }
4175one_by_one:
4176 /* fall back to one by one charge */
4177 while (count--) {
4178 if (signal_pending(current)) {
4179 ret = -EINTR;
4180 break;
4181 }
4182 if (!batch_count--) {
4183 batch_count = PRECHARGE_COUNT_AT_ONCE;
4184 cond_resched();
4185 }
430e4863 4186 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
854ffa8d
DN
4187 if (ret || !mem)
4188 /* mem_cgroup_clear_mc() will do uncharge later */
4189 return -ENOMEM;
4190 mc.precharge++;
4191 }
4ffef5fe
DN
4192 return ret;
4193}
4194
4195/**
4196 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4197 * @vma: the vma to which the pte to be checked belongs
4198 * @addr: the address corresponding to the pte to be checked
4199 * @ptent: the pte to be checked
02491447 4200 * @target: pointer where the target page or swap entry will be stored (can be NULL)
4ffef5fe
DN
4201 *
4202 * Returns
4203 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4204 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4205 * move charge. If @target is not NULL, the page is stored in target->page
4206 * with an extra refcount taken (callers should handle it).
02491447
DN
4207 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4208 * target for charge migration. If @target is not NULL, the entry is stored
4209 * in target->ent.
4ffef5fe
DN
4210 *
4211 * Called with pte lock held.
4212 */
4ffef5fe
DN
4213union mc_target {
4214 struct page *page;
02491447 4215 swp_entry_t ent;
4ffef5fe
DN
4216};
4217
4ffef5fe
DN
4218enum mc_target_type {
4219 MC_TARGET_NONE, /* not used */
4220 MC_TARGET_PAGE,
02491447 4221 MC_TARGET_SWAP,
4ffef5fe
DN
4222};
4223
90254a65
DN
4224static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4225 unsigned long addr, pte_t ptent)
4ffef5fe 4226{
90254a65 4227 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 4228
90254a65
DN
4229 if (!page || !page_mapped(page))
4230 return NULL;
4231 if (PageAnon(page)) {
4232 /* we don't move shared anon */
4233 if (!move_anon() || page_mapcount(page) > 2)
4234 return NULL;
87946a72
DN
4235 } else if (!move_file())
4236 /* we ignore mapcount for file pages */
90254a65
DN
4237 return NULL;
4238 if (!get_page_unless_zero(page))
4239 return NULL;
4240
4241 return page;
4242}
4243
4244static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4245 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4246{
4247 int usage_count;
4248 struct page *page = NULL;
4249 swp_entry_t ent = pte_to_swp_entry(ptent);
4250
4251 if (!move_anon() || non_swap_entry(ent))
4252 return NULL;
4253 usage_count = mem_cgroup_count_swap_user(ent, &page);
4254 if (usage_count > 1) { /* we don't move shared anon */
02491447
DN
4255 if (page)
4256 put_page(page);
90254a65 4257 return NULL;
02491447 4258 }
90254a65
DN
4259 if (do_swap_account)
4260 entry->val = ent.val;
4261
4262 return page;
4263}
4264
87946a72
DN
4265static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4266 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4267{
4268 struct page *page = NULL;
4269 struct inode *inode;
4270 struct address_space *mapping;
4271 pgoff_t pgoff;
4272
4273 if (!vma->vm_file) /* anonymous vma */
4274 return NULL;
4275 if (!move_file())
4276 return NULL;
4277
4278 inode = vma->vm_file->f_path.dentry->d_inode;
4279 mapping = vma->vm_file->f_mapping;
4280 if (pte_none(ptent))
4281 pgoff = linear_page_index(vma, addr);
4282 else /* pte_file(ptent) is true */
4283 pgoff = pte_to_pgoff(ptent);
4284
4285 /* The page is moved even if it is not in this task's RSS (i.e. not faulted in by this task). */
4286 if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4287 page = find_get_page(mapping, pgoff);
4288 } else { /* shmem/tmpfs file. we should take account of swap too. */
4289 swp_entry_t ent;
4290 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4291 if (do_swap_account)
4292 entry->val = ent.val;
4293 }
4294
4295 return page;
4296}
4297
90254a65
DN
4298static int is_target_pte_for_mc(struct vm_area_struct *vma,
4299 unsigned long addr, pte_t ptent, union mc_target *target)
4300{
4301 struct page *page = NULL;
4302 struct page_cgroup *pc;
4303 int ret = 0;
4304 swp_entry_t ent = { .val = 0 };
4305
4306 if (pte_present(ptent))
4307 page = mc_handle_present_pte(vma, addr, ptent);
4308 else if (is_swap_pte(ptent))
4309 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
87946a72
DN
4310 else if (pte_none(ptent) || pte_file(ptent))
4311 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
4312
4313 if (!page && !ent.val)
4314 return 0;
02491447
DN
4315 if (page) {
4316 pc = lookup_page_cgroup(page);
4317 /*
4318 * Do only loose check w/o page_cgroup lock.
4319 * mem_cgroup_move_account() checks the pc is valid or not under
4320 * the lock.
4321 */
4322 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4323 ret = MC_TARGET_PAGE;
4324 if (target)
4325 target->page = page;
4326 }
4327 if (!ret || !target)
4328 put_page(page);
4329 }
90254a65
DN
4330 /* There is a swap entry and a page doesn't exist or isn't charged */
4331 if (ent.val && !ret &&
7f0f1546
KH
4332 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4333 ret = MC_TARGET_SWAP;
4334 if (target)
4335 target->ent = ent;
4ffef5fe 4336 }
4ffef5fe
DN
4337 return ret;
4338}
4339
4340static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4341 unsigned long addr, unsigned long end,
4342 struct mm_walk *walk)
4343{
4344 struct vm_area_struct *vma = walk->private;
4345 pte_t *pte;
4346 spinlock_t *ptl;
4347
4348 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4349 for (; addr != end; pte++, addr += PAGE_SIZE)
4350 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4351 mc.precharge++; /* increment precharge temporarily */
4352 pte_unmap_unlock(pte - 1, ptl);
4353 cond_resched();
4354
7dc74be0
DN
4355 return 0;
4356}
4357
4ffef5fe
DN
4358static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4359{
4360 unsigned long precharge;
4361 struct vm_area_struct *vma;
4362
4363 down_read(&mm->mmap_sem);
4364 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4365 struct mm_walk mem_cgroup_count_precharge_walk = {
4366 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4367 .mm = mm,
4368 .private = vma,
4369 };
4370 if (is_vm_hugetlb_page(vma))
4371 continue;
4ffef5fe
DN
4372 walk_page_range(vma->vm_start, vma->vm_end,
4373 &mem_cgroup_count_precharge_walk);
4374 }
4375 up_read(&mm->mmap_sem);
4376
4377 precharge = mc.precharge;
4378 mc.precharge = 0;
4379
4380 return precharge;
4381}
4382
4ffef5fe
DN
4383static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4384{
854ffa8d 4385 return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4ffef5fe
DN
4386}
4387
4388static void mem_cgroup_clear_mc(void)
4389{
4390 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d
DN
4391 if (mc.precharge) {
4392 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4393 mc.precharge = 0;
3c11ecf4 4394 memcg_oom_recover(mc.to);
854ffa8d
DN
4395 }
4396 /*
4397 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4398 * we must uncharge here.
4399 */
4400 if (mc.moved_charge) {
4401 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4402 mc.moved_charge = 0;
3c11ecf4 4403 memcg_oom_recover(mc.from);
4ffef5fe 4404 }
483c30b5
DN
4405 /* we must fixup refcnts and charges */
4406 if (mc.moved_swap) {
4407 WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4408 /* uncharge swap account from the old cgroup */
4409 if (!mem_cgroup_is_root(mc.from))
4410 res_counter_uncharge(&mc.from->memsw,
4411 PAGE_SIZE * mc.moved_swap);
4412 __mem_cgroup_put(mc.from, mc.moved_swap);
4413
4414 if (!mem_cgroup_is_root(mc.to)) {
4415 /*
4416 * we charged both to->res and to->memsw, so we should
4417 * uncharge to->res.
4418 */
4419 res_counter_uncharge(&mc.to->res,
4420 PAGE_SIZE * mc.moved_swap);
4421 VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4422 __css_put(&mc.to->css, mc.moved_swap);
4423 }
4424 /* we've already done mem_cgroup_get(mc.to) */
4425
4426 mc.moved_swap = 0;
4427 }
4ffef5fe
DN
4428 mc.from = NULL;
4429 mc.to = NULL;
8033b97c
DN
4430 mc.moving_task = NULL;
4431 wake_up_all(&mc.waitq);
4ffef5fe
DN
4432}
4433
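/*
 * Move-charge flow at task migration, as implemented below:
 *  can_attach()    - if move_charge_at_immigrate is set, walk the task's
 *                    page tables to count candidate pages and precharge
 *                    that many pages to the destination (mc.to).
 *  attach()        - mem_cgroup_move_task() walks the page tables again
 *                    and moves each candidate page or swap entry from
 *                    mc.from to mc.to, consuming the precharges.
 *  cancel_attach() - mem_cgroup_clear_mc() undoes unused precharges and
 *                    fixes up refcounts and swap charges.
 */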
7dc74be0
DN
4434static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4435 struct cgroup *cgroup,
4436 struct task_struct *p,
4437 bool threadgroup)
4438{
4439 int ret = 0;
4440 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4441
4442 if (mem->move_charge_at_immigrate) {
4443 struct mm_struct *mm;
4444 struct mem_cgroup *from = mem_cgroup_from_task(p);
4445
4446 VM_BUG_ON(from == mem);
4447
4448 mm = get_task_mm(p);
4449 if (!mm)
4450 return 0;
7dc74be0 4451 /* We move charges only when we move the owner of the mm */
4ffef5fe
DN
4452 if (mm->owner == p) {
4453 VM_BUG_ON(mc.from);
4454 VM_BUG_ON(mc.to);
4455 VM_BUG_ON(mc.precharge);
854ffa8d 4456 VM_BUG_ON(mc.moved_charge);
483c30b5 4457 VM_BUG_ON(mc.moved_swap);
8033b97c 4458 VM_BUG_ON(mc.moving_task);
4ffef5fe
DN
4459 mc.from = from;
4460 mc.to = mem;
4461 mc.precharge = 0;
854ffa8d 4462 mc.moved_charge = 0;
483c30b5 4463 mc.moved_swap = 0;
8033b97c 4464 mc.moving_task = current;
4ffef5fe
DN
4465
4466 ret = mem_cgroup_precharge_mc(mm);
4467 if (ret)
4468 mem_cgroup_clear_mc();
4469 }
7dc74be0
DN
4470 mmput(mm);
4471 }
4472 return ret;
4473}
4474
4475static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4476 struct cgroup *cgroup,
4477 struct task_struct *p,
4478 bool threadgroup)
4479{
4ffef5fe 4480 mem_cgroup_clear_mc();
7dc74be0
DN
4481}
4482
4ffef5fe
DN
4483static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4484 unsigned long addr, unsigned long end,
4485 struct mm_walk *walk)
7dc74be0 4486{
4ffef5fe
DN
4487 int ret = 0;
4488 struct vm_area_struct *vma = walk->private;
4489 pte_t *pte;
4490 spinlock_t *ptl;
4491
4492retry:
4493 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4494 for (; addr != end; addr += PAGE_SIZE) {
4495 pte_t ptent = *(pte++);
4496 union mc_target target;
4497 int type;
4498 struct page *page;
4499 struct page_cgroup *pc;
02491447 4500 swp_entry_t ent;
4ffef5fe
DN
4501
4502 if (!mc.precharge)
4503 break;
4504
4505 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4506 switch (type) {
4507 case MC_TARGET_PAGE:
4508 page = target.page;
4509 if (isolate_lru_page(page))
4510 goto put;
4511 pc = lookup_page_cgroup(page);
854ffa8d
DN
4512 if (!mem_cgroup_move_account(pc,
4513 mc.from, mc.to, false)) {
4ffef5fe 4514 mc.precharge--;
854ffa8d
DN
4515 /* we uncharge from mc.from later. */
4516 mc.moved_charge++;
4ffef5fe
DN
4517 }
4518 putback_lru_page(page);
4519put: /* is_target_pte_for_mc() gets the page */
4520 put_page(page);
4521 break;
02491447
DN
4522 case MC_TARGET_SWAP:
4523 ent = target.ent;
483c30b5
DN
4524 if (!mem_cgroup_move_swap_account(ent,
4525 mc.from, mc.to, false)) {
02491447 4526 mc.precharge--;
483c30b5
DN
4527 /* we fixup refcnts and charges later. */
4528 mc.moved_swap++;
4529 }
02491447 4530 break;
4ffef5fe
DN
4531 default:
4532 break;
4533 }
4534 }
4535 pte_unmap_unlock(pte - 1, ptl);
4536 cond_resched();
4537
4538 if (addr != end) {
4539 /*
4540 * We have consumed all precharges we got in can_attach().
4541 * We try to charge one page at a time, but we don't make any
4542 * additional charges to mc.to once a charge has failed during the
4543 * attach() phase.
4544 */
854ffa8d 4545 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
4546 if (!ret)
4547 goto retry;
4548 }
4549
4550 return ret;
4551}
4552
4553static void mem_cgroup_move_charge(struct mm_struct *mm)
4554{
4555 struct vm_area_struct *vma;
4556
4557 lru_add_drain_all();
4558 down_read(&mm->mmap_sem);
4559 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4560 int ret;
4561 struct mm_walk mem_cgroup_move_charge_walk = {
4562 .pmd_entry = mem_cgroup_move_charge_pte_range,
4563 .mm = mm,
4564 .private = vma,
4565 };
4566 if (is_vm_hugetlb_page(vma))
4567 continue;
4ffef5fe
DN
4568 ret = walk_page_range(vma->vm_start, vma->vm_end,
4569 &mem_cgroup_move_charge_walk);
4570 if (ret)
4571 /*
4572 * means we have consumed all precharges and failed in
4573 * doing additional charge. Just abandon here.
4574 */
4575 break;
4576 }
4577 up_read(&mm->mmap_sem);
7dc74be0
DN
4578}
4579
67e465a7
BS
4580static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4581 struct cgroup *cont,
4582 struct cgroup *old_cont,
be367d09
BB
4583 struct task_struct *p,
4584 bool threadgroup)
67e465a7 4585{
4ffef5fe
DN
4586 struct mm_struct *mm;
4587
4588 if (!mc.to)
4589 /* no need to move charge */
4590 return;
4591
4592 mm = get_task_mm(p);
4593 if (mm) {
4594 mem_cgroup_move_charge(mm);
4595 mmput(mm);
4596 }
4597 mem_cgroup_clear_mc();
67e465a7 4598}
5cfb80a7
DN
4599#else /* !CONFIG_MMU */
4600static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4601 struct cgroup *cgroup,
4602 struct task_struct *p,
4603 bool threadgroup)
4604{
4605 return 0;
4606}
4607static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4608 struct cgroup *cgroup,
4609 struct task_struct *p,
4610 bool threadgroup)
4611{
4612}
4613static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4614 struct cgroup *cont,
4615 struct cgroup *old_cont,
4616 struct task_struct *p,
4617 bool threadgroup)
4618{
4619}
4620#endif
67e465a7 4621
8cdea7c0
BS
4622struct cgroup_subsys mem_cgroup_subsys = {
4623 .name = "memory",
4624 .subsys_id = mem_cgroup_subsys_id,
4625 .create = mem_cgroup_create,
df878fb0 4626 .pre_destroy = mem_cgroup_pre_destroy,
8cdea7c0
BS
4627 .destroy = mem_cgroup_destroy,
4628 .populate = mem_cgroup_populate,
7dc74be0
DN
4629 .can_attach = mem_cgroup_can_attach,
4630 .cancel_attach = mem_cgroup_cancel_attach,
67e465a7 4631 .attach = mem_cgroup_move_task,
6d12e2d8 4632 .early_init = 0,
04046e1a 4633 .use_id = 1,
8cdea7c0 4634};
c077719b
KH
4635
4636#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4637
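/*
 * Booting with "noswapaccount" on the kernel command line clears
 * really_do_swap_account, so swap accounting stays disabled even when
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in (see enable_swap_cgroup()
 * above).
 */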
4638static int __init disable_swap_account(char *s)
4639{
4640 really_do_swap_account = 0;
4641 return 1;
4642}
4643__setup("noswapaccount", disable_swap_account);
4644#endif