memcg: remove mem from arg of charge_common
[net-next-2.6.git] / mm / memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include "internal.h"
51
52 #include <asm/uaccess.h>
53
54 #include <trace/events/vmscan.h>
55
56 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
57 #define MEM_CGROUP_RECLAIM_RETRIES      5
58 struct mem_cgroup *root_mem_cgroup __read_mostly;
59
60 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
61 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
62 int do_swap_account __read_mostly;
63 static int really_do_swap_account __initdata = 1; /* to remember boot option */
64 #else
65 #define do_swap_account         (0)
66 #endif
67
68 /*
69  * The per-memcg event counter is incremented at every pagein/pageout. This
70  * counter is used to trigger some periodic events. This is straightforward
71  * and better than using jiffies etc. to handle periodic memcg events.
72  *
73  * These values will be used as !((event) & ((1 << (thresh)) - 1))
74  */
75 #define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
76 #define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
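/*
 * Example: with THRESHOLDS_EVENTS_THRESH == 7, the check
 * !(event & ((1 << 7) - 1)) succeeds once every 128 events
 * (128 & 127 == 0, while 129 & 127 == 1), so thresholds are
 * evaluated once per 128 pagein/pageout events; with
 * SOFTLIMIT_EVENTS_THRESH == 10 the soft limit tree is updated
 * only once per 1024 events.
 */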
77
78 /*
79  * Statistics for memory cgroup.
80  */
81 enum mem_cgroup_stat_index {
82         /*
83          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
84          */
85         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
86         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
87         MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
88         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
89         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
90         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
91         MEM_CGROUP_EVENTS,      /* incremented at every pagein/pageout */
92
93         MEM_CGROUP_STAT_NSTATS,
94 };
95
96 struct mem_cgroup_stat_cpu {
97         s64 count[MEM_CGROUP_STAT_NSTATS];
98 };
99
100 /*
101  * per-zone information in memory controller.
102  */
103 struct mem_cgroup_per_zone {
104         /*
105          * spin_lock to protect the per cgroup LRU
106          */
107         struct list_head        lists[NR_LRU_LISTS];
108         unsigned long           count[NR_LRU_LISTS];
109
110         struct zone_reclaim_stat reclaim_stat;
111         struct rb_node          tree_node;      /* RB tree node */
112         unsigned long long      usage_in_excess;/* Set to the value by which */
113                                                 /* the soft limit is exceeded*/
114         bool                    on_tree;
115         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
116                                                 /* use container_of        */
117 };
118 /* Macro for accessing counter */
119 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
120
121 struct mem_cgroup_per_node {
122         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
123 };
124
125 struct mem_cgroup_lru_info {
126         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
127 };
128
129 /*
130  * Cgroups above their limits are maintained in a RB-Tree, independent of
131  * their hierarchy representation
132  */
133
134 struct mem_cgroup_tree_per_zone {
135         struct rb_root rb_root;
136         spinlock_t lock;
137 };
138
139 struct mem_cgroup_tree_per_node {
140         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
141 };
142
143 struct mem_cgroup_tree {
144         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
145 };
146
147 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
148
149 struct mem_cgroup_threshold {
150         struct eventfd_ctx *eventfd;
151         u64 threshold;
152 };
153
154 /* For threshold */
155 struct mem_cgroup_threshold_ary {
156         /* Index in entries[] of the threshold just below current usage. */
157         int current_threshold;
158         /* Size of entries[] */
159         unsigned int size;
160         /* Array of thresholds */
161         struct mem_cgroup_threshold entries[0];
162 };
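/*
 * Example: if entries[] holds thresholds of 4M, 8M and 16M and current
 * usage is 10M, current_threshold is 1 (the 8M entry), i.e. the index
 * of the last threshold that does not exceed the current usage.
 */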
163
164 struct mem_cgroup_thresholds {
165         /* Primary thresholds array */
166         struct mem_cgroup_threshold_ary *primary;
167         /*
168          * Spare threshold array.
169          * This is needed to make mem_cgroup_unregister_event() "never fail".
170          * It must be able to store at least primary->size - 1 entries.
171          */
172         struct mem_cgroup_threshold_ary *spare;
173 };
174
175 /* for OOM */
176 struct mem_cgroup_eventfd_list {
177         struct list_head list;
178         struct eventfd_ctx *eventfd;
179 };
180
181 static void mem_cgroup_threshold(struct mem_cgroup *mem);
182 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
183
184 /*
185  * The memory controller data structure. The memory controller controls both
186  * page cache and RSS per cgroup. We would eventually like to provide
187  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
188  * to help the administrator determine what knobs to tune.
189  *
190  * TODO: Add a water mark for the memory controller. Reclaim will begin when
191  * we hit the water mark. Maybe even add a low water mark, such that
192  * no reclaim occurs from a cgroup at its low water mark; this is
193  * a feature that will be implemented much later in the future.
194  */
195 struct mem_cgroup {
196         struct cgroup_subsys_state css;
197         /*
198          * the counter to account for memory usage
199          */
200         struct res_counter res;
201         /*
202          * the counter to account for mem+swap usage.
203          */
204         struct res_counter memsw;
205         /*
206          * Per cgroup active and inactive list, similar to the
207          * per zone LRU lists.
208          */
209         struct mem_cgroup_lru_info info;
210
211         /*
212           protects reclaim-related members.
213         */
214         spinlock_t reclaim_param_lock;
215
216         /*
217          * While reclaiming in a hierarchy, we cache the last child we
218          * reclaimed from.
219          */
220         int last_scanned_child;
221         /*
222          * Should the accounting and control be hierarchical, per subtree?
223          */
224         bool use_hierarchy;
225         atomic_t        oom_lock;
226         atomic_t        refcnt;
227
228         unsigned int    swappiness;
229         /* OOM-Killer disable */
230         int             oom_kill_disable;
231
232         /* set when res.limit == memsw.limit */
233         bool            memsw_is_minimum;
234
235         /* protect arrays of thresholds */
236         struct mutex thresholds_lock;
237
238         /* thresholds for memory usage. RCU-protected */
239         struct mem_cgroup_thresholds thresholds;
240
241         /* thresholds for mem+swap usage. RCU-protected */
242         struct mem_cgroup_thresholds memsw_thresholds;
243
244         /* For oom notifier event fd */
245         struct list_head oom_notify;
246
247         /*
248          * Should we move charges of a task when a task is moved into this
249          * mem_cgroup ? And what type of charges should we move ?
250          */
251         unsigned long   move_charge_at_immigrate;
252         /*
253          * percpu counter.
254          */
255         struct mem_cgroup_stat_cpu *stat;
256 };
257
258 /* Stuff for moving charges at task migration. */
259 /*
260  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
261  * left-shifted bitmap of these types.
262  */
263 enum move_type {
264         MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
265         MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
266         NR_MOVE_TYPE,
267 };
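/*
 * Example: move_charge_at_immigrate is a bitmap indexed by these types,
 * so a value of (1 << MOVE_CHARGE_TYPE_ANON) enables moving of private
 * anonymous pages only, while additionally setting
 * (1 << MOVE_CHARGE_TYPE_FILE) also enables moving of file pages;
 * move_anon() and move_file() below test the corresponding bits of
 * mc.to->move_charge_at_immigrate.
 */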
268
269 /* "mc" and its members are protected by cgroup_mutex */
270 static struct move_charge_struct {
271         spinlock_t        lock; /* for from, to, moving_task */
272         struct mem_cgroup *from;
273         struct mem_cgroup *to;
274         unsigned long precharge;
275         unsigned long moved_charge;
276         unsigned long moved_swap;
277         struct task_struct *moving_task;        /* a task moving charges */
278         wait_queue_head_t waitq;                /* a waitq for other context */
279 } mc = {
280         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
281         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
282 };
283
284 static bool move_anon(void)
285 {
286         return test_bit(MOVE_CHARGE_TYPE_ANON,
287                                         &mc.to->move_charge_at_immigrate);
288 }
289
290 static bool move_file(void)
291 {
292         return test_bit(MOVE_CHARGE_TYPE_FILE,
293                                         &mc.to->move_charge_at_immigrate);
294 }
295
296 /*
297  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
298  * limit reclaim to prevent infinite loops, if they ever occur.
299  */
300 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
301 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
302
303 enum charge_type {
304         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
305         MEM_CGROUP_CHARGE_TYPE_MAPPED,
306         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
307         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
308         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
309         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
310         NR_CHARGE_TYPE,
311 };
312
313 /* only used here (for easy reading) */
314 #define PCGF_CACHE      (1UL << PCG_CACHE)
315 #define PCGF_USED       (1UL << PCG_USED)
316 #define PCGF_LOCK       (1UL << PCG_LOCK)
317 /* Not used, but added here for completeness */
318 #define PCGF_ACCT       (1UL << PCG_ACCT)
319
320 /* for encoding cft->private value on file */
321 #define _MEM                    (0)
322 #define _MEMSWAP                (1)
323 #define _OOM_TYPE               (2)
324 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
325 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
326 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
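/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type
 * into the upper 16 bits of cft->private and the attribute into the
 * lower 16 bits; a read/write handler can then recover them with
 * MEMFILE_TYPE() and MEMFILE_ATTR() respectively.
 */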
327 /* Used for OOM notifier */
328 #define OOM_CONTROL             (0)
329
330 /*
331  * Reclaim flags for mem_cgroup_hierarchical_reclaim
332  */
333 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
334 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
335 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
336 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
337 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
338 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
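/*
 * For instance, a caller that wants a bounded shrink without touching
 * swap could pass MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK
 * as reclaim_options; mem_cgroup_hierarchical_reclaim() below tests the
 * individual bits to derive its noswap/shrink/check_soft behaviour.
 */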
339
340 static void mem_cgroup_get(struct mem_cgroup *mem);
341 static void mem_cgroup_put(struct mem_cgroup *mem);
342 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
343 static void drain_all_stock_async(void);
344
345 static struct mem_cgroup_per_zone *
346 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
347 {
348         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
349 }
350
351 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
352 {
353         return &mem->css;
354 }
355
356 static struct mem_cgroup_per_zone *
357 page_cgroup_zoneinfo(struct page_cgroup *pc)
358 {
359         struct mem_cgroup *mem = pc->mem_cgroup;
360         int nid = page_cgroup_nid(pc);
361         int zid = page_cgroup_zid(pc);
362
363         if (!mem)
364                 return NULL;
365
366         return mem_cgroup_zoneinfo(mem, nid, zid);
367 }
368
369 static struct mem_cgroup_tree_per_zone *
370 soft_limit_tree_node_zone(int nid, int zid)
371 {
372         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
373 }
374
375 static struct mem_cgroup_tree_per_zone *
376 soft_limit_tree_from_page(struct page *page)
377 {
378         int nid = page_to_nid(page);
379         int zid = page_zonenum(page);
380
381         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
382 }
383
384 static void
385 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
386                                 struct mem_cgroup_per_zone *mz,
387                                 struct mem_cgroup_tree_per_zone *mctz,
388                                 unsigned long long new_usage_in_excess)
389 {
390         struct rb_node **p = &mctz->rb_root.rb_node;
391         struct rb_node *parent = NULL;
392         struct mem_cgroup_per_zone *mz_node;
393
394         if (mz->on_tree)
395                 return;
396
397         mz->usage_in_excess = new_usage_in_excess;
398         if (!mz->usage_in_excess)
399                 return;
400         while (*p) {
401                 parent = *p;
402                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
403                                         tree_node);
404                 if (mz->usage_in_excess < mz_node->usage_in_excess)
405                         p = &(*p)->rb_left;
406                 /*
407                  * We can't avoid mem cgroups that are over their soft
408                  * limit by the same amount
409                  */
410                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
411                         p = &(*p)->rb_right;
412         }
413         rb_link_node(&mz->tree_node, parent, p);
414         rb_insert_color(&mz->tree_node, &mctz->rb_root);
415         mz->on_tree = true;
416 }
417
418 static void
419 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
420                                 struct mem_cgroup_per_zone *mz,
421                                 struct mem_cgroup_tree_per_zone *mctz)
422 {
423         if (!mz->on_tree)
424                 return;
425         rb_erase(&mz->tree_node, &mctz->rb_root);
426         mz->on_tree = false;
427 }
428
429 static void
430 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
431                                 struct mem_cgroup_per_zone *mz,
432                                 struct mem_cgroup_tree_per_zone *mctz)
433 {
434         spin_lock(&mctz->lock);
435         __mem_cgroup_remove_exceeded(mem, mz, mctz);
436         spin_unlock(&mctz->lock);
437 }
438
439
440 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
441 {
442         unsigned long long excess;
443         struct mem_cgroup_per_zone *mz;
444         struct mem_cgroup_tree_per_zone *mctz;
445         int nid = page_to_nid(page);
446         int zid = page_zonenum(page);
447         mctz = soft_limit_tree_from_page(page);
448
449         /*
450          * Necessary to update all ancestors when hierarchy is used,
451          * because their event counter is not touched.
452          */
453         for (; mem; mem = parent_mem_cgroup(mem)) {
454                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
455                 excess = res_counter_soft_limit_excess(&mem->res);
456                 /*
457          * We have to update the tree if mz is on the RB-tree or
458          * mem is over its soft limit.
459                  */
460                 if (excess || mz->on_tree) {
461                         spin_lock(&mctz->lock);
462                         /* if on-tree, remove it */
463                         if (mz->on_tree)
464                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
465                         /*
466                          * Insert again. mz->usage_in_excess will be updated.
467                          * If excess is 0, no tree ops.
468                          */
469                         __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
470                         spin_unlock(&mctz->lock);
471                 }
472         }
473 }
474
475 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
476 {
477         int node, zone;
478         struct mem_cgroup_per_zone *mz;
479         struct mem_cgroup_tree_per_zone *mctz;
480
481         for_each_node_state(node, N_POSSIBLE) {
482                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
483                         mz = mem_cgroup_zoneinfo(mem, node, zone);
484                         mctz = soft_limit_tree_node_zone(node, zone);
485                         mem_cgroup_remove_exceeded(mem, mz, mctz);
486                 }
487         }
488 }
489
490 static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
491 {
492         return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
493 }
494
495 static struct mem_cgroup_per_zone *
496 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
497 {
498         struct rb_node *rightmost = NULL;
499         struct mem_cgroup_per_zone *mz;
500
501 retry:
502         mz = NULL;
503         rightmost = rb_last(&mctz->rb_root);
504         if (!rightmost)
505                 goto done;              /* Nothing to reclaim from */
506
507         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
508         /*
509          * Remove the node now but someone else can add it back;
510          * we will add it back at the end of reclaim to its correct
511          * position in the tree.
512          */
513         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
514         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
515                 !css_tryget(&mz->mem->css))
516                 goto retry;
517 done:
518         return mz;
519 }
520
521 static struct mem_cgroup_per_zone *
522 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
523 {
524         struct mem_cgroup_per_zone *mz;
525
526         spin_lock(&mctz->lock);
527         mz = __mem_cgroup_largest_soft_limit_node(mctz);
528         spin_unlock(&mctz->lock);
529         return mz;
530 }
531
532 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
533                 enum mem_cgroup_stat_index idx)
534 {
535         int cpu;
536         s64 val = 0;
537
538         for_each_possible_cpu(cpu)
539                 val += per_cpu(mem->stat->count[idx], cpu);
540         return val;
541 }
542
543 static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
544 {
545         s64 ret;
546
547         ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
548         ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
549         return ret;
550 }
551
552 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
553                                          bool charge)
554 {
555         int val = (charge) ? 1 : -1;
556         this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
557 }
558
559 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
560                                          struct page_cgroup *pc,
561                                          bool charge)
562 {
563         int val = (charge) ? 1 : -1;
564
565         preempt_disable();
566
567         if (PageCgroupCache(pc))
568                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
569         else
570                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
571
572         if (charge)
573                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
574         else
575                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
576         __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
577
578         preempt_enable();
579 }
580
581 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
582                                         enum lru_list idx)
583 {
584         int nid, zid;
585         struct mem_cgroup_per_zone *mz;
586         u64 total = 0;
587
588         for_each_online_node(nid)
589                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
590                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
591                         total += MEM_CGROUP_ZSTAT(mz, idx);
592                 }
593         return total;
594 }
595
596 static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
597 {
598         s64 val;
599
600         val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
601
602         return !(val & ((1 << event_mask_shift) - 1));
603 }
604
605 /*
606  * Check events in order.
607  *
608  */
609 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
610 {
611         /* threshold event is triggered in finer grain than soft limit */
612         if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
613                 mem_cgroup_threshold(mem);
614                 if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
615                         mem_cgroup_update_tree(mem, page);
616         }
617 }
618
619 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
620 {
621         return container_of(cgroup_subsys_state(cont,
622                                 mem_cgroup_subsys_id), struct mem_cgroup,
623                                 css);
624 }
625
626 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
627 {
628         /*
629          * mm_update_next_owner() may clear mm->owner to NULL
630          * if it races with swapoff, page migration, etc.
631          * So this can be called with p == NULL.
632          */
633         if (unlikely(!p))
634                 return NULL;
635
636         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
637                                 struct mem_cgroup, css);
638 }
639
640 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
641 {
642         struct mem_cgroup *mem = NULL;
643
644         if (!mm)
645                 return NULL;
646         /*
647          * Because we have no locks, mm->owner may be being moved to another
648          * cgroup. We use css_tryget() here even if this looks
649          * pessimistic (rather than adding locks here).
650          */
651         rcu_read_lock();
652         do {
653                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
654                 if (unlikely(!mem))
655                         break;
656         } while (!css_tryget(&mem->css));
657         rcu_read_unlock();
658         return mem;
659 }
660
661 /*
662  * Call the callback function for every cgroup under the hierarchy tree.
663  */
664 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
665                           int (*func)(struct mem_cgroup *, void *))
666 {
667         int found, ret, nextid;
668         struct cgroup_subsys_state *css;
669         struct mem_cgroup *mem;
670
671         if (!root->use_hierarchy)
672                 return (*func)(root, data);
673
674         nextid = 1;
675         do {
676                 ret = 0;
677                 mem = NULL;
678
679                 rcu_read_lock();
680                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
681                                    &found);
682                 if (css && css_tryget(css))
683                         mem = container_of(css, struct mem_cgroup, css);
684                 rcu_read_unlock();
685
686                 if (mem) {
687                         ret = (*func)(mem, data);
688                         css_put(&mem->css);
689                 }
690                 nextid = found + 1;
691         } while (!ret && css);
692
693         return ret;
694 }
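/*
 * Example usage: mem_cgroup_count_children() further below walks the
 * tree with a callback that simply increments a counter:
 *
 *	int num = 0;
 *	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
 *
 * The callback runs for the root and, when use_hierarchy is set, once
 * for every descendant; otherwise it runs for the root only.
 */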
695
696 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
697 {
698         return (mem == root_mem_cgroup);
699 }
700
701 /*
702  * The following LRU functions are allowed to be used without PCG_LOCK.
703  * Operations are called by routines of the global LRU independently from memcg.
704  * What we have to take care of here is the validity of pc->mem_cgroup.
705  *
706  * Changes to pc->mem_cgroup happen when
707  * 1. charge
708  * 2. moving account
709  * In the typical case, "charge" is done before add-to-lru. The exception is
710  * SwapCache, which is added to the LRU before being charged.
711  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
712  * When moving account, the page is not on the LRU; it is isolated.
713  */
714
715 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
716 {
717         struct page_cgroup *pc;
718         struct mem_cgroup_per_zone *mz;
719
720         if (mem_cgroup_disabled())
721                 return;
722         pc = lookup_page_cgroup(page);
723         /* can happen while we handle swapcache. */
724         if (!TestClearPageCgroupAcctLRU(pc))
725                 return;
726         VM_BUG_ON(!pc->mem_cgroup);
727         /*
728          * We don't check PCG_USED bit. It's cleared when the "page" is finally
729          * removed from global LRU.
730          */
731         mz = page_cgroup_zoneinfo(pc);
732         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
733         if (mem_cgroup_is_root(pc->mem_cgroup))
734                 return;
735         VM_BUG_ON(list_empty(&pc->lru));
736         list_del_init(&pc->lru);
737         return;
738 }
739
740 void mem_cgroup_del_lru(struct page *page)
741 {
742         mem_cgroup_del_lru_list(page, page_lru(page));
743 }
744
745 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
746 {
747         struct mem_cgroup_per_zone *mz;
748         struct page_cgroup *pc;
749
750         if (mem_cgroup_disabled())
751                 return;
752
753         pc = lookup_page_cgroup(page);
754         /*
755          * Used bit is set without atomic ops but after smp_wmb().
756          * To make pc->mem_cgroup visible, insert smp_rmb() here.
757          */
758         smp_rmb();
759         /* unused or root page is not rotated. */
760         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
761                 return;
762         mz = page_cgroup_zoneinfo(pc);
763         list_move(&pc->lru, &mz->lists[lru]);
764 }
765
766 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
767 {
768         struct page_cgroup *pc;
769         struct mem_cgroup_per_zone *mz;
770
771         if (mem_cgroup_disabled())
772                 return;
773         pc = lookup_page_cgroup(page);
774         VM_BUG_ON(PageCgroupAcctLRU(pc));
775         /*
776          * Used bit is set without atomic ops but after smp_wmb().
777          * To make pc->mem_cgroup visible, insert smp_rmb() here.
778          */
779         smp_rmb();
780         if (!PageCgroupUsed(pc))
781                 return;
782
783         mz = page_cgroup_zoneinfo(pc);
784         MEM_CGROUP_ZSTAT(mz, lru) += 1;
785         SetPageCgroupAcctLRU(pc);
786         if (mem_cgroup_is_root(pc->mem_cgroup))
787                 return;
788         list_add(&pc->lru, &mz->lists[lru]);
789 }
790
791 /*
792  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
793  * the LRU because the page may be reused after it's fully uncharged (because of
794  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when
795  * charging it again. This function is only used to charge SwapCache. It's done
796  * under lock_page and it is expected that zone->lru_lock is never held.
797  */
798 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
799 {
800         unsigned long flags;
801         struct zone *zone = page_zone(page);
802         struct page_cgroup *pc = lookup_page_cgroup(page);
803
804         spin_lock_irqsave(&zone->lru_lock, flags);
805         /*
806          * Forget old LRU when this page_cgroup is *not* used. This Used bit
807          * is guarded by lock_page() because the page is SwapCache.
808          */
809         if (!PageCgroupUsed(pc))
810                 mem_cgroup_del_lru_list(page, page_lru(page));
811         spin_unlock_irqrestore(&zone->lru_lock, flags);
812 }
813
814 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
815 {
816         unsigned long flags;
817         struct zone *zone = page_zone(page);
818         struct page_cgroup *pc = lookup_page_cgroup(page);
819
820         spin_lock_irqsave(&zone->lru_lock, flags);
821         /* link when the page is linked to LRU but page_cgroup isn't */
822         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
823                 mem_cgroup_add_lru_list(page, page_lru(page));
824         spin_unlock_irqrestore(&zone->lru_lock, flags);
825 }
826
827
828 void mem_cgroup_move_lists(struct page *page,
829                            enum lru_list from, enum lru_list to)
830 {
831         if (mem_cgroup_disabled())
832                 return;
833         mem_cgroup_del_lru_list(page, from);
834         mem_cgroup_add_lru_list(page, to);
835 }
836
837 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
838 {
839         int ret;
840         struct mem_cgroup *curr = NULL;
841
842         task_lock(task);
843         curr = try_get_mem_cgroup_from_mm(task->mm);
844         task_unlock(task);
845         if (!curr)
846                 return 0;
847         /*
848          * We should check use_hierarchy of "mem", not "curr". Checking
849          * use_hierarchy of "curr" here would make this function return true if
850          * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
851          * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
852          */
853         if (mem->use_hierarchy)
854                 ret = css_is_ancestor(&curr->css, &mem->css);
855         else
856                 ret = (curr == mem);
857         css_put(&curr->css);
858         return ret;
859 }
860
861 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
862 {
863         unsigned long active;
864         unsigned long inactive;
865         unsigned long gb;
866         unsigned long inactive_ratio;
867
868         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
869         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
870
871         gb = (inactive + active) >> (30 - PAGE_SHIFT);
872         if (gb)
873                 inactive_ratio = int_sqrt(10 * gb);
874         else
875                 inactive_ratio = 1;
876
877         if (present_pages) {
878                 present_pages[0] = inactive;
879                 present_pages[1] = active;
880         }
881
882         return inactive_ratio;
883 }
884
885 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
886 {
887         unsigned long active;
888         unsigned long inactive;
889         unsigned long present_pages[2];
890         unsigned long inactive_ratio;
891
892         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
893
894         inactive = present_pages[0];
895         active = present_pages[1];
896
897         if (inactive * inactive_ratio < active)
898                 return 1;
899
900         return 0;
901 }
902
903 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
904 {
905         unsigned long active;
906         unsigned long inactive;
907
908         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
909         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
910
911         return (active > inactive);
912 }
913
914 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
915                                        struct zone *zone,
916                                        enum lru_list lru)
917 {
918         int nid = zone->zone_pgdat->node_id;
919         int zid = zone_idx(zone);
920         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
921
922         return MEM_CGROUP_ZSTAT(mz, lru);
923 }
924
925 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
926                                                       struct zone *zone)
927 {
928         int nid = zone->zone_pgdat->node_id;
929         int zid = zone_idx(zone);
930         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
931
932         return &mz->reclaim_stat;
933 }
934
935 struct zone_reclaim_stat *
936 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
937 {
938         struct page_cgroup *pc;
939         struct mem_cgroup_per_zone *mz;
940
941         if (mem_cgroup_disabled())
942                 return NULL;
943
944         pc = lookup_page_cgroup(page);
945         /*
946          * Used bit is set without atomic ops but after smp_wmb().
947          * To make pc->mem_cgroup visible, insert smp_rmb() here.
948          */
949         smp_rmb();
950         if (!PageCgroupUsed(pc))
951                 return NULL;
952
953         mz = page_cgroup_zoneinfo(pc);
954         if (!mz)
955                 return NULL;
956
957         return &mz->reclaim_stat;
958 }
959
960 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
961                                         struct list_head *dst,
962                                         unsigned long *scanned, int order,
963                                         int mode, struct zone *z,
964                                         struct mem_cgroup *mem_cont,
965                                         int active, int file)
966 {
967         unsigned long nr_taken = 0;
968         struct page *page;
969         unsigned long scan;
970         LIST_HEAD(pc_list);
971         struct list_head *src;
972         struct page_cgroup *pc, *tmp;
973         int nid = z->zone_pgdat->node_id;
974         int zid = zone_idx(z);
975         struct mem_cgroup_per_zone *mz;
976         int lru = LRU_FILE * file + active;
977         int ret;
978
979         BUG_ON(!mem_cont);
980         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
981         src = &mz->lists[lru];
982
983         scan = 0;
984         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
985                 if (scan >= nr_to_scan)
986                         break;
987
988                 page = pc->page;
989                 if (unlikely(!PageCgroupUsed(pc)))
990                         continue;
991                 if (unlikely(!PageLRU(page)))
992                         continue;
993
994                 scan++;
995                 ret = __isolate_lru_page(page, mode, file);
996                 switch (ret) {
997                 case 0:
998                         list_move(&page->lru, dst);
999                         mem_cgroup_del_lru(page);
1000                         nr_taken++;
1001                         break;
1002                 case -EBUSY:
1003                         /* we don't affect global LRU but rotate in our LRU */
1004                         mem_cgroup_rotate_lru_list(page, page_lru(page));
1005                         break;
1006                 default:
1007                         break;
1008                 }
1009         }
1010
1011         *scanned = scan;
1012
1013         trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1014                                       0, 0, 0, mode);
1015
1016         return nr_taken;
1017 }
1018
1019 #define mem_cgroup_from_res_counter(counter, member)    \
1020         container_of(counter, struct mem_cgroup, member)
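/*
 * Example: given a struct res_counter *counter known to be the embedded
 * &mem->res, mem_cgroup_from_res_counter(counter, res) returns the
 * enclosing struct mem_cgroup via container_of().
 */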
1021
1022 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1023 {
1024         if (do_swap_account) {
1025                 if (res_counter_check_under_limit(&mem->res) &&
1026                         res_counter_check_under_limit(&mem->memsw))
1027                         return true;
1028         } else
1029                 if (res_counter_check_under_limit(&mem->res))
1030                         return true;
1031         return false;
1032 }
1033
1034 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1035 {
1036         struct cgroup *cgrp = memcg->css.cgroup;
1037         unsigned int swappiness;
1038
1039         /* root ? */
1040         if (cgrp->parent == NULL)
1041                 return vm_swappiness;
1042
1043         spin_lock(&memcg->reclaim_param_lock);
1044         swappiness = memcg->swappiness;
1045         spin_unlock(&memcg->reclaim_param_lock);
1046
1047         return swappiness;
1048 }
1049
1050 /* A routine for checking whether "mem" is under move_account (charge moving) */
1051
1052 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1053 {
1054         struct mem_cgroup *from;
1055         struct mem_cgroup *to;
1056         bool ret = false;
1057         /*
1058          * Unlike the task_move routines, we access mc.to and mc.from without
1059          * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1060          */
1061         spin_lock(&mc.lock);
1062         from = mc.from;
1063         to = mc.to;
1064         if (!from)
1065                 goto unlock;
1066         if (from == mem || to == mem
1067             || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1068             || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
1069                 ret = true;
1070 unlock:
1071         spin_unlock(&mc.lock);
1072         return ret;
1073 }
1074
1075 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1076 {
1077         if (mc.moving_task && current != mc.moving_task) {
1078                 if (mem_cgroup_under_move(mem)) {
1079                         DEFINE_WAIT(wait);
1080                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1081                         /* moving charge context might have finished. */
1082                         if (mc.moving_task)
1083                                 schedule();
1084                         finish_wait(&mc.waitq, &wait);
1085                         return true;
1086                 }
1087         }
1088         return false;
1089 }
1090
1091 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
1092 {
1093         int *val = data;
1094         (*val)++;
1095         return 0;
1096 }
1097
1098 /**
1099  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1100  * @memcg: The memory cgroup that went over limit
1101  * @p: Task that is going to be killed
1102  *
1103  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1104  * enabled
1105  */
1106 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1107 {
1108         struct cgroup *task_cgrp;
1109         struct cgroup *mem_cgrp;
1110         /*
1111          * Need a buffer in BSS, can't rely on allocations. The code relies
1112          * on the assumption that OOM is serialized for memory controller.
1113          * If this assumption is broken, revisit this code.
1114          */
1115         static char memcg_name[PATH_MAX];
1116         int ret;
1117
1118         if (!memcg || !p)
1119                 return;
1120
1121
1122         rcu_read_lock();
1123
1124         mem_cgrp = memcg->css.cgroup;
1125         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1126
1127         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1128         if (ret < 0) {
1129                 /*
1130                  * Unfortunately, we are unable to convert to a useful name,
1131                  * but we'll still print out the usage information.
1132                  */
1133                 rcu_read_unlock();
1134                 goto done;
1135         }
1136         rcu_read_unlock();
1137
1138         printk(KERN_INFO "Task in %s killed", memcg_name);
1139
1140         rcu_read_lock();
1141         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1142         if (ret < 0) {
1143                 rcu_read_unlock();
1144                 goto done;
1145         }
1146         rcu_read_unlock();
1147
1148         /*
1149          * Continues from above, so we don't need a KERN_ level
1150          */
1151         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1152 done:
1153
1154         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1155                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1156                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1157                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1158         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1159                 "failcnt %llu\n",
1160                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1161                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1162                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1163 }
1164
1165 /*
1166  * This function returns the number of memcgs under the hierarchy tree.
1167  * Returns 1 (self count) if there are no children.
1168  */
1169 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1170 {
1171         int num = 0;
1172         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1173         return num;
1174 }
1175
1176 /*
1177  * Return the memory (and swap, if configured) limit for a memcg.
1178  */
1179 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1180 {
1181         u64 limit;
1182         u64 memsw;
1183
1184         limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
1185                         total_swap_pages;
1186         memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1187         /*
1188          * If memsw is finite and limits the amount of swap space available
1189          * to this memcg, return that limit.
1190          */
1191         return min(limit, memsw);
1192 }
1193
1194 /*
1195  * Visit the first child (need not be the first child as per the ordering
1196  * of the cgroup list, since we track last_scanned_child) of @mem and use
1197  * that to reclaim free pages from.
1198  */
1199 static struct mem_cgroup *
1200 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1201 {
1202         struct mem_cgroup *ret = NULL;
1203         struct cgroup_subsys_state *css;
1204         int nextid, found;
1205
1206         if (!root_mem->use_hierarchy) {
1207                 css_get(&root_mem->css);
1208                 ret = root_mem;
1209         }
1210
1211         while (!ret) {
1212                 rcu_read_lock();
1213                 nextid = root_mem->last_scanned_child + 1;
1214                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1215                                    &found);
1216                 if (css && css_tryget(css))
1217                         ret = container_of(css, struct mem_cgroup, css);
1218
1219                 rcu_read_unlock();
1220                 /* Updates scanning parameter */
1221                 spin_lock(&root_mem->reclaim_param_lock);
1222                 if (!css) {
1223                         /* this means start scan from ID:1 */
1224                         root_mem->last_scanned_child = 0;
1225                 } else
1226                         root_mem->last_scanned_child = found;
1227                 spin_unlock(&root_mem->reclaim_param_lock);
1228         }
1229
1230         return ret;
1231 }
1232
1233 /*
1234  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1235  * we reclaimed from, so that we don't end up penalizing one child extensively
1236  * based on its position in the children list.
1237  *
1238  * root_mem is the original ancestor that we've been reclaiming from.
1239  *
1240  * We give up and return to the caller when we visit root_mem twice.
1241  * (other groups can be removed while we're walking....)
1242  *
1243  * If shrink==true, this returns immediately, to avoid freeing too much.
1244  */
1245 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1246                                                 struct zone *zone,
1247                                                 gfp_t gfp_mask,
1248                                                 unsigned long reclaim_options)
1249 {
1250         struct mem_cgroup *victim;
1251         int ret, total = 0;
1252         int loop = 0;
1253         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1254         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1255         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1256         unsigned long excess = mem_cgroup_get_excess(root_mem);
1257
1258         /* If memsw_is_minimum==1, swap-out is of no use. */
1259         if (root_mem->memsw_is_minimum)
1260                 noswap = true;
1261
1262         while (1) {
1263                 victim = mem_cgroup_select_victim(root_mem);
1264                 if (victim == root_mem) {
1265                         loop++;
1266                         if (loop >= 1)
1267                                 drain_all_stock_async();
1268                         if (loop >= 2) {
1269                                 /*
1270                                  * If we have not been able to reclaim
1271                                  * anything, it might be because there are
1272                                  * no reclaimable pages under this hierarchy
1273                                  */
1274                                 if (!check_soft || !total) {
1275                                         css_put(&victim->css);
1276                                         break;
1277                                 }
1278                                 /*
1279                                  * We want to do more targeted reclaim.
1280                                  * excess >> 2 is not too excessive, so we do
1281                                  * not reclaim too much, nor so little that we
1282                                  * keep coming back to reclaim from this cgroup
1283                                  */
1284                                 if (total >= (excess >> 2) ||
1285                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1286                                         css_put(&victim->css);
1287                                         break;
1288                                 }
1289                         }
1290                 }
1291                 if (!mem_cgroup_local_usage(victim)) {
1292                         /* this cgroup's local usage == 0 */
1293                         css_put(&victim->css);
1294                         continue;
1295                 }
1296                 /* we use swappiness of local cgroup */
1297                 if (check_soft)
1298                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1299                                 noswap, get_swappiness(victim), zone,
1300                                 zone->zone_pgdat->node_id);
1301                 else
1302                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1303                                                 noswap, get_swappiness(victim));
1304                 css_put(&victim->css);
1305                 /*
1306                  * When shrinking usage, we can't check whether we should stop
1307                  * here or reclaim more; it depends on the caller.
1308                  * last_scanned_child is enough to keep fairness under the tree.
1309                  */
1310                 if (shrink)
1311                         return ret;
1312                 total += ret;
1313                 if (check_soft) {
1314                         if (res_counter_check_under_soft_limit(&root_mem->res))
1315                                 return total;
1316                 } else if (mem_cgroup_check_under_limit(root_mem))
1317                         return 1 + total;
1318         }
1319         return total;
1320 }
1321
1322 static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
1323 {
1324         int *val = (int *)data;
1325         int x;
1326         /*
1327          * Logically, we can stop scanning immediately when we find
1328          * a memcg that is already locked. But considering unlock ops and
1329          * creation/removal of memcgs, scanning all is the simpler operation.
1330          */
1331         x = atomic_inc_return(&mem->oom_lock);
1332         *val = max(x, *val);
1333         return 0;
1334 }
1335 /*
1336  * Check whether the OOM killer is already running under our hierarchy.
1337  * If someone else is running it, return false.
1338  */
1339 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1340 {
1341         int lock_count = 0;
1342
1343         mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
1344
1345         if (lock_count == 1)
1346                 return true;
1347         return false;
1348 }
1349
1350 static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
1351 {
1352         /*
1353          * When a new child is created while the hierarchy is under oom,
1354          * mem_cgroup_oom_lock() may not be called. We have to use
1355          * atomic_add_unless() here.
1356          */
1357         atomic_add_unless(&mem->oom_lock, -1, 0);
1358         return 0;
1359 }
1360
1361 static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1362 {
1363         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
1364 }
1365
1366 static DEFINE_MUTEX(memcg_oom_mutex);
1367 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1368
1369 struct oom_wait_info {
1370         struct mem_cgroup *mem;
1371         wait_queue_t    wait;
1372 };
1373
1374 static int memcg_oom_wake_function(wait_queue_t *wait,
1375         unsigned mode, int sync, void *arg)
1376 {
1377         struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1378         struct oom_wait_info *oom_wait_info;
1379
1380         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1381
1382         if (oom_wait_info->mem == wake_mem)
1383                 goto wakeup;
1384         /* if no hierarchy, no match */
1385         if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1386                 return 0;
1387         /*
1388          * Both of oom_wait_info->mem and wake_mem are stable under us.
1389          * Then we can use css_is_ancestor without taking care of RCU.
1390          */
1391         if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1392             !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1393                 return 0;
1394
1395 wakeup:
1396         return autoremove_wake_function(wait, mode, sync, arg);
1397 }
1398
1399 static void memcg_wakeup_oom(struct mem_cgroup *mem)
1400 {
1401         /* for filtering, pass "mem" as argument. */
1402         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1403 }
1404
1405 static void memcg_oom_recover(struct mem_cgroup *mem)
1406 {
1407         if (mem && atomic_read(&mem->oom_lock))
1408                 memcg_wakeup_oom(mem);
1409 }
1410
1411 /*
1412  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1413  */
1414 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1415 {
1416         struct oom_wait_info owait;
1417         bool locked, need_to_kill;
1418
1419         owait.mem = mem;
1420         owait.wait.flags = 0;
1421         owait.wait.func = memcg_oom_wake_function;
1422         owait.wait.private = current;
1423         INIT_LIST_HEAD(&owait.wait.task_list);
1424         need_to_kill = true;
1425         /* At first, try to OOM lock hierarchy under mem.*/
1426         mutex_lock(&memcg_oom_mutex);
1427         locked = mem_cgroup_oom_lock(mem);
1428         /*
1429          * Even if signal_pending(), we can't quit charge() loop without
1430          * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1431          * under OOM is always welcome, so use TASK_KILLABLE here.
1432          */
1433         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1434         if (!locked || mem->oom_kill_disable)
1435                 need_to_kill = false;
1436         if (locked)
1437                 mem_cgroup_oom_notify(mem);
1438         mutex_unlock(&memcg_oom_mutex);
1439
1440         if (need_to_kill) {
1441                 finish_wait(&memcg_oom_waitq, &owait.wait);
1442                 mem_cgroup_out_of_memory(mem, mask);
1443         } else {
1444                 schedule();
1445                 finish_wait(&memcg_oom_waitq, &owait.wait);
1446         }
1447         mutex_lock(&memcg_oom_mutex);
1448         mem_cgroup_oom_unlock(mem);
1449         memcg_wakeup_oom(mem);
1450         mutex_unlock(&memcg_oom_mutex);
1451
1452         if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1453                 return false;
1454         /* Give chance to dying process */
1455         schedule_timeout(1);
1456         return true;
1457 }
1458
1459 /*
1460  * Currently used to update mapped file statistics, but the routine can be
1461  * generalized to update other statistics as well.
1462  */
1463 void mem_cgroup_update_file_mapped(struct page *page, int val)
1464 {
1465         struct mem_cgroup *mem;
1466         struct page_cgroup *pc;
1467
1468         pc = lookup_page_cgroup(page);
1469         if (unlikely(!pc))
1470                 return;
1471
1472         lock_page_cgroup(pc);
1473         mem = pc->mem_cgroup;
1474         if (!mem || !PageCgroupUsed(pc))
1475                 goto done;
1476
1477         /*
1478          * Preemption is already disabled. We can use __this_cpu_xxx
1479          */
1480         if (val > 0) {
1481                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1482                 SetPageCgroupFileMapped(pc);
1483         } else {
1484                 __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1485                 ClearPageCgroupFileMapped(pc);
1486         }
1487
1488 done:
1489         unlock_page_cgroup(pc);
1490 }
1491
1492 /*
1493  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1494  * TODO: it may be necessary to use bigger numbers on big iron.
1495  */
1496 #define CHARGE_SIZE     (32 * PAGE_SIZE)
1497 struct memcg_stock_pcp {
1498         struct mem_cgroup *cached; /* this is never the root cgroup */
1499         int charge;
1500         struct work_struct work;
1501 };
1502 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1503 static atomic_t memcg_drain_count;
1504
1505 /*
1506  * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is
1507  * consumed from the local stock and true is returned. If the stock is 0 or
1508  * holds charges from a cgroup which is not the current target, false is
1509  * returned. The stock will be refilled later.
1510  */
1511 static bool consume_stock(struct mem_cgroup *mem)
1512 {
1513         struct memcg_stock_pcp *stock;
1514         bool ret = true;
1515
1516         stock = &get_cpu_var(memcg_stock);
1517         if (mem == stock->cached && stock->charge)
1518                 stock->charge -= PAGE_SIZE;
1519         else /* need to call res_counter_charge */
1520                 ret = false;
1521         put_cpu_var(memcg_stock);
1522         return ret;
1523 }
1524
1525 /*
1526  * Return the stock cached in the percpu area to the res_counter and reset the cached information.
1527  */
1528 static void drain_stock(struct memcg_stock_pcp *stock)
1529 {
1530         struct mem_cgroup *old = stock->cached;
1531
1532         if (stock->charge) {
1533                 res_counter_uncharge(&old->res, stock->charge);
1534                 if (do_swap_account)
1535                         res_counter_uncharge(&old->memsw, stock->charge);
1536         }
1537         stock->cached = NULL;
1538         stock->charge = 0;
1539 }
1540
1541 /*
1542  * This must be called with preemption disabled, or by a thread
1543  * pinned to the local cpu.
1544  */
1545 static void drain_local_stock(struct work_struct *dummy)
1546 {
1547         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1548         drain_stock(stock);
1549 }
1550
1551 /*
1552  * Cache charges (val), already taken from the res_counter, in the local per-cpu area.
1553  * They will be consumed later by consume_stock().
1554  */
1555 static void refill_stock(struct mem_cgroup *mem, int val)
1556 {
1557         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1558
1559         if (stock->cached != mem) { /* reset if necessary */
1560                 drain_stock(stock);
1561                 stock->cached = mem;
1562         }
1563         stock->charge += val;
1564         put_cpu_var(memcg_stock);
1565 }
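
/*
 * Illustrative sketch, not part of memcontrol.c: how a charging path is
 * expected to combine the helpers above. Try the cheap per-cpu stock first;
 * only on a miss charge a whole CHARGE_SIZE batch from the res_counter and
 * stash the surplus for later consume_stock() calls. The real user is
 * __mem_cgroup_try_charge() below; memsw accounting and reclaim are left out
 * here for brevity and the helper name is hypothetical.
 */
static int example_charge_one_page(struct mem_cgroup *mem)
{
        struct res_counter *fail_res;

        if (consume_stock(mem))
                return 0;       /* PAGE_SIZE taken from the local stock */

        /* slow path: charge a batch, keep the remainder on this cpu */
        if (res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res))
                return -ENOMEM;
        refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);
        return 0;
}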
1566
1567 /*
1568  * Tries to drain stocked charges on other cpus. This function is asynchronous
1569  * and just schedules a work item per cpu to drain locally on each cpu. The
1570  * caller can expect some charges to be returned to the res_counter later, but
1571  * cannot wait for that.
1572  */
1573 static void drain_all_stock_async(void)
1574 {
1575         int cpu;
1576         /* This function schedules "drain" asynchronously. The result of
1577          * "drain" is not handled directly by callers, so if someone is
1578          * already calling drain, we don't have to call it again. The
1579          * WORK_STRUCT_PENDING check in queue_work_on() will catch any
1580          * race; we only do a loose check here.
1581          */
1582         if (atomic_read(&memcg_drain_count))
1583                 return;
1584         /* Notify other cpus that system-wide "drain" is running */
1585         atomic_inc(&memcg_drain_count);
1586         get_online_cpus();
1587         for_each_online_cpu(cpu) {
1588                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1589                 schedule_work_on(cpu, &stock->work);
1590         }
1591         put_online_cpus();
1592         atomic_dec(&memcg_drain_count);
1593         /* We don't wait for flush_work */
1594 }
1595
1596 /* This is a synchronous drain interface. */
1597 static void drain_all_stock_sync(void)
1598 {
1599         /* called when force_empty is called */
1600         atomic_inc(&memcg_drain_count);
1601         schedule_on_each_cpu(drain_local_stock);
1602         atomic_dec(&memcg_drain_count);
1603 }
1604
1605 static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1606                                         unsigned long action,
1607                                         void *hcpu)
1608 {
1609         int cpu = (unsigned long)hcpu;
1610         struct memcg_stock_pcp *stock;
1611
1612         if (action != CPU_DEAD)
1613                 return NOTIFY_OK;
1614         stock = &per_cpu(memcg_stock, cpu);
1615         drain_stock(stock);
1616         return NOTIFY_OK;
1617 }
1618
1619
1620 /* See __mem_cgroup_try_charge() for details */
1621 enum {
1622         CHARGE_OK,              /* success */
1623         CHARGE_RETRY,           /* need to retry but retry is not bad */
1624         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
1625         CHARGE_WOULDBLOCK,      /* __GFP_WAIT wasn't set and not enough res. */
1626         CHARGE_OOM_DIE,         /* the current task is killed because of OOM */
1627 };
1628
1629 static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1630                                 int csize, bool oom_check)
1631 {
1632         struct mem_cgroup *mem_over_limit;
1633         struct res_counter *fail_res;
1634         unsigned long flags = 0;
1635         int ret;
1636
1637         ret = res_counter_charge(&mem->res, csize, &fail_res);
1638
1639         if (likely(!ret)) {
1640                 if (!do_swap_account)
1641                         return CHARGE_OK;
1642                 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1643                 if (likely(!ret))
1644                         return CHARGE_OK;
1645
1646                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1647                 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1648         } else
1649                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
1650
1651         if (csize > PAGE_SIZE) /* change csize and retry */
1652                 return CHARGE_RETRY;
1653
1654         if (!(gfp_mask & __GFP_WAIT))
1655                 return CHARGE_WOULDBLOCK;
1656
1657         ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1658                                         gfp_mask, flags);
1659         /*
1660          * try_to_free_mem_cgroup_pages() might not give us a full
1661          * picture of reclaim. Some pages are reclaimed and might be
1662          * moved to swap cache or just unmapped from the cgroup.
1663          * Check the limit again to see if the reclaim reduced the
1664          * current usage of the cgroup before giving up
1665          */
1666         if (ret || mem_cgroup_check_under_limit(mem_over_limit))
1667                 return CHARGE_RETRY;
1668
1669         /*
1670          * At task move, charge accounts can be doubly counted. So, it's
1671          * better to wait until the end of task_move if something is going on.
1672          */
1673         if (mem_cgroup_wait_acct_move(mem_over_limit))
1674                 return CHARGE_RETRY;
1675
1676         /* If we don't need to invoke the oom-killer at all, return immediately */
1677         if (!oom_check)
1678                 return CHARGE_NOMEM;
1679         /* check OOM */
1680         if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1681                 return CHARGE_OOM_DIE;
1682
1683         return CHARGE_RETRY;
1684 }
1685
1686 /*
1687  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
1688  * the oom-killer can be invoked.
1689  */
1690 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1691                 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1692 {
1693         int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1694         struct mem_cgroup *mem = NULL;
1695         int ret;
1696         int csize = CHARGE_SIZE;
1697
1698         /*
1699          * Unlike the global VM's OOM-kill, we're not under a system-level
1700          * memory shortage. So allow a dying process to go ahead, in addition
1701          * to a MEMDIE process.
1702          */
1703         if (unlikely(test_thread_flag(TIF_MEMDIE)
1704                      || fatal_signal_pending(current)))
1705                 goto bypass;
1706
1707         /*
1708          * We always charge the cgroup the mm_struct belongs to.
1709          * The mm_struct's mem_cgroup changes on task migration if the
1710          * thread group leader migrates. It's possible that mm is not
1711          * set, if so charge the init_mm (happens for pagecache usage).
1712          */
1713         if (*memcg) {
1714                 mem = *memcg;
1715                 css_get(&mem->css);
1716         } else {
1717                 mem = try_get_mem_cgroup_from_mm(mm);
1718                 if (unlikely(!mem))
1719                         return 0;
1720                 *memcg = mem;
1721         }
1722
1723         VM_BUG_ON(css_is_removed(&mem->css));
1724         if (mem_cgroup_is_root(mem))
1725                 goto done;
1726
1727         do {
1728                 bool oom_check;
1729
1730                 if (consume_stock(mem))
1731                         goto done; /* don't need to fill stock */
1732                 /* If killed, bypass charge */
1733                 if (fatal_signal_pending(current))
1734                         goto bypass;
1735
1736                 oom_check = false;
1737                 if (oom && !nr_oom_retries) {
1738                         oom_check = true;
1739                         nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1740                 }
1741
1742                 ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
1743
1744                 switch (ret) {
1745                 case CHARGE_OK:
1746                         break;
1747                 case CHARGE_RETRY: /* not in OOM situation but retry */
1748                         csize = PAGE_SIZE;
1749                         break;
1750                 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
1751                         goto nomem;
1752                 case CHARGE_NOMEM: /* OOM routine works */
1753                         if (!oom)
1754                                 goto nomem;
1755                         /* If oom, we never return -ENOMEM */
1756                         nr_oom_retries--;
1757                         break;
1758                 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
1759                         goto bypass;
1760                 }
1761         } while (ret != CHARGE_OK);
1762
1763         if (csize > PAGE_SIZE)
1764                 refill_stock(mem, csize - PAGE_SIZE);
1765 done:
1766         return 0;
1767 nomem:
1768         css_put(&mem->css);
1769         return -ENOMEM;
1770 bypass:
1771         if (mem)
1772                 css_put(&mem->css);
1773         *memcg = NULL;
1774         return 0;
1775 }
1776
1777 /*
1778  * Sometimes we have to undo a charge we got by try_charge().
1779  * This function does the uncharge and puts the css refcount
1780  * taken by try_charge().
1781  */
1782 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1783                                                         unsigned long count)
1784 {
1785         if (!mem_cgroup_is_root(mem)) {
1786                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1787                 if (do_swap_account)
1788                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1789                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1790                 WARN_ON_ONCE(count > INT_MAX);
1791                 __css_put(&mem->css, (int)count);
1792         }
1793         /* we don't need css_put for root */
1794 }
1795
1796 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1797 {
1798         __mem_cgroup_cancel_charge(mem, 1);
1799 }
1800
1801 /*
1802  * A helper function to get a mem_cgroup from an ID. Must be called under
1803  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1804  * that is a concern. (Dropping a refcnt from swap can happen against a
1805  * removed memcg.)
1806  */
1807 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1808 {
1809         struct cgroup_subsys_state *css;
1810
1811         /* ID 0 is unused ID */
1812         if (!id)
1813                 return NULL;
1814         css = css_lookup(&mem_cgroup_subsys, id);
1815         if (!css)
1816                 return NULL;
1817         return container_of(css, struct mem_cgroup, css);
1818 }
1819
1820 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1821 {
1822         struct mem_cgroup *mem = NULL;
1823         struct page_cgroup *pc;
1824         unsigned short id;
1825         swp_entry_t ent;
1826
1827         VM_BUG_ON(!PageLocked(page));
1828
1829         pc = lookup_page_cgroup(page);
1830         lock_page_cgroup(pc);
1831         if (PageCgroupUsed(pc)) {
1832                 mem = pc->mem_cgroup;
1833                 if (mem && !css_tryget(&mem->css))
1834                         mem = NULL;
1835         } else if (PageSwapCache(page)) {
1836                 ent.val = page_private(page);
1837                 id = lookup_swap_cgroup(ent);
1838                 rcu_read_lock();
1839                 mem = mem_cgroup_lookup(id);
1840                 if (mem && !css_tryget(&mem->css))
1841                         mem = NULL;
1842                 rcu_read_unlock();
1843         }
1844         unlock_page_cgroup(pc);
1845         return mem;
1846 }
1847
1848 /*
1849  * Commit a charge obtained by __mem_cgroup_try_charge() and move the page_cgroup
1850  * to the USED state. If it is already USED, uncharge and return.
1851  */
1852
1853 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1854                                      struct page_cgroup *pc,
1855                                      enum charge_type ctype)
1856 {
1857         /* try_charge() can return NULL to *memcg; handle that case here. */
1858         if (!mem)
1859                 return;
1860
1861         lock_page_cgroup(pc);
1862         if (unlikely(PageCgroupUsed(pc))) {
1863                 unlock_page_cgroup(pc);
1864                 mem_cgroup_cancel_charge(mem);
1865                 return;
1866         }
1867
1868         pc->mem_cgroup = mem;
1869         /*
1870          * We access a page_cgroup asynchronously without lock_page_cgroup().
1871          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1872          * is accessed after testing the USED bit. To make pc->mem_cgroup visible
1873          * before the USED bit, we need a memory barrier here.
1874          * See mem_cgroup_add_lru_list(), etc.
1875          */
1876         smp_wmb();
1877         switch (ctype) {
1878         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1879         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1880                 SetPageCgroupCache(pc);
1881                 SetPageCgroupUsed(pc);
1882                 break;
1883         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1884                 ClearPageCgroupCache(pc);
1885                 SetPageCgroupUsed(pc);
1886                 break;
1887         default:
1888                 break;
1889         }
1890
1891         mem_cgroup_charge_statistics(mem, pc, true);
1892
1893         unlock_page_cgroup(pc);
1894         /*
1895          * "charge_statistics" updated the event counter. Then check it, and
1896          * insert the ancestor (and the ancestor's ancestors) into the softlimit
1897          * RB-tree if they exceed their softlimit.
1898          */
1899         memcg_check_events(mem, pc->page);
1900 }
1901
1902 /**
1903  * __mem_cgroup_move_account - move account of the page
1904  * @pc: page_cgroup of the page.
1905  * @from: mem_cgroup which the page is moved from.
1906  * @to: mem_cgroup which the page is moved to. @from != @to.
1907  * @uncharge: whether we should call uncharge and css_put against @from.
1908  *
1909  * The caller must confirm following.
1910  * - page is not on LRU (isolate_page() is useful.)
1911  * - the pc is locked, used, and ->mem_cgroup points to @from.
1912  *
1913  * This function doesn't do "charge" nor css_get on the new cgroup. That should
1914  * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
1915  * is true, this function does "uncharge" from the old cgroup; if @uncharge is
1916  * false it doesn't, so the caller should do the "uncharge".
1917  */
1918
1919 static void __mem_cgroup_move_account(struct page_cgroup *pc,
1920         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1921 {
1922         VM_BUG_ON(from == to);
1923         VM_BUG_ON(PageLRU(pc->page));
1924         VM_BUG_ON(!PageCgroupLocked(pc));
1925         VM_BUG_ON(!PageCgroupUsed(pc));
1926         VM_BUG_ON(pc->mem_cgroup != from);
1927
1928         if (PageCgroupFileMapped(pc)) {
1929                 /* Update mapped_file data for mem_cgroup */
1930                 preempt_disable();
1931                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1932                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1933                 preempt_enable();
1934         }
1935         mem_cgroup_charge_statistics(from, pc, false);
1936         if (uncharge)
1937                 /* This is not "cancel", but cancel_charge does all we need. */
1938                 mem_cgroup_cancel_charge(from);
1939
1940         /* caller should have done css_get */
1941         pc->mem_cgroup = to;
1942         mem_cgroup_charge_statistics(to, pc, true);
1943         /*
1944          * We charge against "to", which may not have any tasks, so "to"
1945          * can be under rmdir(). But in the current implementation the
1946          * callers of this function are only force_empty() and move charge,
1947          * so it's guaranteed that "to" is never removed. So we don't check
1948          * rmdir status here.
1949          */
1950 }
1951
1952 /*
1953  * Check whether @pc is valid for moving the account and, if so, call
1954  * __mem_cgroup_move_account().
1955  */
1956 static int mem_cgroup_move_account(struct page_cgroup *pc,
1957                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1958 {
1959         int ret = -EINVAL;
1960         lock_page_cgroup(pc);
1961         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1962                 __mem_cgroup_move_account(pc, from, to, uncharge);
1963                 ret = 0;
1964         }
1965         unlock_page_cgroup(pc);
1966         /*
1967          * check events
1968          */
1969         memcg_check_events(to, pc->page);
1970         memcg_check_events(from, pc->page);
1971         return ret;
1972 }
1973
1974 /*
1975  * Move charges to the parent cgroup.
1976  */
1977
1978 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1979                                   struct mem_cgroup *child,
1980                                   gfp_t gfp_mask)
1981 {
1982         struct page *page = pc->page;
1983         struct cgroup *cg = child->css.cgroup;
1984         struct cgroup *pcg = cg->parent;
1985         struct mem_cgroup *parent;
1986         int ret;
1987
1988         /* Is ROOT ? */
1989         if (!pcg)
1990                 return -EINVAL;
1991
1992         ret = -EBUSY;
1993         if (!get_page_unless_zero(page))
1994                 goto out;
1995         if (isolate_lru_page(page))
1996                 goto put;
1997
1998         parent = mem_cgroup_from_cont(pcg);
1999         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
2000         if (ret || !parent)
2001                 goto put_back;
2002
2003         ret = mem_cgroup_move_account(pc, child, parent, true);
2004         if (ret)
2005                 mem_cgroup_cancel_charge(parent);
2006 put_back:
2007         putback_lru_page(page);
2008 put:
2009         put_page(page);
2010 out:
2011         return ret;
2012 }
2013
2014 /*
2015  * Charge the memory controller for page usage.
2016  * Return
2017  * 0 if the charge was successful
2018  * < 0 if the cgroup is over its limit
2019  */
2020 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2021                                 gfp_t gfp_mask, enum charge_type ctype)
2022 {
2023         struct mem_cgroup *mem = NULL;
2024         struct page_cgroup *pc;
2025         int ret;
2026
2027         pc = lookup_page_cgroup(page);
2028         /* can happen at boot */
2029         if (unlikely(!pc))
2030                 return 0;
2031         prefetchw(pc);
2032
2033         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
2034         if (ret || !mem)
2035                 return ret;
2036
2037         __mem_cgroup_commit_charge(mem, pc, ctype);
2038         return 0;
2039 }
2040
2041 int mem_cgroup_newpage_charge(struct page *page,
2042                               struct mm_struct *mm, gfp_t gfp_mask)
2043 {
2044         if (mem_cgroup_disabled())
2045                 return 0;
2046         if (PageCompound(page))
2047                 return 0;
2048         /*
2049          * If already mapped, we don't have to account.
2050          * If page cache, page->mapping has an address_space.
2051          * But page->mapping may hold a stale anon_vma pointer;
2052          * detect that with the PageAnon() check. A newly-mapped anon
2053          * page's page->mapping is NULL.
2054          */
2055         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2056                 return 0;
2057         if (unlikely(!mm))
2058                 mm = &init_mm;
2059         return mem_cgroup_charge_common(page, mm, gfp_mask,
2060                                 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2061 }
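
/*
 * Illustrative sketch, not part of memcontrol.c: the anonymous-fault path is
 * the expected caller of mem_cgroup_newpage_charge(), charging a freshly
 * allocated page before it is mapped. This mirrors the mm/memory.c caller in
 * a heavily simplified, assumed form.
 */
static int example_anon_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long address)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

        if (!page)
                return VM_FAULT_OOM;
        if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                page_cache_release(page);       /* over limit: back out */
                return VM_FAULT_OOM;
        }
        /* ... install the pte and add the page to the LRU ... */
        return 0;
}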
2062
2063 static void
2064 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2065                                         enum charge_type ctype);
2066
2067 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2068                                 gfp_t gfp_mask)
2069 {
2070         int ret;
2071
2072         if (mem_cgroup_disabled())
2073                 return 0;
2074         if (PageCompound(page))
2075                 return 0;
2076         /*
2077          * Corner case handling. This is usually called from add_to_page_cache().
2078          * But some filesystems (shmem) precharge the page before calling it and
2079          * then call add_to_page_cache() with GFP_NOWAIT.
2080          *
2081          * In the GFP_NOWAIT case, the page may be pre-charged before
2082          * add_to_page_cache() is called (see shmem.c); check for that here and
2083          * avoid charging twice. (It works but costs a bit more.)
2084          * And when the page is SwapCache, swap information should be taken
2085          * into account. This runs under lock_page() now.
2086          */
2087         if (!(gfp_mask & __GFP_WAIT)) {
2088                 struct page_cgroup *pc;
2089
2090                 pc = lookup_page_cgroup(page);
2091                 if (!pc)
2092                         return 0;
2093                 lock_page_cgroup(pc);
2094                 if (PageCgroupUsed(pc)) {
2095                         unlock_page_cgroup(pc);
2096                         return 0;
2097                 }
2098                 unlock_page_cgroup(pc);
2099         }
2100
2101         if (unlikely(!mm))
2102                 mm = &init_mm;
2103
2104         if (page_is_file_cache(page))
2105                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2106                                 MEM_CGROUP_CHARGE_TYPE_CACHE);
2107
2108         /* shmem */
2109         if (PageSwapCache(page)) {
2110                 struct mem_cgroup *mem = NULL;
2111
2112                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2113                 if (!ret)
2114                         __mem_cgroup_commit_charge_swapin(page, mem,
2115                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2116         } else
2117                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2118                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2119
2120         return ret;
2121 }
2122
2123 /*
2124  * During swap-in (try_charge -> commit or cancel) the page is locked.
2125  * When try_charge() returns successfully, one refcnt on the memcg (not tied
2126  * to a struct page_cgroup) has been acquired. This refcnt will be consumed
2127  * by "commit()" or released by "cancel()".
2128  */
2129 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2130                                  struct page *page,
2131                                  gfp_t mask, struct mem_cgroup **ptr)
2132 {
2133         struct mem_cgroup *mem;
2134         int ret;
2135
2136         if (mem_cgroup_disabled())
2137                 return 0;
2138
2139         if (!do_swap_account)
2140                 goto charge_cur_mm;
2141         /*
2142          * A racing thread's fault, or swapoff, may have already updated
2143          * the pte, and even removed page from swap cache: in those cases
2144          * do_swap_page()'s pte_same() test will fail; but there's also a
2145          * KSM case which does need to charge the page.
2146          */
2147         if (!PageSwapCache(page))
2148                 goto charge_cur_mm;
2149         mem = try_get_mem_cgroup_from_page(page);
2150         if (!mem)
2151                 goto charge_cur_mm;
2152         *ptr = mem;
2153         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2154         /* drop extra refcnt from tryget */
2155         css_put(&mem->css);
2156         return ret;
2157 charge_cur_mm:
2158         if (unlikely(!mm))
2159                 mm = &init_mm;
2160         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2161 }
2162
2163 static void
2164 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2165                                         enum charge_type ctype)
2166 {
2167         struct page_cgroup *pc;
2168
2169         if (mem_cgroup_disabled())
2170                 return;
2171         if (!ptr)
2172                 return;
2173         cgroup_exclude_rmdir(&ptr->css);
2174         pc = lookup_page_cgroup(page);
2175         mem_cgroup_lru_del_before_commit_swapcache(page);
2176         __mem_cgroup_commit_charge(ptr, pc, ctype);
2177         mem_cgroup_lru_add_after_commit_swapcache(page);
2178         /*
2179          * Now the swap is in memory. This means this page may be
2180          * counted both as mem and as swap: a double count.
2181          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2182          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2183          * may call delete_from_swap_cache() before we reach here.
2184          */
2185         if (do_swap_account && PageSwapCache(page)) {
2186                 swp_entry_t ent = {.val = page_private(page)};
2187                 unsigned short id;
2188                 struct mem_cgroup *memcg;
2189
2190                 id = swap_cgroup_record(ent, 0);
2191                 rcu_read_lock();
2192                 memcg = mem_cgroup_lookup(id);
2193                 if (memcg) {
2194                         /*
2195                          * This recorded memcg may be an obsolete one, so avoid
2196                          * calling css_tryget.
2197                          */
2198                         if (!mem_cgroup_is_root(memcg))
2199                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2200                         mem_cgroup_swap_statistics(memcg, false);
2201                         mem_cgroup_put(memcg);
2202                 }
2203                 rcu_read_unlock();
2204         }
2205         /*
2206          * At swapin, we may charge against a cgroup which has no tasks, so
2207          * rmdir()->pre_destroy() can be called while we do this charge.
2208          * In that case, we need to call pre_destroy() again; check it here.
2209          */
2210         cgroup_release_and_wakeup_rmdir(&ptr->css);
2211 }
2212
2213 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2214 {
2215         __mem_cgroup_commit_charge_swapin(page, ptr,
2216                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2217 }
2218
2219 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2220 {
2221         if (mem_cgroup_disabled())
2222                 return;
2223         if (!mem)
2224                 return;
2225         mem_cgroup_cancel_charge(mem);
2226 }
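
/*
 * Illustrative sketch, not part of memcontrol.c: the try/commit/cancel
 * protocol described above, as a swap-in fault handler would use it. The
 * real caller is do_swap_page() in mm/memory.c; the pte_still_matches flag
 * stands in for its pte_same() check and is an assumption of this sketch.
 */
static int example_swapin_charge(struct mm_struct *mm, struct page *page,
                                 bool pte_still_matches)
{
        struct mem_cgroup *ptr = NULL;

        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
                return -ENOMEM;                         /* charge failed */

        if (pte_still_matches)
                mem_cgroup_commit_charge_swapin(page, ptr);     /* mark USED */
        else
                mem_cgroup_cancel_charge_swapin(ptr);           /* undo charge */
        return 0;
}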
2227
2228 static void
2229 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2230 {
2231         struct memcg_batch_info *batch = NULL;
2232         bool uncharge_memsw = true;
2233         /* If swapout, usage of swap doesn't decrease */
2234         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2235                 uncharge_memsw = false;
2236
2237         batch = &current->memcg_batch;
2238         /*
2239          * Usually, we do css_get() when we remember a memcg pointer.
2240          * But in this case, we keep res->usage until the end of a series of
2241          * uncharges, so it's ok to ignore the memcg's refcnt.
2242          */
2243         if (!batch->memcg)
2244                 batch->memcg = mem;
2245         /*
2246          * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2247          * In those cases, all pages freed continuously can be expected to be in
2248          * the same cgroup, and we have a chance to coalesce uncharges.
2249          * But we uncharge one by one if this task is being killed by OOM (TIF_MEMDIE),
2250          * because we want to do the uncharge as soon as possible.
2251          */
2252
2253         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2254                 goto direct_uncharge;
2255
2256         /*
2257          * In the typical case, batch->memcg == mem. This means we can
2258          * merge a series of uncharges into one uncharge of the res_counter.
2259          * If not, we uncharge the res_counter one by one.
2260          */
2261         if (batch->memcg != mem)
2262                 goto direct_uncharge;
2263         /* remember freed charge and uncharge it later */
2264         batch->bytes += PAGE_SIZE;
2265         if (uncharge_memsw)
2266                 batch->memsw_bytes += PAGE_SIZE;
2267         return;
2268 direct_uncharge:
2269         res_counter_uncharge(&mem->res, PAGE_SIZE);
2270         if (uncharge_memsw)
2271                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2272         if (unlikely(batch->memcg != mem))
2273                 memcg_oom_recover(mem);
2274         return;
2275 }
2276
2277 /*
2278  * uncharge if !page_mapped(page)
2279  */
2280 static struct mem_cgroup *
2281 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2282 {
2283         struct page_cgroup *pc;
2284         struct mem_cgroup *mem = NULL;
2285
2286         if (mem_cgroup_disabled())
2287                 return NULL;
2288
2289         if (PageSwapCache(page))
2290                 return NULL;
2291
2292         /*
2293          * Check if our page_cgroup is valid
2294          */
2295         pc = lookup_page_cgroup(page);
2296         if (unlikely(!pc || !PageCgroupUsed(pc)))
2297                 return NULL;
2298
2299         lock_page_cgroup(pc);
2300
2301         mem = pc->mem_cgroup;
2302
2303         if (!PageCgroupUsed(pc))
2304                 goto unlock_out;
2305
2306         switch (ctype) {
2307         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2308         case MEM_CGROUP_CHARGE_TYPE_DROP:
2309                 /* See mem_cgroup_prepare_migration() */
2310                 if (page_mapped(page) || PageCgroupMigration(pc))
2311                         goto unlock_out;
2312                 break;
2313         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2314                 if (!PageAnon(page)) {  /* Shared memory */
2315                         if (page->mapping && !page_is_file_cache(page))
2316                                 goto unlock_out;
2317                 } else if (page_mapped(page)) /* Anon */
2318                                 goto unlock_out;
2319                 break;
2320         default:
2321                 break;
2322         }
2323
2324         if (!mem_cgroup_is_root(mem))
2325                 __do_uncharge(mem, ctype);
2326         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2327                 mem_cgroup_swap_statistics(mem, true);
2328         mem_cgroup_charge_statistics(mem, pc, false);
2329
2330         ClearPageCgroupUsed(pc);
2331         /*
2332          * pc->mem_cgroup is not cleared here. It will be accessed when the page
2333          * is freed from the LRU. This is safe because an uncharged page is
2334          * expected not to be reused (it is freed soon). The exception is
2335          * SwapCache, which is handled by special functions.
2336          */
2337
2338         unlock_page_cgroup(pc);
2339
2340         memcg_check_events(mem, page);
2341         /* at swapout, this memcg will be accessed to record to swap */
2342         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2343                 css_put(&mem->css);
2344
2345         return mem;
2346
2347 unlock_out:
2348         unlock_page_cgroup(pc);
2349         return NULL;
2350 }
2351
2352 void mem_cgroup_uncharge_page(struct page *page)
2353 {
2354         /* early check. */
2355         if (page_mapped(page))
2356                 return;
2357         if (page->mapping && !PageAnon(page))
2358                 return;
2359         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2360 }
2361
2362 void mem_cgroup_uncharge_cache_page(struct page *page)
2363 {
2364         VM_BUG_ON(page_mapped(page));
2365         VM_BUG_ON(page->mapping);
2366         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2367 }
2368
2369 /*
2370  * mem_cgroup_uncharge_start/end is called in unmap_page_range/invalidate/truncate.
2371  * In those cases, pages are freed continuously and we can expect them to
2372  * be in the same memcg. All these callers themselves limit the number of
2373  * pages freed at once, so uncharge_start/end() is called properly.
2374  * This may be called multiple (e.g. 2) times in one context.
2375  */
2376
2377 void mem_cgroup_uncharge_start(void)
2378 {
2379         current->memcg_batch.do_batch++;
2380         /* We can nest. */
2381         if (current->memcg_batch.do_batch == 1) {
2382                 current->memcg_batch.memcg = NULL;
2383                 current->memcg_batch.bytes = 0;
2384                 current->memcg_batch.memsw_bytes = 0;
2385         }
2386 }
2387
2388 void mem_cgroup_uncharge_end(void)
2389 {
2390         struct memcg_batch_info *batch = &current->memcg_batch;
2391
2392         if (!batch->do_batch)
2393                 return;
2394
2395         batch->do_batch--;
2396         if (batch->do_batch) /* If stacked, do nothing. */
2397                 return;
2398
2399         if (!batch->memcg)
2400                 return;
2401         /*
2402          * This "batch->memcg" is valid without any css_get/put etc.,
2403          * because we hide the charges behind us.
2404          */
2405         if (batch->bytes)
2406                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2407         if (batch->memsw_bytes)
2408                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2409         memcg_oom_recover(batch->memcg);
2410         /* forget this pointer (for sanity check) */
2411         batch->memcg = NULL;
2412 }
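
/*
 * Illustrative sketch, not part of memcontrol.c: how the batching above is
 * meant to be used. Truncate/invalidate paths bracket a run of page frees
 * with uncharge_start/end so a whole batch ends up as a single
 * res_counter_uncharge(). The loop body is a simplified assumption about
 * the mm/truncate.c callers.
 */
static void example_uncharge_batch(struct page **pages, int nr)
{
        int i;

        mem_cgroup_uncharge_start();
        for (i = 0; i < nr; i++)
                mem_cgroup_uncharge_cache_page(pages[i]);  /* coalesced */
        mem_cgroup_uncharge_end();      /* one res_counter_uncharge per memcg */
}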
2413
2414 #ifdef CONFIG_SWAP
2415 /*
2416  * Called after __delete_from_swap_cache(); drops the "page" account.
2417  * memcg information is recorded in the swap_cgroup of "ent".
2418  */
2419 void
2420 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2421 {
2422         struct mem_cgroup *memcg;
2423         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2424
2425         if (!swapout) /* this was a swap cache but the swap is unused ! */
2426                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2427
2428         memcg = __mem_cgroup_uncharge_common(page, ctype);
2429
2430         /* record memcg information */
2431         if (do_swap_account && swapout && memcg) {
2432                 swap_cgroup_record(ent, css_id(&memcg->css));
2433                 mem_cgroup_get(memcg);
2434         }
2435         if (swapout && memcg)
2436                 css_put(&memcg->css);
2437 }
2438 #endif
2439
2440 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2441 /*
2442  * Called from swap_entry_free(). Removes the record in swap_cgroup and
2443  * uncharges the "memsw" account.
2444  */
2445 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2446 {
2447         struct mem_cgroup *memcg;
2448         unsigned short id;
2449
2450         if (!do_swap_account)
2451                 return;
2452
2453         id = swap_cgroup_record(ent, 0);
2454         rcu_read_lock();
2455         memcg = mem_cgroup_lookup(id);
2456         if (memcg) {
2457                 /*
2458                  * We uncharge this because the swap is freed.
2459                  * This memcg may be an obsolete one; we avoid calling css_tryget.
2460                  */
2461                 if (!mem_cgroup_is_root(memcg))
2462                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2463                 mem_cgroup_swap_statistics(memcg, false);
2464                 mem_cgroup_put(memcg);
2465         }
2466         rcu_read_unlock();
2467 }
2468
2469 /**
2470  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2471  * @entry: swap entry to be moved
2472  * @from:  mem_cgroup which the entry is moved from
2473  * @to:  mem_cgroup which the entry is moved to
2474  * @need_fixup: whether we should fixup res_counters and refcounts.
2475  *
2476  * It succeeds only when the swap_cgroup's record for this entry is the same
2477  * as the mem_cgroup's id of @from.
2478  *
2479  * Returns 0 on success, -EINVAL on failure.
2480  *
2481  * The caller must have charged to @to, IOW, called res_counter_charge() about
2482  * both res and memsw, and called css_get().
2483  */
2484 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2485                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2486 {
2487         unsigned short old_id, new_id;
2488
2489         old_id = css_id(&from->css);
2490         new_id = css_id(&to->css);
2491
2492         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2493                 mem_cgroup_swap_statistics(from, false);
2494                 mem_cgroup_swap_statistics(to, true);
2495                 /*
2496                  * This function is only called from task migration context now.
2497                  * It postpones res_counter and refcount handling till the end
2498                  * of task migration(mem_cgroup_clear_mc()) for performance
2499                  * improvement. But we cannot postpone mem_cgroup_get(to)
2500                  * because if the process that has been moved to @to does
2501                  * swap-in, the refcount of @to might be decreased to 0.
2502                  */
2503                 mem_cgroup_get(to);
2504                 if (need_fixup) {
2505                         if (!mem_cgroup_is_root(from))
2506                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2507                         mem_cgroup_put(from);
2508                         /*
2509                          * we charged both to->res and to->memsw, so we should
2510                          * uncharge to->res.
2511                          */
2512                         if (!mem_cgroup_is_root(to))
2513                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2514                         css_put(&to->css);
2515                 }
2516                 return 0;
2517         }
2518         return -EINVAL;
2519 }
2520 #else
2521 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2522                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2523 {
2524         return -EINVAL;
2525 }
2526 #endif
2527
2528 /*
2529  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2530  * page belongs to.
2531  */
2532 int mem_cgroup_prepare_migration(struct page *page,
2533         struct page *newpage, struct mem_cgroup **ptr)
2534 {
2535         struct page_cgroup *pc;
2536         struct mem_cgroup *mem = NULL;
2537         enum charge_type ctype;
2538         int ret = 0;
2539
2540         if (mem_cgroup_disabled())
2541                 return 0;
2542
2543         pc = lookup_page_cgroup(page);
2544         lock_page_cgroup(pc);
2545         if (PageCgroupUsed(pc)) {
2546                 mem = pc->mem_cgroup;
2547                 css_get(&mem->css);
2548                 /*
2549                  * When migrating an anonymous page, its mapcount goes down
2550                  * to 0 and uncharge() will be called. But even if it's fully
2551                  * unmapped, migration may fail and this page has to be
2552                  * charged again. We set the MIGRATION flag here and delay the
2553                  * uncharge until end_migration() is called.
2554                  *
2555                  * Corner Case Thinking
2556                  * A)
2557                  * When the old page was mapped as Anon and is unmapped and freed
2558                  * while migration is ongoing:
2559                  * if unmap finds the old page, its uncharge() will be delayed
2560                  * until end_migration(). If unmap finds the new page, it's
2561                  * uncharged when unmap makes the mapcount go 1->0. If the unmap
2562                  * code finds a swap migration entry, the new page will not be
2563                  * mapped and end_migration() will find it (mapcount == 0).
2564                  *
2565                  * B)
2566                  * When the old page was mapped but migration fails, the kernel
2567                  * remaps it. A charge for it is kept by the MIGRATION flag even
2568                  * if the mapcount goes down to 0. We can do the remap successfully
2569                  * without charging it again.
2570                  *
2571                  * C)
2572                  * The "old" page is under lock_page() until the end of
2573                  * migration, so the old page itself will not be swapped out.
2574                  * If the new page is swapped out before end_migration, our
2575                  * hook into the usual swap-out path will catch the event.
2576                  */
2577                 if (PageAnon(page))
2578                         SetPageCgroupMigration(pc);
2579         }
2580         unlock_page_cgroup(pc);
2581         /*
2582          * If the page is not charged at this point,
2583          * we return here.
2584          */
2585         if (!mem)
2586                 return 0;
2587
2588         *ptr = mem;
2589         ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2590         css_put(&mem->css);/* drop extra refcnt */
2591         if (ret || *ptr == NULL) {
2592                 if (PageAnon(page)) {
2593                         lock_page_cgroup(pc);
2594                         ClearPageCgroupMigration(pc);
2595                         unlock_page_cgroup(pc);
2596                         /*
2597                          * The old page may be fully unmapped while we kept it.
2598                          */
2599                         mem_cgroup_uncharge_page(page);
2600                 }
2601                 return -ENOMEM;
2602         }
2603         /*
2604          * We charge new page before it's used/mapped. So, even if unlock_page()
2605          * is called before end_migration, we can catch all events on this new
2606          * page. In the case new page is migrated but not remapped, new page's
2607          * mapcount will be finally 0 and we call uncharge in end_migration().
2608          */
2609         pc = lookup_page_cgroup(newpage);
2610         if (PageAnon(page))
2611                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2612         else if (page_is_file_cache(page))
2613                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2614         else
2615                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2616         __mem_cgroup_commit_charge(mem, pc, ctype);
2617         return ret;
2618 }
2619
2620 /* remove redundant charge if migration failed*/
2621 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2622         struct page *oldpage, struct page *newpage)
2623 {
2624         struct page *used, *unused;
2625         struct page_cgroup *pc;
2626
2627         if (!mem)
2628                 return;
2629         /* blocks rmdir() */
2630         cgroup_exclude_rmdir(&mem->css);
2631         /* at migration success, oldpage->mapping is NULL. */
2632         if (oldpage->mapping) {
2633                 used = oldpage;
2634                 unused = newpage;
2635         } else {
2636                 used = newpage;
2637                 unused = oldpage;
2638         }
2639         /*
2640          * We disallowed uncharging pages under migration because the page's
2641          * mapcount goes down to zero temporarily.
2642          * Clear the flag and check whether the page should still be charged.
2643          */
2644         pc = lookup_page_cgroup(oldpage);
2645         lock_page_cgroup(pc);
2646         ClearPageCgroupMigration(pc);
2647         unlock_page_cgroup(pc);
2648
2649         __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2650
2651         /*
2652          * If a page is file cache, the radix-tree replacement is atomic
2653          * and we can skip this check. When it was an Anon page, its mapcount
2654          * went down to 0, but because we added the MIGRATION flag it is not
2655          * uncharged yet. There are several cases, but the page->mapcount check
2656          * and the USED bit check in mem_cgroup_uncharge_page() do enough
2657          * checking. (See prepare_charge() also.)
2658          */
2659         if (PageAnon(used))
2660                 mem_cgroup_uncharge_page(used);
2661         /*
2662          * At migration, we may charge against a cgroup which has no
2663          * tasks.
2664          * So rmdir()->pre_destroy() can be called while we do this charge.
2665          * In that case, we need to call pre_destroy() again; check it here.
2666          */
2667         cgroup_release_and_wakeup_rmdir(&mem->css);
2668 }
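
/*
 * Illustrative sketch, not part of memcontrol.c: how the two migration hooks
 * pair up around a single page migration, roughly following the mm/migrate.c
 * caller (the middle step is elided and the surrounding details are assumed).
 */
static int example_migrate_one_page(struct page *page, struct page *newpage)
{
        struct mem_cgroup *mem = NULL;
        int rc;

        /* charge newpage and mark the old page_cgroup with MIGRATION */
        rc = mem_cgroup_prepare_migration(page, newpage, &mem);
        if (rc)
                return rc;

        /* ... copy contents, move ptes, update the radix-tree here ... */

        /* drop the charge kept for whichever page ended up unused */
        mem_cgroup_end_migration(mem, page, newpage);
        return 0;
}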
2669
2670 /*
2671  * A call to try to shrink memory usage on charge failure at shmem's swapin.
2672  * Calling hierarchical_reclaim is not enough because we should update
2673  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking a global OOM.
2674  * Moreover, considering the hierarchy, we should reclaim from mem_over_limit,
2675  * not from the memcg this page would be charged to.
2676  * try_charge_swapin does all of this work properly.
2677  */
2678 int mem_cgroup_shmem_charge_fallback(struct page *page,
2679                             struct mm_struct *mm,
2680                             gfp_t gfp_mask)
2681 {
2682         struct mem_cgroup *mem = NULL;
2683         int ret;
2684
2685         if (mem_cgroup_disabled())
2686                 return 0;
2687
2688         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2689         if (!ret)
2690                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2691
2692         return ret;
2693 }
2694
2695 static DEFINE_MUTEX(set_limit_mutex);
2696
2697 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2698                                 unsigned long long val)
2699 {
2700         int retry_count;
2701         u64 memswlimit, memlimit;
2702         int ret = 0;
2703         int children = mem_cgroup_count_children(memcg);
2704         u64 curusage, oldusage;
2705         int enlarge;
2706
2707         /*
2708          * To keep hierarchical_reclaim simple, how long we should retry
2709          * depends on the caller. We set our retry count to be a function
2710          * of the number of children we should visit in this loop.
2711          */
2712         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2713
2714         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2715
2716         enlarge = 0;
2717         while (retry_count) {
2718                 if (signal_pending(current)) {
2719                         ret = -EINTR;
2720                         break;
2721                 }
2722                 /*
2723                  * Rather than hiding all of this in some function, I do it in
2724                  * an open-coded manner so you can see what it really does.
2725                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2726                  */
2727                 mutex_lock(&set_limit_mutex);
2728                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2729                 if (memswlimit < val) {
2730                         ret = -EINVAL;
2731                         mutex_unlock(&set_limit_mutex);
2732                         break;
2733                 }
2734
2735                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2736                 if (memlimit < val)
2737                         enlarge = 1;
2738
2739                 ret = res_counter_set_limit(&memcg->res, val);
2740                 if (!ret) {
2741                         if (memswlimit == val)
2742                                 memcg->memsw_is_minimum = true;
2743                         else
2744                                 memcg->memsw_is_minimum = false;
2745                 }
2746                 mutex_unlock(&set_limit_mutex);
2747
2748                 if (!ret)
2749                         break;
2750
2751                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2752                                                 MEM_CGROUP_RECLAIM_SHRINK);
2753                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2754                 /* Usage is reduced ? */
2755                 if (curusage >= oldusage)
2756                         retry_count--;
2757                 else
2758                         oldusage = curusage;
2759         }
2760         if (!ret && enlarge)
2761                 memcg_oom_recover(memcg);
2762
2763         return ret;
2764 }
2765
2766 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2767                                         unsigned long long val)
2768 {
2769         int retry_count;
2770         u64 memlimit, memswlimit, oldusage, curusage;
2771         int children = mem_cgroup_count_children(memcg);
2772         int ret = -EBUSY;
2773         int enlarge = 0;
2774
2775         /* see mem_cgroup_resize_limit */
2776         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2777         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2778         while (retry_count) {
2779                 if (signal_pending(current)) {
2780                         ret = -EINTR;
2781                         break;
2782                 }
2783                 /*
2784                  * Rather than hiding all of this in some function, I do it in
2785                  * an open-coded manner so you can see what it really does.
2786                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2787                  */
2788                 mutex_lock(&set_limit_mutex);
2789                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2790                 if (memlimit > val) {
2791                         ret = -EINVAL;
2792                         mutex_unlock(&set_limit_mutex);
2793                         break;
2794                 }
2795                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2796                 if (memswlimit < val)
2797                         enlarge = 1;
2798                 ret = res_counter_set_limit(&memcg->memsw, val);
2799                 if (!ret) {
2800                         if (memlimit == val)
2801                                 memcg->memsw_is_minimum = true;
2802                         else
2803                                 memcg->memsw_is_minimum = false;
2804                 }
2805                 mutex_unlock(&set_limit_mutex);
2806
2807                 if (!ret)
2808                         break;
2809
2810                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2811                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2812                                                 MEM_CGROUP_RECLAIM_SHRINK);
2813                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2814                 /* Usage is reduced ? */
2815                 if (curusage >= oldusage)
2816                         retry_count--;
2817                 else
2818                         oldusage = curusage;
2819         }
2820         if (!ret && enlarge)
2821                 memcg_oom_recover(memcg);
2822         return ret;
2823 }
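
/*
 * Illustrative sketch, not part of memcontrol.c: the resize functions above
 * are reached when userspace writes a new value to memory.limit_in_bytes
 * (or memory.memsw.limit_in_bytes) of a cgroup. The mount point and group
 * name below are assumptions.
 */
#if 0   /* userspace example, not kernel code */
#include <fcntl.h>
#include <unistd.h>

static int example_set_memcg_limit(void)
{
        static const char val[] = "67108864";           /* 64M */
        int fd = open("/cgroups/memory/grp0/memory.limit_in_bytes", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, sizeof(val) - 1) < 0) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
#endif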
2824
2825 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2826                                                 gfp_t gfp_mask, int nid,
2827                                                 int zid)
2828 {
2829         unsigned long nr_reclaimed = 0;
2830         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2831         unsigned long reclaimed;
2832         int loop = 0;
2833         struct mem_cgroup_tree_per_zone *mctz;
2834         unsigned long long excess;
2835
2836         if (order > 0)
2837                 return 0;
2838
2839         mctz = soft_limit_tree_node_zone(nid, zid);
2840         /*
2841          * This loop can run for a while, especially if mem_cgroups continuously
2842          * keep exceeding their soft limit and putting the system under
2843          * pressure.
2844          */
2845         do {
2846                 if (next_mz)
2847                         mz = next_mz;
2848                 else
2849                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2850                 if (!mz)
2851                         break;
2852
2853                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2854                                                 gfp_mask,
2855                                                 MEM_CGROUP_RECLAIM_SOFT);
2856                 nr_reclaimed += reclaimed;
2857                 spin_lock(&mctz->lock);
2858
2859                 /*
2860                  * If we failed to reclaim anything from this memory cgroup
2861                  * it is time to move on to the next cgroup
2862                  */
2863                 next_mz = NULL;
2864                 if (!reclaimed) {
2865                         do {
2866                                 /*
2867                                  * Loop until we find yet another one.
2868                                  *
2869                                  * By the time we get the soft_limit lock
2870                                  * again, someone might have added the
2871                                  * group back on the RB tree. Iterate to
2872                                  * make sure we get a different mem.
2873                                  * mem_cgroup_largest_soft_limit_node returns
2874                                  * NULL if no other cgroup is present on
2875                                  * the tree
2876                                  */
2877                                 next_mz =
2878                                 __mem_cgroup_largest_soft_limit_node(mctz);
2879                                 if (next_mz == mz) {
2880                                         css_put(&next_mz->mem->css);
2881                                         next_mz = NULL;
2882                                 } else /* next_mz == NULL or other memcg */
2883                                         break;
2884                         } while (1);
2885                 }
2886                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2887                 excess = res_counter_soft_limit_excess(&mz->mem->res);
2888                 /*
2889                  * One school of thought says that we should not add
2890                  * back the node to the tree if reclaim returns 0.
2891                  * But our reclaim could return 0 simply because, due
2892                  * to priority, we are exposing a smaller subset of
2893                  * memory to reclaim from. Consider this as a longer
2894                  * term TODO.
2895                  */
2896                 /* If excess == 0, no tree ops */
2897                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2898                 spin_unlock(&mctz->lock);
2899                 css_put(&mz->mem->css);
2900                 loop++;
2901                 /*
2902                  * Could not reclaim anything and there are no more
2903                  * mem cgroups to try or we seem to be looping without
2904                  * reclaiming anything.
2905                  */
2906                 if (!nr_reclaimed &&
2907                         (next_mz == NULL ||
2908                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2909                         break;
2910         } while (!nr_reclaimed);
2911         if (next_mz)
2912                 css_put(&next_mz->mem->css);
2913         return nr_reclaimed;
2914 }
2915
2916 /*
2917  * This routine traverses the page_cgroups in the given list and drops them all.
2918  * *And* this routine doesn't reclaim the pages themselves, just removes the page_cgroups.
2919  */
2920 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2921                                 int node, int zid, enum lru_list lru)
2922 {
2923         struct zone *zone;
2924         struct mem_cgroup_per_zone *mz;
2925         struct page_cgroup *pc, *busy;
2926         unsigned long flags, loop;
2927         struct list_head *list;
2928         int ret = 0;
2929
2930         zone = &NODE_DATA(node)->node_zones[zid];
2931         mz = mem_cgroup_zoneinfo(mem, node, zid);
2932         list = &mz->lists[lru];
2933
2934         loop = MEM_CGROUP_ZSTAT(mz, lru);
2935         /* give some margin against EBUSY etc...*/
2936         loop += 256;
2937         busy = NULL;
2938         while (loop--) {
2939                 ret = 0;
2940                 spin_lock_irqsave(&zone->lru_lock, flags);
2941                 if (list_empty(list)) {
2942                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2943                         break;
2944                 }
2945                 pc = list_entry(list->prev, struct page_cgroup, lru);
2946                 if (busy == pc) {
2947                         list_move(&pc->lru, list);
2948                         busy = NULL;
2949                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2950                         continue;
2951                 }
2952                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2953
2954                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2955                 if (ret == -ENOMEM)
2956                         break;
2957
2958                 if (ret == -EBUSY || ret == -EINVAL) {
2959                         /* found lock contention or "pc" is obsolete. */
2960                         busy = pc;
2961                         cond_resched();
2962                 } else
2963                         busy = NULL;
2964         }
2965
2966         if (!ret && !list_empty(list))
2967                 return -EBUSY;
2968         return ret;
2969 }
2970
2971 /*
2972  * Make the mem_cgroup's charge 0 if there are no tasks in it.
2973  * This makes it possible to delete the mem_cgroup.
2974  */
2975 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2976 {
2977         int ret;
2978         int node, zid, shrink;
2979         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2980         struct cgroup *cgrp = mem->css.cgroup;
2981
2982         css_get(&mem->css);
2983
2984         shrink = 0;
2985         /* should free all ? */
2986         if (free_all)
2987                 goto try_to_free;
2988 move_account:
2989         do {
2990                 ret = -EBUSY;
2991                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2992                         goto out;
2993                 ret = -EINTR;
2994                 if (signal_pending(current))
2995                         goto out;
2996                 /* This makes sure all *used* pages are on an LRU. */
2997                 lru_add_drain_all();
2998                 drain_all_stock_sync();
2999                 ret = 0;
3000                 for_each_node_state(node, N_HIGH_MEMORY) {
3001                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3002                                 enum lru_list l;
3003                                 for_each_lru(l) {
3004                                         ret = mem_cgroup_force_empty_list(mem,
3005                                                         node, zid, l);
3006                                         if (ret)
3007                                                 break;
3008                                 }
3009                         }
3010                         if (ret)
3011                                 break;
3012                 }
3013                 memcg_oom_recover(mem);
3014                 /* the parent cgroup doesn't seem to have enough memory */
3015                 if (ret == -ENOMEM)
3016                         goto try_to_free;
3017                 cond_resched();
3018         /* "ret" should also be checked to ensure all lists are empty. */
3019         } while (mem->res.usage > 0 || ret);
3020 out:
3021         css_put(&mem->css);
3022         return ret;
3023
3024 try_to_free:
3025         /* return -EBUSY if there is a task or if we come here twice. */
3026         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3027                 ret = -EBUSY;
3028                 goto out;
3029         }
3030         /* we call try-to-free pages to make this cgroup empty */
3031         lru_add_drain_all();
3032         /* try to free all pages in this cgroup */
3033         shrink = 1;
3034         while (nr_retries && mem->res.usage > 0) {
3035                 int progress;
3036
3037                 if (signal_pending(current)) {
3038                         ret = -EINTR;
3039                         goto out;
3040                 }
3041                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3042                                                 false, get_swappiness(mem));
3043                 if (!progress) {
3044                         nr_retries--;
3045                         /* maybe some writeback is necessary */
3046                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3047                 }
3048
3049         }
3050         lru_add_drain();
3051         /* try move_account...there may be some *locked* pages. */
3052         goto move_account;
3053 }
3054
3055 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3056 {
3057         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3058 }
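/*
 * Illustrative usage of the handler above (mount point assumed): an
 * administrator can drain a group's charges before removing it with,
 * roughly,
 *
 *     # echo 0 > /cgroup/memory/<group>/memory.force_empty
 *
 * The written value is ignored; the write itself is the trigger.
 */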
3059
3060
3061 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3062 {
3063         return mem_cgroup_from_cont(cont)->use_hierarchy;
3064 }
3065
3066 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3067                                         u64 val)
3068 {
3069         int retval = 0;
3070         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3071         struct cgroup *parent = cont->parent;
3072         struct mem_cgroup *parent_mem = NULL;
3073
3074         if (parent)
3075                 parent_mem = mem_cgroup_from_cont(parent);
3076
3077         cgroup_lock();
3078         /*
3079          * If parent's use_hierarchy is set, we can't make any modifications
3080          * in the child subtrees. If it is unset, then the change can
3081          * occur, provided the current cgroup has no children.
3082          *
3083          * For the root cgroup, parent_mem is NULL; we allow the value to
3084          * be set if there are no children.
3085          */
3086         if ((!parent_mem || !parent_mem->use_hierarchy) &&
3087                                 (val == 1 || val == 0)) {
3088                 if (list_empty(&cont->children))
3089                         mem->use_hierarchy = val;
3090                 else
3091                         retval = -EBUSY;
3092         } else
3093                 retval = -EINVAL;
3094         cgroup_unlock();
3095
3096         return retval;
3097 }
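/*
 * Illustrative usage (paths assumed): use_hierarchy can only be changed
 * while the group has no children and its parent does not already
 * enforce hierarchy, e.g.
 *
 *     # echo 1 > /cgroup/memory/parent/memory.use_hierarchy
 *     # mkdir /cgroup/memory/parent/child    # child inherits the setting
 */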
3098
3099 struct mem_cgroup_idx_data {
3100         s64 val;
3101         enum mem_cgroup_stat_index idx;
3102 };
3103
3104 static int
3105 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
3106 {
3107         struct mem_cgroup_idx_data *d = data;
3108         d->val += mem_cgroup_read_stat(mem, d->idx);
3109         return 0;
3110 }
3111
3112 static void
3113 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3114                                 enum mem_cgroup_stat_index idx, s64 *val)
3115 {
3116         struct mem_cgroup_idx_data d;
3117         d.idx = idx;
3118         d.val = 0;
3119         mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
3120         *val = d.val;
3121 }
3122
3123 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3124 {
3125         u64 idx_val, val;
3126
3127         if (!mem_cgroup_is_root(mem)) {
3128                 if (!swap)
3129                         return res_counter_read_u64(&mem->res, RES_USAGE);
3130                 else
3131                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
3132         }
3133
3134         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
3135         val = idx_val;
3136         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
3137         val += idx_val;
3138
3139         if (swap) {
3140                 mem_cgroup_get_recursive_idx_stat(mem,
3141                                 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
3142                 val += idx_val;
3143         }
3144
3145         return val << PAGE_SHIFT;
3146 }
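/*
 * Note on the special case above: in this kernel, charges are not
 * accounted to the root cgroup's res_counters, so the root's usage is
 * reconstructed from the per-cpu statistics (cache + rss [+ swapout])
 * summed over the whole hierarchy instead of being read from mem->res.
 */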
3147
3148 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3149 {
3150         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3151         u64 val;
3152         int type, name;
3153
3154         type = MEMFILE_TYPE(cft->private);
3155         name = MEMFILE_ATTR(cft->private);
3156         switch (type) {
3157         case _MEM:
3158                 if (name == RES_USAGE)
3159                         val = mem_cgroup_usage(mem, false);
3160                 else
3161                         val = res_counter_read_u64(&mem->res, name);
3162                 break;
3163         case _MEMSWAP:
3164                 if (name == RES_USAGE)
3165                         val = mem_cgroup_usage(mem, true);
3166                 else
3167                         val = res_counter_read_u64(&mem->memsw, name);
3168                 break;
3169         default:
3170                 BUG();
3171                 break;
3172         }
3173         return val;
3174 }
3175 /*
3176  * The users of this function are the write handlers for
3177  * RES_LIMIT and RES_SOFT_LIMIT.
3178  */
3179 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3180                             const char *buffer)
3181 {
3182         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3183         int type, name;
3184         unsigned long long val;
3185         int ret;
3186
3187         type = MEMFILE_TYPE(cft->private);
3188         name = MEMFILE_ATTR(cft->private);
3189         switch (name) {
3190         case RES_LIMIT:
3191                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3192                         ret = -EINVAL;
3193                         break;
3194                 }
3195                 /* This function does all the necessary parsing; reuse it. */
3196                 ret = res_counter_memparse_write_strategy(buffer, &val);
3197                 if (ret)
3198                         break;
3199                 if (type == _MEM)
3200                         ret = mem_cgroup_resize_limit(memcg, val);
3201                 else
3202                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3203                 break;
3204         case RES_SOFT_LIMIT:
3205                 ret = res_counter_memparse_write_strategy(buffer, &val);
3206                 if (ret)
3207                         break;
3208                 /*
3209                  * For memsw, soft limits are hard to implement in terms
3210                  * of semantics; for now, we only support soft limits on
3211                  * memory, not on memory+swap.
3212                  */
3213                 if (type == _MEM)
3214                         ret = res_counter_set_soft_limit(&memcg->res, val);
3215                 else
3216                         ret = -EINVAL;
3217                 break;
3218         default:
3219                 ret = -EINVAL; /* should be BUG() ? */
3220                 break;
3221         }
3222         return ret;
3223 }
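/*
 * Illustrative usage of the write handler above (paths assumed):
 * res_counter_memparse_write_strategy() accepts human-readable sizes, so
 * limits can be set with suffixes, e.g.
 *
 *     # echo 512M > /cgroup/memory/<group>/memory.limit_in_bytes
 *     # echo 1G   > /cgroup/memory/<group>/memory.soft_limit_in_bytes
 *
 * and writing "-1" removes the limit again.
 */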
3224
3225 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3226                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3227 {
3228         struct cgroup *cgroup;
3229         unsigned long long min_limit, min_memsw_limit, tmp;
3230
3231         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3232         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3233         cgroup = memcg->css.cgroup;
3234         if (!memcg->use_hierarchy)
3235                 goto out;
3236
3237         while (cgroup->parent) {
3238                 cgroup = cgroup->parent;
3239                 memcg = mem_cgroup_from_cont(cgroup);
3240                 if (!memcg->use_hierarchy)
3241                         break;
3242                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3243                 min_limit = min(min_limit, tmp);
3244                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3245                 min_memsw_limit = min(min_memsw_limit, tmp);
3246         }
3247 out:
3248         *mem_limit = min_limit;
3249         *memsw_limit = min_memsw_limit;
3250         return;
3251 }
3252
3253 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3254 {
3255         struct mem_cgroup *mem;
3256         int type, name;
3257
3258         mem = mem_cgroup_from_cont(cont);
3259         type = MEMFILE_TYPE(event);
3260         name = MEMFILE_ATTR(event);
3261         switch (name) {
3262         case RES_MAX_USAGE:
3263                 if (type == _MEM)
3264                         res_counter_reset_max(&mem->res);
3265                 else
3266                         res_counter_reset_max(&mem->memsw);
3267                 break;
3268         case RES_FAILCNT:
3269                 if (type == _MEM)
3270                         res_counter_reset_failcnt(&mem->res);
3271                 else
3272                         res_counter_reset_failcnt(&mem->memsw);
3273                 break;
3274         }
3275
3276         return 0;
3277 }
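/*
 * Illustrative usage (paths assumed): max_usage and failcnt are reset by
 * writing to them; the value written is not interpreted, e.g.
 *
 *     # echo 0 > /cgroup/memory/<group>/memory.max_usage_in_bytes
 *     # echo 0 > /cgroup/memory/<group>/memory.failcnt
 */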
3278
3279 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3280                                         struct cftype *cft)
3281 {
3282         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3283 }
3284
3285 #ifdef CONFIG_MMU
3286 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3287                                         struct cftype *cft, u64 val)
3288 {
3289         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3290
3291         if (val >= (1 << NR_MOVE_TYPE))
3292                 return -EINVAL;
3293         /*
3294          * We check this value several times in both can_attach() and
3295          * attach(), so we need the cgroup lock to prevent the value from
3296          * becoming inconsistent.
3297          */
3298         cgroup_lock();
3299         mem->move_charge_at_immigrate = val;
3300         cgroup_unlock();
3301
3302         return 0;
3303 }
3304 #else
3305 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3306                                         struct cftype *cft, u64 val)
3307 {
3308         return -ENOSYS;
3309 }
3310 #endif
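/*
 * Illustrative usage (paths assumed): move_charge_at_immigrate is a
 * bitmask of NR_MOVE_TYPE bits selecting which kinds of charge follow a
 * task into the cgroup; roughly, bit 0 selects anonymous pages (and their
 * swap) and bit 1 selects file pages, so
 *
 *     # echo 3 > /cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * moves both when a task migrates in.
 */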
3311
3312
3313 /* For read statistics */
3314 enum {
3315         MCS_CACHE,
3316         MCS_RSS,
3317         MCS_FILE_MAPPED,
3318         MCS_PGPGIN,
3319         MCS_PGPGOUT,
3320         MCS_SWAP,
3321         MCS_INACTIVE_ANON,
3322         MCS_ACTIVE_ANON,
3323         MCS_INACTIVE_FILE,
3324         MCS_ACTIVE_FILE,
3325         MCS_UNEVICTABLE,
3326         NR_MCS_STAT,
3327 };
3328
3329 struct mcs_total_stat {
3330         s64 stat[NR_MCS_STAT];
3331 };
3332
3333 struct {
3334         char *local_name;
3335         char *total_name;
3336 } memcg_stat_strings[NR_MCS_STAT] = {
3337         {"cache", "total_cache"},
3338         {"rss", "total_rss"},
3339         {"mapped_file", "total_mapped_file"},
3340         {"pgpgin", "total_pgpgin"},
3341         {"pgpgout", "total_pgpgout"},
3342         {"swap", "total_swap"},
3343         {"inactive_anon", "total_inactive_anon"},
3344         {"active_anon", "total_active_anon"},
3345         {"inactive_file", "total_inactive_file"},
3346         {"active_file", "total_active_file"},
3347         {"unevictable", "total_unevictable"}
3348 };
3349
3350
3351 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3352 {
3353         struct mcs_total_stat *s = data;
3354         s64 val;
3355
3356         /* per cpu stat */
3357         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3358         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3359         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3360         s->stat[MCS_RSS] += val * PAGE_SIZE;
3361         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3362         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3363         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3364         s->stat[MCS_PGPGIN] += val;
3365         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3366         s->stat[MCS_PGPGOUT] += val;
3367         if (do_swap_account) {
3368                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3369                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3370         }
3371
3372         /* per zone stat */
3373         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3374         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3375         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3376         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3377         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3378         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3379         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3380         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3381         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3382         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3383         return 0;
3384 }
3385
3386 static void
3387 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3388 {
3389         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3390 }
3391
3392 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3393                                  struct cgroup_map_cb *cb)
3394 {
3395         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3396         struct mcs_total_stat mystat;
3397         int i;
3398
3399         memset(&mystat, 0, sizeof(mystat));
3400         mem_cgroup_get_local_stat(mem_cont, &mystat);
3401
3402         for (i = 0; i < NR_MCS_STAT; i++) {
3403                 if (i == MCS_SWAP && !do_swap_account)
3404                         continue;
3405                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3406         }
3407
3408         /* Hierarchical information */
3409         {
3410                 unsigned long long limit, memsw_limit;
3411                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3412                 cb->fill(cb, "hierarchical_memory_limit", limit);
3413                 if (do_swap_account)
3414                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3415         }
3416
3417         memset(&mystat, 0, sizeof(mystat));
3418         mem_cgroup_get_total_stat(mem_cont, &mystat);
3419         for (i = 0; i < NR_MCS_STAT; i++) {
3420                 if (i == MCS_SWAP && !do_swap_account)
3421                         continue;
3422                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3423         }
3424
3425 #ifdef CONFIG_DEBUG_VM
3426         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3427
3428         {
3429                 int nid, zid;
3430                 struct mem_cgroup_per_zone *mz;
3431                 unsigned long recent_rotated[2] = {0, 0};
3432                 unsigned long recent_scanned[2] = {0, 0};
3433
3434                 for_each_online_node(nid)
3435                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3436                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3437
3438                                 recent_rotated[0] +=
3439                                         mz->reclaim_stat.recent_rotated[0];
3440                                 recent_rotated[1] +=
3441                                         mz->reclaim_stat.recent_rotated[1];
3442                                 recent_scanned[0] +=
3443                                         mz->reclaim_stat.recent_scanned[0];
3444                                 recent_scanned[1] +=
3445                                         mz->reclaim_stat.recent_scanned[1];
3446                         }
3447                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3448                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3449                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3450                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3451         }
3452 #endif
3453
3454         return 0;
3455 }
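/*
 * Illustrative output (values made up): memory.stat prints the local
 * counters first, then the hierarchical limits, then the "total_*"
 * counters, roughly:
 *
 *     cache 212992
 *     rss 1048576
 *     ...
 *     hierarchical_memory_limit 9223372036854775807
 *     total_cache 212992
 *     total_rss 1048576
 *     ...
 */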
3456
3457 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3458 {
3459         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3460
3461         return get_swappiness(memcg);
3462 }
3463
3464 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3465                                        u64 val)
3466 {
3467         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3468         struct mem_cgroup *parent;
3469
3470         if (val > 100)
3471                 return -EINVAL;
3472
3473         if (cgrp->parent == NULL)
3474                 return -EINVAL;
3475
3476         parent = mem_cgroup_from_cont(cgrp->parent);
3477
3478         cgroup_lock();
3479
3480         /* If under hierarchy, only the root of an empty sub-hierarchy may set this value */
3481         if ((parent->use_hierarchy) ||
3482             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3483                 cgroup_unlock();
3484                 return -EINVAL;
3485         }
3486
3487         spin_lock(&memcg->reclaim_param_lock);
3488         memcg->swappiness = val;
3489         spin_unlock(&memcg->reclaim_param_lock);
3490
3491         cgroup_unlock();
3492
3493         return 0;
3494 }
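/*
 * Illustrative usage (paths assumed): per-cgroup swappiness takes the
 * same 0..100 range as /proc/sys/vm/swappiness, e.g.
 *
 *     # echo 10 > /cgroup/memory/<group>/memory.swappiness
 *
 * The write is rejected for the root cgroup and, as checked above, while
 * the value would be shared with a populated sub-hierarchy.
 */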
3495
3496 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3497 {
3498         struct mem_cgroup_threshold_ary *t;
3499         u64 usage;
3500         int i;
3501
3502         rcu_read_lock();
3503         if (!swap)
3504                 t = rcu_dereference(memcg->thresholds.primary);
3505         else
3506                 t = rcu_dereference(memcg->memsw_thresholds.primary);
3507
3508         if (!t)
3509                 goto unlock;
3510
3511         usage = mem_cgroup_usage(memcg, swap);
3512
3513         /*
3514          * current_threshold points to the threshold just below usage.
3515          * If that is not true, a threshold was crossed after the last
3516          * call to __mem_cgroup_threshold().
3517          */
3518         i = t->current_threshold;
3519
3520         /*
3521          * Iterate backward over the array of thresholds starting from
3522          * current_threshold and check whether a threshold was crossed.
3523          * If none of the thresholds below usage was crossed, we read
3524          * only one element of the array here.
3525          */
3526         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3527                 eventfd_signal(t->entries[i].eventfd, 1);
3528
3529         /* i = current_threshold + 1 */
3530         i++;
3531
3532         /*
3533          * Iterate forward over the array of thresholds starting from
3534          * current_threshold+1 and check whether a threshold was crossed.
3535          * If none of the thresholds above usage was crossed, we read
3536          * only one element of the array here.
3537          */
3538         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3539                 eventfd_signal(t->entries[i].eventfd, 1);
3540
3541         /* Update current_threshold */
3542         t->current_threshold = i - 1;
3543 unlock:
3544         rcu_read_unlock();
3545 }
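/*
 * Worked example for the scan above (threshold values made up): with
 * sorted thresholds {4M, 8M, 16M} and current_threshold == 1 (usage ~10M),
 * a drop to 3M makes the backward loop signal the 8M and 4M eventfds and
 * leaves current_threshold == -1, while a rise to 20M makes the forward
 * loop signal the 16M eventfd and leaves current_threshold == 2.
 */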
3546
3547 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3548 {
3549         __mem_cgroup_threshold(memcg, false);
3550         if (do_swap_account)
3551                 __mem_cgroup_threshold(memcg, true);
3552 }
3553
3554 static int compare_thresholds(const void *a, const void *b)
3555 {
3556         const struct mem_cgroup_threshold *_a = a;
3557         const struct mem_cgroup_threshold *_b = b;
3558
3559         return _a->threshold - _b->threshold;
3560 }
3561
3562 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3563 {
3564         struct mem_cgroup_eventfd_list *ev;
3565
3566         list_for_each_entry(ev, &mem->oom_notify, list)
3567                 eventfd_signal(ev->eventfd, 1);
3568         return 0;
3569 }
3570
3571 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3572 {
3573         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3574 }
3575
3576 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3577         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3578 {
3579         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3580         struct mem_cgroup_thresholds *thresholds;
3581         struct mem_cgroup_threshold_ary *new;
3582         int type = MEMFILE_TYPE(cft->private);
3583         u64 threshold, usage;
3584         int i, size, ret;
3585
3586         ret = res_counter_memparse_write_strategy(args, &threshold);
3587         if (ret)
3588                 return ret;
3589
3590         mutex_lock(&memcg->thresholds_lock);
3591
3592         if (type == _MEM)
3593                 thresholds = &memcg->thresholds;
3594         else if (type == _MEMSWAP)
3595                 thresholds = &memcg->memsw_thresholds;
3596         else
3597                 BUG();
3598
3599         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3600
3601         /* Check whether a threshold was crossed before adding a new one */
3602         if (thresholds->primary)
3603                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3604
3605         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3606
3607         /* Allocate memory for new array of thresholds */
3608         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3609                         GFP_KERNEL);
3610         if (!new) {
3611                 ret = -ENOMEM;
3612                 goto unlock;
3613         }
3614         new->size = size;
3615
3616         /* Copy thresholds (if any) to new array */
3617         if (thresholds->primary) {
3618                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3619                                 sizeof(struct mem_cgroup_threshold));
3620         }
3621
3622         /* Add new threshold */
3623         new->entries[size - 1].eventfd = eventfd;
3624         new->entries[size - 1].threshold = threshold;
3625
3626         /* Sort thresholds. Registering of new threshold isn't time-critical */
3627         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3628                         compare_thresholds, NULL);
3629
3630         /* Find current threshold */
3631         new->current_threshold = -1;
3632         for (i = 0; i < size; i++) {
3633                 if (new->entries[i].threshold < usage) {
3634                         /*
3635                          * new->current_threshold will not be used until
3636                          * rcu_assign_pointer(), so it's safe to increment
3637                          * it here.
3638                          */
3639                         ++new->current_threshold;
3640                 }
3641         }
3642
3643         /* Free old spare buffer and save old primary buffer as spare */
3644         kfree(thresholds->spare);
3645         thresholds->spare = thresholds->primary;
3646
3647         rcu_assign_pointer(thresholds->primary, new);
3648
3649         /* To be sure that nobody uses thresholds */
3650         synchronize_rcu();
3651
3652 unlock:
3653         mutex_unlock(&memcg->thresholds_lock);
3654
3655         return ret;
3656 }
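/*
 * Illustrative registration (paths assumed): thresholds are added through
 * the generic cgroup eventfd interface, roughly
 *
 *     fd = eventfd(0, 0);
 *     echo "<fd> <fd of memory.usage_in_bytes> 4M" > cgroup.event_control
 *
 * after which the eventfd is signalled each time usage crosses 4M in
 * either direction (see __mem_cgroup_threshold() above).
 */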
3657
3658 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3659         struct cftype *cft, struct eventfd_ctx *eventfd)
3660 {
3661         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3662         struct mem_cgroup_thresholds *thresholds;
3663         struct mem_cgroup_threshold_ary *new;
3664         int type = MEMFILE_TYPE(cft->private);
3665         u64 usage;
3666         int i, j, size;
3667
3668         mutex_lock(&memcg->thresholds_lock);
3669         if (type == _MEM)
3670                 thresholds = &memcg->thresholds;
3671         else if (type == _MEMSWAP)
3672                 thresholds = &memcg->memsw_thresholds;
3673         else
3674                 BUG();
3675
3676         /*
3677          * Something went wrong if we are trying to unregister a threshold
3678          * while we don't have any thresholds.
3679          */
3680         BUG_ON(!thresholds);
3681
3682         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3683
3684         /* Check whether a threshold was crossed before removing */
3685         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3686
3687         /* Calculate the new number of thresholds */
3688         size = 0;
3689         for (i = 0; i < thresholds->primary->size; i++) {
3690                 if (thresholds->primary->entries[i].eventfd != eventfd)
3691                         size++;
3692         }
3693
3694         new = thresholds->spare;
3695
3696         /* Set the thresholds array to NULL if no thresholds remain */
3697         if (!size) {
3698                 kfree(new);
3699                 new = NULL;
3700                 goto swap_buffers;
3701         }
3702
3703         new->size = size;
3704
3705         /* Copy thresholds and find current threshold */
3706         new->current_threshold = -1;
3707         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3708                 if (thresholds->primary->entries[i].eventfd == eventfd)
3709                         continue;
3710
3711                 new->entries[j] = thresholds->primary->entries[i];
3712                 if (new->entries[j].threshold < usage) {
3713                         /*
3714                          * new->current_threshold will not be used
3715                          * until rcu_assign_pointer(), so it's safe to increment
3716                          * it here.
3717                          */
3718                         ++new->current_threshold;
3719                 }
3720                 j++;
3721         }
3722
3723 swap_buffers:
3724         /* Swap primary and spare array */
3725         thresholds->spare = thresholds->primary;
3726         rcu_assign_pointer(thresholds->primary, new);
3727
3728         /* To be sure that nobody uses thresholds */
3729         synchronize_rcu();
3730
3731         mutex_unlock(&memcg->thresholds_lock);
3732 }
3733
3734 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3735         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3736 {
3737         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3738         struct mem_cgroup_eventfd_list *event;
3739         int type = MEMFILE_TYPE(cft->private);
3740
3741         BUG_ON(type != _OOM_TYPE);
3742         event = kmalloc(sizeof(*event), GFP_KERNEL);
3743         if (!event)
3744                 return -ENOMEM;
3745
3746         mutex_lock(&memcg_oom_mutex);
3747
3748         event->eventfd = eventfd;
3749         list_add(&event->list, &memcg->oom_notify);
3750
3751         /* already in OOM ? */
3752         if (atomic_read(&memcg->oom_lock))
3753                 eventfd_signal(eventfd, 1);
3754         mutex_unlock(&memcg_oom_mutex);
3755
3756         return 0;
3757 }
3758
3759 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3760         struct cftype *cft, struct eventfd_ctx *eventfd)
3761 {
3762         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3763         struct mem_cgroup_eventfd_list *ev, *tmp;
3764         int type = MEMFILE_TYPE(cft->private);
3765
3766         BUG_ON(type != _OOM_TYPE);
3767
3768         mutex_lock(&memcg_oom_mutex);
3769
3770         list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3771                 if (ev->eventfd == eventfd) {
3772                         list_del(&ev->list);
3773                         kfree(ev);
3774                 }
3775         }
3776
3777         mutex_unlock(&memcg_oom_mutex);
3778 }
3779
3780 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3781         struct cftype *cft,  struct cgroup_map_cb *cb)
3782 {
3783         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3784
3785         cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3786
3787         if (atomic_read(&mem->oom_lock))
3788                 cb->fill(cb, "under_oom", 1);
3789         else
3790                 cb->fill(cb, "under_oom", 0);
3791         return 0;
3792 }
3793
3794 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3795         struct cftype *cft, u64 val)
3796 {
3797         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3798         struct mem_cgroup *parent;
3799
3800         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3801         if (!cgrp->parent || !((val == 0) || (val == 1)))
3802                 return -EINVAL;
3803
3804         parent = mem_cgroup_from_cont(cgrp->parent);
3805
3806         cgroup_lock();
3807         /* oom-kill-disable is a flag for subhierarchy. */
3808         if ((parent->use_hierarchy) ||
3809             (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3810                 cgroup_unlock();
3811                 return -EINVAL;
3812         }
3813         mem->oom_kill_disable = val;
3814         if (!val)
3815                 memcg_oom_recover(mem);
3816         cgroup_unlock();
3817         return 0;
3818 }
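/*
 * Illustrative usage (paths assumed): writing 1 sets oom_kill_disable for
 * the group, so tasks that hit its limit wait under OOM instead of being
 * killed; writing 0 re-enables the killer and wakes waiters via
 * memcg_oom_recover(), e.g.
 *
 *     # echo 1 > /cgroup/memory/<group>/memory.oom_control
 */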
3819
3820 static struct cftype mem_cgroup_files[] = {
3821         {
3822                 .name = "usage_in_bytes",
3823                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3824                 .read_u64 = mem_cgroup_read,
3825                 .register_event = mem_cgroup_usage_register_event,
3826                 .unregister_event = mem_cgroup_usage_unregister_event,
3827         },
3828         {
3829                 .name = "max_usage_in_bytes",
3830                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3831                 .trigger = mem_cgroup_reset,
3832                 .read_u64 = mem_cgroup_read,
3833         },
3834         {
3835                 .name = "limit_in_bytes",
3836                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3837                 .write_string = mem_cgroup_write,
3838                 .read_u64 = mem_cgroup_read,
3839         },
3840         {
3841                 .name = "soft_limit_in_bytes",
3842                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3843                 .write_string = mem_cgroup_write,
3844                 .read_u64 = mem_cgroup_read,
3845         },
3846         {
3847                 .name = "failcnt",
3848                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3849                 .trigger = mem_cgroup_reset,
3850                 .read_u64 = mem_cgroup_read,
3851         },
3852         {
3853                 .name = "stat",
3854                 .read_map = mem_control_stat_show,
3855         },
3856         {
3857                 .name = "force_empty",
3858                 .trigger = mem_cgroup_force_empty_write,
3859         },
3860         {
3861                 .name = "use_hierarchy",
3862                 .write_u64 = mem_cgroup_hierarchy_write,
3863                 .read_u64 = mem_cgroup_hierarchy_read,
3864         },
3865         {
3866                 .name = "swappiness",
3867                 .read_u64 = mem_cgroup_swappiness_read,
3868                 .write_u64 = mem_cgroup_swappiness_write,
3869         },
3870         {
3871                 .name = "move_charge_at_immigrate",
3872                 .read_u64 = mem_cgroup_move_charge_read,
3873                 .write_u64 = mem_cgroup_move_charge_write,
3874         },
3875         {
3876                 .name = "oom_control",
3877                 .read_map = mem_cgroup_oom_control_read,
3878                 .write_u64 = mem_cgroup_oom_control_write,
3879                 .register_event = mem_cgroup_oom_register_event,
3880                 .unregister_event = mem_cgroup_oom_unregister_event,
3881                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3882         },
3883 };
3884
3885 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3886 static struct cftype memsw_cgroup_files[] = {
3887         {
3888                 .name = "memsw.usage_in_bytes",
3889                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3890                 .read_u64 = mem_cgroup_read,
3891                 .register_event = mem_cgroup_usage_register_event,
3892                 .unregister_event = mem_cgroup_usage_unregister_event,
3893         },
3894         {
3895                 .name = "memsw.max_usage_in_bytes",
3896                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3897                 .trigger = mem_cgroup_reset,
3898                 .read_u64 = mem_cgroup_read,
3899         },
3900         {
3901                 .name = "memsw.limit_in_bytes",
3902                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3903                 .write_string = mem_cgroup_write,
3904                 .read_u64 = mem_cgroup_read,
3905         },
3906         {
3907                 .name = "memsw.failcnt",
3908                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3909                 .trigger = mem_cgroup_reset,
3910                 .read_u64 = mem_cgroup_read,
3911         },
3912 };
3913
3914 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3915 {
3916         if (!do_swap_account)
3917                 return 0;
3918         return cgroup_add_files(cont, ss, memsw_cgroup_files,
3919                                 ARRAY_SIZE(memsw_cgroup_files));
3920 };
3921 #else
3922 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3923 {
3924         return 0;
3925 }
3926 #endif
3927
3928 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3929 {
3930         struct mem_cgroup_per_node *pn;
3931         struct mem_cgroup_per_zone *mz;
3932         enum lru_list l;
3933         int zone, tmp = node;
3934         /*
3935          * This routine is called for every possible node.
3936          * But it's a BUG to call kmalloc() for an offline node.
3937          *
3938          * TODO: this routine can waste a lot of memory for nodes which will
3939          *       never be onlined. It would be better to use a memory hotplug
3940          *       callback function.
3941          */
3942         if (!node_state(node, N_NORMAL_MEMORY))
3943                 tmp = -1;
3944         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
3945         if (!pn)
3946                 return 1;
3947
3948         mem->info.nodeinfo[node] = pn;
3949         memset(pn, 0, sizeof(*pn));
3950
3951         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3952                 mz = &pn->zoneinfo[zone];
3953                 for_each_lru(l)
3954                         INIT_LIST_HEAD(&mz->lists[l]);
3955                 mz->usage_in_excess = 0;
3956                 mz->on_tree = false;
3957                 mz->mem = mem;
3958         }
3959         return 0;
3960 }
3961
3962 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3963 {
3964         kfree(mem->info.nodeinfo[node]);
3965 }
3966
3967 static struct mem_cgroup *mem_cgroup_alloc(void)
3968 {
3969         struct mem_cgroup *mem;
3970         int size = sizeof(struct mem_cgroup);
3971
3972         /* Can be very big if MAX_NUMNODES is very big */
3973         if (size < PAGE_SIZE)
3974                 mem = kmalloc(size, GFP_KERNEL);
3975         else
3976                 mem = vmalloc(size);
3977
3978         if (!mem)
3979                 return NULL;
3980
3981         memset(mem, 0, size);
3982         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3983         if (!mem->stat) {
3984                 if (size < PAGE_SIZE)
3985                         kfree(mem);
3986                 else
3987                         vfree(mem);
3988                 mem = NULL;
3989         }
3990         return mem;
3991 }
3992
3993 /*
3994  * When destroying a mem_cgroup, references from swap_cgroup can remain.
3995  * (Scanning them all at force_empty is too costly...)
3996  *
3997  * Instead of clearing all references at force_empty, we remember
3998  * the number of references from swap_cgroup and free the mem_cgroup
3999  * when that count goes down to 0.
4000  *
4001  * Removal of the cgroup itself succeeds regardless of refs from swap.
4002  */
4003
4004 static void __mem_cgroup_free(struct mem_cgroup *mem)
4005 {
4006         int node;
4007
4008         mem_cgroup_remove_from_trees(mem);
4009         free_css_id(&mem_cgroup_subsys, &mem->css);
4010
4011         for_each_node_state(node, N_POSSIBLE)
4012                 free_mem_cgroup_per_zone_info(mem, node);
4013
4014         free_percpu(mem->stat);
4015         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4016                 kfree(mem);
4017         else
4018                 vfree(mem);
4019 }
4020
4021 static void mem_cgroup_get(struct mem_cgroup *mem)
4022 {
4023         atomic_inc(&mem->refcnt);
4024 }
4025
4026 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4027 {
4028         if (atomic_sub_and_test(count, &mem->refcnt)) {
4029                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4030                 __mem_cgroup_free(mem);
4031                 if (parent)
4032                         mem_cgroup_put(parent);
4033         }
4034 }
4035
4036 static void mem_cgroup_put(struct mem_cgroup *mem)
4037 {
4038         __mem_cgroup_put(mem, 1);
4039 }
4040
4041 /*
4042  * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
4043  */
4044 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4045 {
4046         if (!mem->res.parent)
4047                 return NULL;
4048         return mem_cgroup_from_res_counter(mem->res.parent, res);
4049 }
4050
4051 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4052 static void __init enable_swap_cgroup(void)
4053 {
4054         if (!mem_cgroup_disabled() && really_do_swap_account)
4055                 do_swap_account = 1;
4056 }
4057 #else
4058 static void __init enable_swap_cgroup(void)
4059 {
4060 }
4061 #endif
4062
4063 static int mem_cgroup_soft_limit_tree_init(void)
4064 {
4065         struct mem_cgroup_tree_per_node *rtpn;
4066         struct mem_cgroup_tree_per_zone *rtpz;
4067         int tmp, node, zone;
4068
4069         for_each_node_state(node, N_POSSIBLE) {
4070                 tmp = node;
4071                 if (!node_state(node, N_NORMAL_MEMORY))
4072                         tmp = -1;
4073                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4074                 if (!rtpn)
4075                         return 1;
4076
4077                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4078
4079                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4080                         rtpz = &rtpn->rb_tree_per_zone[zone];
4081                         rtpz->rb_root = RB_ROOT;
4082                         spin_lock_init(&rtpz->lock);
4083                 }
4084         }
4085         return 0;
4086 }
4087
4088 static struct cgroup_subsys_state * __ref
4089 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4090 {
4091         struct mem_cgroup *mem, *parent;
4092         long error = -ENOMEM;
4093         int node;
4094
4095         mem = mem_cgroup_alloc();
4096         if (!mem)
4097                 return ERR_PTR(error);
4098
4099         for_each_node_state(node, N_POSSIBLE)
4100                 if (alloc_mem_cgroup_per_zone_info(mem, node))
4101                         goto free_out;
4102
4103         /* root ? */
4104         if (cont->parent == NULL) {
4105                 int cpu;
4106                 enable_swap_cgroup();
4107                 parent = NULL;
4108                 root_mem_cgroup = mem;
4109                 if (mem_cgroup_soft_limit_tree_init())
4110                         goto free_out;
4111                 for_each_possible_cpu(cpu) {
4112                         struct memcg_stock_pcp *stock =
4113                                                 &per_cpu(memcg_stock, cpu);
4114                         INIT_WORK(&stock->work, drain_local_stock);
4115                 }
4116                 hotcpu_notifier(memcg_stock_cpu_callback, 0);
4117         } else {
4118                 parent = mem_cgroup_from_cont(cont->parent);
4119                 mem->use_hierarchy = parent->use_hierarchy;
4120                 mem->oom_kill_disable = parent->oom_kill_disable;
4121         }
4122
4123         if (parent && parent->use_hierarchy) {
4124                 res_counter_init(&mem->res, &parent->res);
4125                 res_counter_init(&mem->memsw, &parent->memsw);
4126                 /*
4127                  * We increment refcnt of the parent to ensure that we can
4128                  * safely access it on res_counter_charge/uncharge.
4129                  * This refcnt will be decremented when freeing this
4130                  * mem_cgroup(see mem_cgroup_put).
4131                  */
4132                 mem_cgroup_get(parent);
4133         } else {
4134                 res_counter_init(&mem->res, NULL);
4135                 res_counter_init(&mem->memsw, NULL);
4136         }
4137         mem->last_scanned_child = 0;
4138         spin_lock_init(&mem->reclaim_param_lock);
4139         INIT_LIST_HEAD(&mem->oom_notify);
4140
4141         if (parent)
4142                 mem->swappiness = get_swappiness(parent);
4143         atomic_set(&mem->refcnt, 1);
4144         mem->move_charge_at_immigrate = 0;
4145         mutex_init(&mem->thresholds_lock);
4146         return &mem->css;
4147 free_out:
4148         __mem_cgroup_free(mem);
4149         root_mem_cgroup = NULL;
4150         return ERR_PTR(error);
4151 }
4152
4153 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4154                                         struct cgroup *cont)
4155 {
4156         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4157
4158         return mem_cgroup_force_empty(mem, false);
4159 }
4160
4161 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4162                                 struct cgroup *cont)
4163 {
4164         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4165
4166         mem_cgroup_put(mem);
4167 }
4168
4169 static int mem_cgroup_populate(struct cgroup_subsys *ss,
4170                                 struct cgroup *cont)
4171 {
4172         int ret;
4173
4174         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4175                                 ARRAY_SIZE(mem_cgroup_files));
4176
4177         if (!ret)
4178                 ret = register_memsw_files(cont, ss);
4179         return ret;
4180 }
4181
4182 #ifdef CONFIG_MMU
4183 /* Handlers for move charge at task migration. */
4184 #define PRECHARGE_COUNT_AT_ONCE 256
4185 static int mem_cgroup_do_precharge(unsigned long count)
4186 {
4187         int ret = 0;
4188         int batch_count = PRECHARGE_COUNT_AT_ONCE;
4189         struct mem_cgroup *mem = mc.to;
4190
4191         if (mem_cgroup_is_root(mem)) {
4192                 mc.precharge += count;
4193                 /* we don't need css_get for root */
4194                 return ret;
4195         }
4196         /* try to charge at once */
4197         if (count > 1) {
4198                 struct res_counter *dummy;
4199                 /*
4200                  * "mem" cannot be under rmdir() because we've already checked
4201                  * by cgroup_lock_live_cgroup() that it is not removed and we
4202                  * are still under the same cgroup_mutex. So we can postpone
4203                  * css_get().
4204                  */
4205                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4206                         goto one_by_one;
4207                 if (do_swap_account && res_counter_charge(&mem->memsw,
4208                                                 PAGE_SIZE * count, &dummy)) {
4209                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4210                         goto one_by_one;
4211                 }
4212                 mc.precharge += count;
4213                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
4214                 WARN_ON_ONCE(count > INT_MAX);
4215                 __css_get(&mem->css, (int)count);
4216                 return ret;
4217         }
4218 one_by_one:
4219         /* fall back to one by one charge */
4220         while (count--) {
4221                 if (signal_pending(current)) {
4222                         ret = -EINTR;
4223                         break;
4224                 }
4225                 if (!batch_count--) {
4226                         batch_count = PRECHARGE_COUNT_AT_ONCE;
4227                         cond_resched();
4228                 }
4229                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
4230                 if (ret || !mem)
4231                         /* mem_cgroup_clear_mc() will do uncharge later */
4232                         return -ENOMEM;
4233                 mc.precharge++;
4234         }
4235         return ret;
4236 }
4237
4238 /**
4239  * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4240  * @vma: the vma the pte to be checked belongs to
4241  * @addr: the address corresponding to the pte to be checked
4242  * @ptent: the pte to be checked
4243  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4244  *
4245  * Returns
4246  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4247  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4248  *     move charge. If @target is not NULL, the page is stored in target->page
4249  *     with an extra refcount taken (callers should handle it).
4250  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4251  *     target for charge migration. If @target is not NULL, the entry is stored
4252  *     in target->ent.
4253  *
4254  * Called with pte lock held.
4255  */
4256 union mc_target {
4257         struct page     *page;
4258         swp_entry_t     ent;
4259 };
4260
4261 enum mc_target_type {
4262         MC_TARGET_NONE, /* not used */
4263         MC_TARGET_PAGE,
4264         MC_TARGET_SWAP,
4265 };
4266
4267 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4268                                                 unsigned long addr, pte_t ptent)
4269 {
4270         struct page *page = vm_normal_page(vma, addr, ptent);
4271
4272         if (!page || !page_mapped(page))
4273                 return NULL;
4274         if (PageAnon(page)) {
4275                 /* we don't move shared anon */
4276                 if (!move_anon() || page_mapcount(page) > 2)
4277                         return NULL;
4278         } else if (!move_file())
4279                 /* we ignore mapcount for file pages */
4280                 return NULL;
4281         if (!get_page_unless_zero(page))
4282                 return NULL;
4283
4284         return page;
4285 }
4286
4287 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4288                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4289 {
4290         int usage_count;
4291         struct page *page = NULL;
4292         swp_entry_t ent = pte_to_swp_entry(ptent);
4293
4294         if (!move_anon() || non_swap_entry(ent))
4295                 return NULL;
4296         usage_count = mem_cgroup_count_swap_user(ent, &page);
4297         if (usage_count > 1) { /* we don't move shared anon */
4298                 if (page)
4299                         put_page(page);
4300                 return NULL;
4301         }
4302         if (do_swap_account)
4303                 entry->val = ent.val;
4304
4305         return page;
4306 }
4307
4308 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4309                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4310 {
4311         struct page *page = NULL;
4312         struct inode *inode;
4313         struct address_space *mapping;
4314         pgoff_t pgoff;
4315
4316         if (!vma->vm_file) /* anonymous vma */
4317                 return NULL;
4318         if (!move_file())
4319                 return NULL;
4320
4321         inode = vma->vm_file->f_path.dentry->d_inode;
4322         mapping = vma->vm_file->f_mapping;
4323         if (pte_none(ptent))
4324                 pgoff = linear_page_index(vma, addr);
4325         else /* pte_file(ptent) is true */
4326                 pgoff = pte_to_pgoff(ptent);
4327
4328         /* page is moved even if it's not RSS of this task(page-faulted). */
4329         if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4330                 page = find_get_page(mapping, pgoff);
4331         } else { /* shmem/tmpfs file. We should take swap into account too. */
4332                 swp_entry_t ent;
4333                 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4334                 if (do_swap_account)
4335                         entry->val = ent.val;
4336         }
4337
4338         return page;
4339 }
4340
4341 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4342                 unsigned long addr, pte_t ptent, union mc_target *target)
4343 {
4344         struct page *page = NULL;
4345         struct page_cgroup *pc;
4346         int ret = 0;
4347         swp_entry_t ent = { .val = 0 };
4348
4349         if (pte_present(ptent))
4350                 page = mc_handle_present_pte(vma, addr, ptent);
4351         else if (is_swap_pte(ptent))
4352                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4353         else if (pte_none(ptent) || pte_file(ptent))
4354                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4355
4356         if (!page && !ent.val)
4357                 return 0;
4358         if (page) {
4359                 pc = lookup_page_cgroup(page);
4360                 /*
4361                  * Do only a loose check without the page_cgroup lock;
4362                  * mem_cgroup_move_account() checks whether the pc is valid
4363                  * under the lock.
4364                  */
4365                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4366                         ret = MC_TARGET_PAGE;
4367                         if (target)
4368                                 target->page = page;
4369                 }
4370                 if (!ret || !target)
4371                         put_page(page);
4372         }
4373         /* There is a swap entry and a page doesn't exist or isn't charged */
4374         if (ent.val && !ret &&
4375                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4376                 ret = MC_TARGET_SWAP;
4377                 if (target)
4378                         target->ent = ent;
4379         }
4380         return ret;
4381 }
4382
4383 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4384                                         unsigned long addr, unsigned long end,
4385                                         struct mm_walk *walk)
4386 {
4387         struct vm_area_struct *vma = walk->private;
4388         pte_t *pte;
4389         spinlock_t *ptl;
4390
4391         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4392         for (; addr != end; pte++, addr += PAGE_SIZE)
4393                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4394                         mc.precharge++; /* increment precharge temporarily */
4395         pte_unmap_unlock(pte - 1, ptl);
4396         cond_resched();
4397
4398         return 0;
4399 }
4400
4401 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4402 {
4403         unsigned long precharge;
4404         struct vm_area_struct *vma;
4405
4406         down_read(&mm->mmap_sem);
4407         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4408                 struct mm_walk mem_cgroup_count_precharge_walk = {
4409                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4410                         .mm = mm,
4411                         .private = vma,
4412                 };
4413                 if (is_vm_hugetlb_page(vma))
4414                         continue;
4415                 walk_page_range(vma->vm_start, vma->vm_end,
4416                                         &mem_cgroup_count_precharge_walk);
4417         }
4418         up_read(&mm->mmap_sem);
4419
4420         precharge = mc.precharge;
4421         mc.precharge = 0;
4422
4423         return precharge;
4424 }
4425
4426 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4427 {
4428         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4429 }
4430
4431 static void mem_cgroup_clear_mc(void)
4432 {
4433         struct mem_cgroup *from = mc.from;
4434         struct mem_cgroup *to = mc.to;
4435
4436         /* we must uncharge all the leftover precharges from mc.to */
4437         if (mc.precharge) {
4438                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4439                 mc.precharge = 0;
4440         }
4441         /*
4442          * mem_cgroup_move_account() did not uncharge mc.from for the pages
4443          * it moved, so we must uncharge it here.
4444          */
4445         if (mc.moved_charge) {
4446                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4447                 mc.moved_charge = 0;
4448         }
4449         /* we must fix up refcounts and charges for moved swap entries */
4450         if (mc.moved_swap) {
4451                 WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4452                 /* uncharge swap account from the old cgroup */
4453                 if (!mem_cgroup_is_root(mc.from))
4454                         res_counter_uncharge(&mc.from->memsw,
4455                                                 PAGE_SIZE * mc.moved_swap);
4456                 __mem_cgroup_put(mc.from, mc.moved_swap);
4457
4458                 if (!mem_cgroup_is_root(mc.to)) {
4459                         /*
4460                          * we charged both to->res and to->memsw, so we should
4461                          * uncharge to->res.
4462                          */
4463                         res_counter_uncharge(&mc.to->res,
4464                                                 PAGE_SIZE * mc.moved_swap);
4465                         VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4466                         __css_put(&mc.to->css, mc.moved_swap);
4467                 }
4468                 /* we've already done mem_cgroup_get(mc.to) */
4469
4470                 mc.moved_swap = 0;
4471         }
4472         spin_lock(&mc.lock);
4473         mc.from = NULL;
4474         mc.to = NULL;
4475         mc.moving_task = NULL;
4476         spin_unlock(&mc.lock);
4477         memcg_oom_recover(from);
4478         memcg_oom_recover(to);
4479         wake_up_all(&mc.waitq);
4480 }
4481
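/*
 * cgroup ->can_attach callback: if the destination memcg has charge moving
 * enabled (via the memory.move_charge_at_immigrate file) and @p is the
 * owner of its mm, record the source and destination groups in mc and
 * precharge the destination for every candidate page/swap entry. A
 * non-zero return vetoes the attach; the partially built state is cleared
 * before returning.
 */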
4482 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4483                                 struct cgroup *cgroup,
4484                                 struct task_struct *p,
4485                                 bool threadgroup)
4486 {
4487         int ret = 0;
4488         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4489
4490         if (mem->move_charge_at_immigrate) {
4491                 struct mm_struct *mm;
4492                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4493
4494                 VM_BUG_ON(from == mem);
4495
4496                 mm = get_task_mm(p);
4497                 if (!mm)
4498                         return 0;
4499                 /* We move charges only when we move the owner of the mm */
4500                 if (mm->owner == p) {
4501                         VM_BUG_ON(mc.from);
4502                         VM_BUG_ON(mc.to);
4503                         VM_BUG_ON(mc.precharge);
4504                         VM_BUG_ON(mc.moved_charge);
4505                         VM_BUG_ON(mc.moved_swap);
4506                         VM_BUG_ON(mc.moving_task);
4507                         spin_lock(&mc.lock);
4508                         mc.from = from;
4509                         mc.to = mem;
4510                         mc.precharge = 0;
4511                         mc.moved_charge = 0;
4512                         mc.moved_swap = 0;
4513                         mc.moving_task = current;
4514                         spin_unlock(&mc.lock);
4515
4516                         ret = mem_cgroup_precharge_mc(mm);
4517                         if (ret)
4518                                 mem_cgroup_clear_mc();
4519                 }
4520                 mmput(mm);
4521         }
4522         return ret;
4523 }
4524
4525 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4526                                 struct cgroup *cgroup,
4527                                 struct task_struct *p,
4528                                 bool threadgroup)
4529 {
4530         mem_cgroup_clear_mc();
4531 }
4532
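/*
 * ->pmd_entry callback for the moving walk: re-classify each pte and,
 * while precharges remain, either isolate the target page from the LRU
 * and move its charge with mem_cgroup_move_account(), or move the swap
 * accounting with mem_cgroup_move_swap_account(). Each successful move
 * consumes one precharge. When the precharges run out, drop the pte lock,
 * try to charge a single extra page, and retry the remainder of the
 * range; a failed extra charge aborts the whole walk.
 */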
4533 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4534                                 unsigned long addr, unsigned long end,
4535                                 struct mm_walk *walk)
4536 {
4537         int ret = 0;
4538         struct vm_area_struct *vma = walk->private;
4539         pte_t *pte;
4540         spinlock_t *ptl;
4541
4542 retry:
4543         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4544         for (; addr != end; addr += PAGE_SIZE) {
4545                 pte_t ptent = *(pte++);
4546                 union mc_target target;
4547                 int type;
4548                 struct page *page;
4549                 struct page_cgroup *pc;
4550                 swp_entry_t ent;
4551
4552                 if (!mc.precharge)
4553                         break;
4554
4555                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4556                 switch (type) {
4557                 case MC_TARGET_PAGE:
4558                         page = target.page;
4559                         if (isolate_lru_page(page))
4560                                 goto put;
4561                         pc = lookup_page_cgroup(page);
4562                         if (!mem_cgroup_move_account(pc,
4563                                                 mc.from, mc.to, false)) {
4564                                 mc.precharge--;
4565                                 /* we uncharge from mc.from later. */
4566                                 mc.moved_charge++;
4567                         }
4568                         putback_lru_page(page);
4569 put:                    /* is_target_pte_for_mc() took a reference on the page */
4570                         put_page(page);
4571                         break;
4572                 case MC_TARGET_SWAP:
4573                         ent = target.ent;
4574                         if (!mem_cgroup_move_swap_account(ent,
4575                                                 mc.from, mc.to, false)) {
4576                                 mc.precharge--;
4577                                 /* we fix up refcounts and charges later. */
4578                                 mc.moved_swap++;
4579                         }
4580                         break;
4581                 default:
4582                         break;
4583                 }
4584         }
4585         pte_unmap_unlock(pte - 1, ptl);
4586         cond_resched();
4587
4588         if (addr != end) {
4589                 /*
4590                  * We have consumed all the precharges we got in can_attach().
4591                  * From here on we charge one page at a time, and we stop
4592                  * charging mc.to as soon as a single charge fails during this
4593                  * attach() phase.
4594                  */
4595                 ret = mem_cgroup_do_precharge(1);
4596                 if (!ret)
4597                         goto retry;
4598         }
4599
4600         return ret;
4601 }
4602
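/*
 * Move the charges of @mm from mc.from to mc.to: drain the per-cpu LRU
 * pagevecs first so pages can be isolated, then run the moving walk over
 * every non-hugetlb VMA. A non-zero return from the walk means the
 * precharges are gone and an extra charge failed, so stop early.
 */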
4603 static void mem_cgroup_move_charge(struct mm_struct *mm)
4604 {
4605         struct vm_area_struct *vma;
4606
4607         lru_add_drain_all();
4608         down_read(&mm->mmap_sem);
4609         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4610                 int ret;
4611                 struct mm_walk mem_cgroup_move_charge_walk = {
4612                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4613                         .mm = mm,
4614                         .private = vma,
4615                 };
4616                 if (is_vm_hugetlb_page(vma))
4617                         continue;
4618                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4619                                                 &mem_cgroup_move_charge_walk);
4620                 if (ret)
4621                         /*
4622                          * A non-zero ret means we have consumed all precharges
4623                          * and failed to charge any more. Just abandon here.
4624                          */
4625                         break;
4626         }
4627         up_read(&mm->mmap_sem);
4628 }
4629
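/*
 * cgroup ->attach callback: if can_attach() set up a pending move
 * (mc.to != NULL), move the charges of the task's mm now and then tear
 * the move state down again via mem_cgroup_clear_mc().
 */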
4630 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4631                                 struct cgroup *cont,
4632                                 struct cgroup *old_cont,
4633                                 struct task_struct *p,
4634                                 bool threadgroup)
4635 {
4636         struct mm_struct *mm;
4637
4638         if (!mc.to)
4639                 /* no need to move charge */
4640                 return;
4641
4642         mm = get_task_mm(p);
4643         if (mm) {
4644                 mem_cgroup_move_charge(mm);
4645                 mmput(mm);
4646         }
4647         mem_cgroup_clear_mc();
4648 }
4649 #else   /* !CONFIG_MMU */
4650 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4651                                 struct cgroup *cgroup,
4652                                 struct task_struct *p,
4653                                 bool threadgroup)
4654 {
4655         return 0;
4656 }
4657 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4658                                 struct cgroup *cgroup,
4659                                 struct task_struct *p,
4660                                 bool threadgroup)
4661 {
4662 }
4663 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4664                                 struct cgroup *cont,
4665                                 struct cgroup *old_cont,
4666                                 struct task_struct *p,
4667                                 bool threadgroup)
4668 {
4669 }
4670 #endif
4671
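/*
 * Subsystem registration for the memory controller; this is where the
 * can_attach/cancel_attach/attach callbacks defined above are hooked
 * into the cgroup core.
 */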
4672 struct cgroup_subsys mem_cgroup_subsys = {
4673         .name = "memory",
4674         .subsys_id = mem_cgroup_subsys_id,
4675         .create = mem_cgroup_create,
4676         .pre_destroy = mem_cgroup_pre_destroy,
4677         .destroy = mem_cgroup_destroy,
4678         .populate = mem_cgroup_populate,
4679         .can_attach = mem_cgroup_can_attach,
4680         .cancel_attach = mem_cgroup_cancel_attach,
4681         .attach = mem_cgroup_move_task,
4682         .early_init = 0,
4683         .use_id = 1,
4684 };
4685
4686 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4687
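/*
 * Early boot parameter: booting with "noswapaccount" on the kernel
 * command line clears really_do_swap_account, so swap accounting stays
 * disabled even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in.
 * Example: append "... noswapaccount" to the boot loader's kernel line.
 */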
4688 static int __init disable_swap_account(char *s)
4689 {
4690         really_do_swap_account = 0;
4691         return 1;
4692 }
4693 __setup("noswapaccount", disable_swap_account);
4694 #endif