/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>
struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};
/*
 * For accounting with irqs disabled; there is no need to bump the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;

	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
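/*
 * Note: an individual CPU's counter may go transiently negative, because a
 * page can be charged on one CPU and uncharged on another; only the sum
 * across all possible CPUs computed above is meaningful.
 */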
/*
 * Per-zone information in the memory controller.
 */
enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	struct mem_cgroup_lru_info info;
	/*
	 * spinlock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
	/*
	 * statistics
	 */
	struct mem_cgroup_stat stat;
};
/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
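/*
 * A minimal sketch of the page->page_cgroup encoding implied above (the
 * field is an unsigned long in struct page):
 *
 *	remaining bits : pointer to the struct page_cgroup (>= 2-byte aligned)
 *	bit 0          : PAGE_CGROUP_LOCK, taken with bit_spin_lock()
 *
 * page_get_page_cgroup() below recovers the pointer by masking the lock bit
 * off, and page_assign_page_cgroup() preserves the lock bit while storing a
 * new pointer.
 */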
/*
 * A page_cgroup is associated with every page descriptor. The page_cgroup
 * helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move between */
					/* mapped and cached states	   */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}
enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};
/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	VM_BUG_ON(!irqs_disabled());

	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}
static inline struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	BUG_ON(!mem->info.nodeinfo[nid]);
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static inline struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}
static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}
static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario.
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}
/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to a page_cgroup.
 * On success, returns 0.
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* The page is already tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}
/*
 * Clear the page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" value is different from the one in page->page_cgroup,
 * page->page_cgroup is not cleared.
 * Returns the value of page->page_cgroup at the time the lock was taken.
 * A caller can detect a failure to clear by checking
 *	clear_page_cgroup(page, pc) == pc
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	struct page_cgroup *ret;
	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}
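/*
 * Typical use of the idiom above, as in uncharge and force_empty below:
 * the caller only releases the charge and frees the page_cgroup when
 * clear_page_cgroup(page, pc) returns pc, i.e. when it was the one that
 * actually cleared the pointer.
 */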
static void __mem_cgroup_remove_list(struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &pc->mem_cgroup->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &pc->mem_cgroup->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
	}
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}
/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;
	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}
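/*
 * mem_cgroup_move_lists() above is invoked from the core VM LRU code when a
 * page is activated or deactivated, so that the cgroup's private
 * active/inactive lists (and the per-zone ZSTAT counters) stay in step with
 * the global LRU state.
 */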
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;
		if (unlikely(!PageLRU(page)))
			continue;
		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}
		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}
	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}
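/*
 * Note: this is the cgroup-aware counterpart of global LRU isolation in the
 * page reclaim code; try_to_free_mem_cgroup_pages() ends up pulling pages
 * from this cgroup's private lists through this hook when the cgroup hits
 * its limit.
 */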
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it.
	 */
retry:
	if (page) {
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		/*
		 * The page_cgroup exists and
		 * the page has already been accounted.
		 */
		if (pc) {
			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
				/* this page is currently being uncharged ? */
				unlock_page_cgroup(page);
				cpu_relax();
				goto retry;
			} else {
				unlock_page_cgroup(page);
				goto done;
			}
		}
		unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment the reference count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->page_cgroup, increment the refcnt ... just retrying
		 * is fine.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	/* Update the statistics vector */
	__mem_cgroup_add_list(pc);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
			MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

/*
 * Decide whether cached pages should be charged at all.
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int ret = 0;
	struct mem_cgroup *mem;

	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	css_get(&mem->css);
	rcu_read_unlock();
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
	css_put(&mem->css);
	return ret;
}
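/*
 * In short: mapped pages are always charged via mem_cgroup_charge(), while
 * page cache pages are only charged when control_type is
 * MEM_CGROUP_TYPE_ALL (the default set in mem_cgroup_create() below).
 */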
/*
 * Uncharging is always a welcome operation; we never complain, we simply
 * uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This handles the case where a page is not charged at all and we
	 * are switching between handling the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * Get page->page_cgroup and clear it under the lock.
		 * force_empty can drop page->page_cgroup without checking
		 * the refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			__mem_cgroup_remove_list(pc);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}
/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
}
/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with the uncharge() routines because the page_cgroup
 * for *page* holds one extra reference taken by mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	unsigned long flags;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	mem = pc->mem_cgroup;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;

	spin_lock_irqsave(&mem->lru_lock, flags);

	__mem_cgroup_remove_list(pc);
	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
	__mem_cgroup_add_list(pc);

	spin_unlock_irqrestore(&mem->lru_lock, flags);
	return;
}
/*
 * This routine traverses the page_cgroups in the given list and drops them
 * all. It ignores page_cgroup->ref_cnt.
 * *And* it does not reclaim the pages themselves; it only removes the
 * page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			__mem_cgroup_remove_list(pc);
			kfree(pc);
		} else	/* being uncharged ? ...then relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
	return;
}
/*
 * Make the mem_cgroup's charge 0 if there is no task attached.
 * This enables deleting this mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroup in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroup in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}
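/*
 * mem_cgroup_force_empty() is reached in two ways: from userspace, by
 * writing to the force_empty control file (mem_force_empty_write() below),
 * and from mem_cgroup_pre_destroy() just before the cgroup is removed.
 */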
int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round the value up to the nearest multiple of the page size.
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
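/*
 * Worked example of the rounding above, assuming 4KiB pages: writing "1M"
 * stores 1048576 unchanged (already page aligned), while writing 4097
 * rounds up to 8192.
 */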
static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}
static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}
static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}
static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;

	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: this should be removed once cgroup supports write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}
static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};
static int mem_control_stat_show(struct seq_file *m, void *arg)
{
	struct cgroup *cont = m->private;
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
				(long long)val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
		seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
	}
	return 0;
}
static const struct file_operations mem_control_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_stat_open(struct inode *unused, struct file *file)
{
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_stat_file_operations;
	return single_open(file, mem_control_stat_show, cont);
}
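/*
 * The "stat" control file is backed by the seq_file single_open() helper:
 * mem_control_stat_show() above produces the whole report in one pass, and
 * read/llseek/release come from the file_operations just defined.
 */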
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
	{
		.name = "stat",
		.open = mem_control_stat_open,
	},
};
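/*
 * These entries appear under the cgroup mount point prefixed with the
 * subsystem name, e.g. memory.usage_in_bytes, memory.limit_in_bytes,
 * memory.failcnt, memory.control_type, memory.force_empty and memory.stat.
 */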
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;

	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
	if (!pn)
		return 1;
	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));
	return 0;
}
static struct mem_cgroup init_mem_cgroup;
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	memset(&mem->info, 0, sizeof(mem->info));

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		kfree(mem->info.nodeinfo[node]);
	if (cont->parent != NULL)
		kfree(mem);
	return ERR_PTR(-ENOMEM);
}
static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		kfree(mem->info.nodeinfo[node]);

	kfree(mem_cgroup_from_cont(cont));
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct is
	 * in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
	return;
}
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
};