/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * Accounting is done with interrupts disabled, so there is no need to
 * bump the preempt count here.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
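
/*
 * Note on the per-CPU layout above: writers only touch their own CPU's
 * ____cacheline_aligned slot, which keeps the charge/uncharge fast paths
 * from bouncing a shared cache line; mem_cgroup_read_stat() folds all
 * possible CPUs together, so a reader may see a total that is briefly
 * out of date while updates are in flight on other CPUs.
 */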

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
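
/*
 * In other words, page->page_cgroup packs two things into a single word:
 * bit 0 is the bit-spinlock and the remaining bits hold the struct
 * page_cgroup pointer, which works because the allocations are at least
 * two-byte aligned. lock_page_cgroup() and page_get_page_cgroup() below
 * are the two halves of this encoding.
 */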

/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move between */
					/* mapped and cached states	   */
	int	 flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static inline int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock with interrupts disabled, so there
 * is no need for preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	VM_BUG_ON(!irqs_disabled());

	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario.
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to a page_cgroup.
 * On success, returns 0.
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* The page is already tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

/*
 * Clear the page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" differs from the current page->page_cgroup,
 * page->page_cgroup is left untouched.
 * Returns the value of page->page_cgroup at the time the lock was taken.
 * A caller can detect a failed clear by checking whether
 * clear_page_cgroup(page, pc) == pc.
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	struct page_cgroup *ret;
	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}
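
/*
 * Typical caller pattern (see mem_cgroup_uncharge() and the force_empty
 * code below): only the caller whose clear_page_cgroup(page, pc) returns
 * the pc it passed in owns that page_cgroup and may uncharge and free it;
 * any other return value means another path got there first.
 */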

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active) {
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	} else {
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;
	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}
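
/*
 * mem_cgroup_isolate_pages() below is this controller's entry point for
 * page reclaim: instead of scanning the zone-wide LRU, it walks the
 * cgroup's own active or inactive list, skips pages that belong to a
 * different zone than the one being reclaimed, and isolates up to
 * nr_to_scan pages onto *dst for the generic reclaim code to process.
 */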

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;
		VM_BUG_ON(!pc);

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go into their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the page already has a page_cgroup associated
	 * with it.
	 */
retry:
	if (page) {
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		/*
		 * The page_cgroup exists and
		 * the page has already been accounted.
		 */
		if (pc) {
			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
				/* is this page being uncharged? */
				unlock_page_cgroup(page);
				cpu_relax();
				goto retry;
			} else {
				unlock_page_cgroup(page);
				goto done;
			}
		}
		unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment the reference count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->page_cgroup, increment the refcnt... just
		 * retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	/* Update the statistics vector */
	mem_cgroup_charge_statistics(mem, pc->flags, true);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}
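
/*
 * The while (res_counter_charge(...)) loop above is where the limit is
 * enforced: each failed charge attempt triggers cgroup-local reclaim via
 * try_to_free_mem_cgroup_pages(), then re-checks the limit (reclaim can
 * free memory without reporting it), and only after
 * MEM_CGROUP_RECLAIM_RETRIES unsuccessful rounds does it invoke
 * mem_cgroup_out_of_memory(). Callers that cannot sleep (no __GFP_WAIT)
 * give up after the first failed attempt instead of reclaiming.
 */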

int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
			MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

/*
 * See whether cached pages should be charged at all.
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int ret = 0;
	struct mem_cgroup *mem;
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	css_get(&mem->css);
	rcu_read_unlock();
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
	css_put(&mem->css);
	return ret;
}
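
/*
 * control_type thus acts as a runtime switch (exposed through the
 * "control_type" cgroup file below): page cache is only charged when it
 * is MEM_CGROUP_TYPE_ALL, while mapped pages are always charged through
 * mem_cgroup_charge().
 */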

/*
 * Uncharging is always a welcome operation, we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This can handle the case where a page is not charged at all
	 * because we are switching the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * Get page->page_cgroup and clear it under the lock.
		 * force_empty can drop page->page_cgroup without checking
		 * the refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with the uncharge() routines because the
 * page_cgroup for *page* carries an extra reference taken by
 * mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;
	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
	return;
}
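
/*
 * Taken together, the migration code is expected to call these hooks in
 * sequence: mem_cgroup_prepare_migration() pins the old page's
 * page_cgroup with an extra reference, mem_cgroup_page_migration()
 * re-points that page_cgroup at the new page once the copy is done, and
 * mem_cgroup_end_migration() drops the extra reference (uncharging only
 * if it was the last one).
 */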

/*
 * This routine traverses the page_cgroups on the given list and drops
 * them all. It ignores page_cgroup->ref_cnt.
 * *And* it doesn't reclaim the pages themselves; it only removes their
 * page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid a race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			kfree(pc);
		} else	/* being uncharged ? ...do relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
	return;
}

/*
 * Make the mem_cgroup's charge zero if there is no task using it.
 * This enables deleting this mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroups in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroups in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round the value up to the nearest page boundary.
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
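
/*
 * Worked example of the rounding above, assuming PAGE_SIZE == 4096:
 * writing "4k" (memparse -> 4096) is stored as 4096, while "5000"
 * becomes ((5000 + 4095) >> 12) << 12 == 8192, so limits are always a
 * whole number of pages.
 */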

static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}

static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}

static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;
	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: This should be removed once cgroups support write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};

static int mem_control_stat_show(struct seq_file *m, void *arg)
{
	struct cgroup *cont = m->private;
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
				(long long)val);
	}
	return 0;
}

static const struct file_operations mem_control_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_stat_open(struct inode *unused, struct file *file)
{
	/* XXX __d_cont */
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_stat_file_operations;
	return single_open(file, mem_control_stat_show, cont);
}
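
/*
 * Reading the "stat" file therefore produces per-cgroup totals in bytes,
 * one counter per line, for example (values are illustrative only):
 *
 *	cache 8192
 *	rss 1232896
 */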

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
	{
		.name = "stat",
		.open = mem_control_stat_open,
	},
};
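
/*
 * From userspace these files appear under the cgroup mount with a
 * "memory." prefix. For example (the mount point is illustrative,
 * assuming the memory subsystem is mounted at /cgroups):
 *
 *	echo 4M > /cgroups/grp0/memory.limit_in_bytes
 *	cat /cgroups/grp0/memory.usage_in_bytes
 *	echo 1 > /cgroups/grp0/memory.force_empty
 */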

static struct mem_cgroup init_mem_cgroup;

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate the mm,
	 * since it is in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
	return;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 1,
};