/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

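/*
 * Worked example (illustrative only; assumes SHMEM_NR_DIRECT == 16, as in
 * the layout pictured at shmem_swp_entry() below): with 4kB pages and a
 * 4-byte unsigned long (32-bit), ENTRIES_PER_PAGE = 1024 and
 * ENTRIES_PER_PAGEPAGE = 1024*1024, so SHMSWP_MAX_INDEX =
 * 16 + (1024*1024/2)*1025 = 537,395,216 pages - just over 2TB.  With an
 * 8-byte unsigned long (64-bit), the same arithmetic gives 67,239,952
 * pages, about 256GB: one eighth of the 32-bit figure, as noted above.
 */
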
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
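
/*
 * Illustrative figures (assuming 4kB pages): on a machine with 2GB of RAM,
 * totalram_pages is about 524288, so an unconfigured tmpfs mount defaults
 * to max_blocks = 262144 (a 1GB filesystem) and at most 262144 inodes.
 */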

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
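
/*
 * For instance (illustrative numbers): if info->alloced == 10 but only
 * nrpages == 5 remain in the page cache and swapped == 3 are on swap,
 * then freed == 2 hole pages were dropped behind our back, and their
 * block accounting is given back above.
 */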

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *	      |      +-> 20-23
 *	      |
 *	      +-->dir2 --> 24-27
 *	      |	       +-> 28-31
 *	      |	       +-> 32-35
 *	      |	       +-> 36-39
 *	      |
 *	      +-->dir3 --> 40-43
 *		       +-> 44-47
 *		       +-> 48-51
 *		       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
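
/*
 * Worked lookup, using the artificial layout pictured above (illustrative
 * only: ENTRIES_PER_PAGE = 4, SHMEM_NR_DIRECT = 16).  For index 30:
 * index -= 16 -> 14; offset = 14 % 4 = 2; index = 14 / 4 = 3.  Since
 * 3 >= 4/2 we are in the triple-indirect half: index -= 2 -> 1, so we
 * take dir2 (dir[2 + 1/4]), then index %= 4 -> 1 selects dir2's second
 * block (pages 28-31), and offset 2 within it is page 30's swap entry.
 */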

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to the caller) when swap account is
	 * used.  add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
	/*
	 * Can some race bring us here?  We've been holding page lock,
	 * so I think not; but would rather try again later than BUG()
	 */
	unlock_page(page);
	page_cache_release(page);
out:
	return (found < 0) ? found : 0;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_shmem_alloc(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		swap_writepage(page, wbc);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
								swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
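
/*
 * Illustrative userspace path (an assumption about typical usage, not an
 * interface defined in this file): SysV shared memory reaches shmem_lock()
 * via shmctl(), pinning the segment's pages as unevictable:
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// -> shmem_lock(file, 1, user)
 *	shmctl(id, SHM_UNLOCK, NULL);	// -> shmem_lock(file, 0, user)
 */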

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}
1785
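/*
 * Report the configured block and inode limits as filesystem totals;
 * when a limit is unset, the corresponding fields are left zero, just
 * as simple_statfs would leave them.
 */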
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

        buf->f_type = TMPFS_MAGIC;
        buf->f_bsize = PAGE_CACHE_SIZE;
        buf->f_namelen = NAME_MAX;
        spin_lock(&sbinfo->stat_lock);
        if (sbinfo->max_blocks) {
                buf->f_blocks = sbinfo->max_blocks;
                buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
        }
        if (sbinfo->max_inodes) {
                buf->f_files = sbinfo->max_inodes;
                buf->f_ffree = sbinfo->free_inodes;
        }
        /* else leave those fields 0 like simple_statfs */
        spin_unlock(&sbinfo->stat_lock);
        return 0;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
        if (inode) {
                error = security_inode_init_security(inode, dir, NULL, NULL,
                                                     NULL);
                if (error) {
                        if (error != -EOPNOTSUPP) {
                                iput(inode);
                                return error;
                        }
                }
                error = shmem_acl_init(inode, dir);
                if (error) {
                        iput(inode);
                        return error;
                }
                if (dir->i_mode & S_ISGID) {
                        inode->i_gid = dir->i_gid;
                        if (S_ISDIR(mode))
                                inode->i_mode |= S_ISGID;
                }
                dir->i_size += BOGO_DIRENT_SIZE;
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry); /* Extra count - pin the dentry in core */
        }
        return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        int error;

        if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
                return error;
        inc_nlink(dir);
        return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
                struct nameidata *nd)
{
        return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file.
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int ret;

        /*
         * No ordinary (disk based) filesystem counts links as inodes;
         * but each new link needs a new dentry, pinning lowmem, and
         * tmpfs dentries cannot be pruned until they are unlinked.
         */
        ret = shmem_reserve_inode(inode->i_sb);
        if (ret)
                goto out;

        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
        atomic_inc(&inode->i_count);    /* New dentry reference */
        dget(dentry);           /* Extra pinning count for the created dentry */
        d_instantiate(dentry, inode);
out:
        return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
                shmem_free_inode(inode->i_sb);

        dir->i_size -= BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        drop_nlink(inode);
        dput(dentry);   /* Undo the count from "create" - this does all the work */
        return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
        if (!simple_empty(dentry))
                return -ENOTEMPTY;

        drop_nlink(dentry->d_inode);
        drop_nlink(dir);
        return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int they_are_dirs = S_ISDIR(inode->i_mode);

        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;

        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
                if (they_are_dirs)
                        drop_nlink(old_dir);
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
        }

        old_dir->i_size -= BOGO_DIRENT_SIZE;
        new_dir->i_size += BOGO_DIRENT_SIZE;
        old_dir->i_ctime = old_dir->i_mtime =
        new_dir->i_ctime = new_dir->i_mtime =
        inode->i_ctime = CURRENT_TIME;
        return 0;
}

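/*
 * A symlink target short enough to fit in the bytes of shmem_inode_info
 * that precede the embedded vfs_inode is stored inline there; longer
 * targets are written into page 0 of the inode's mapping instead.
 */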
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
        int error;
        int len;
        struct inode *inode;
        struct page *page = NULL;
        char *kaddr;
        struct shmem_inode_info *info;

        len = strlen(symname) + 1;
        if (len > PAGE_CACHE_SIZE)
                return -ENAMETOOLONG;

        inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
        if (!inode)
                return -ENOSPC;

        error = security_inode_init_security(inode, dir, NULL, NULL,
                                             NULL);
        if (error) {
                if (error != -EOPNOTSUPP) {
                        iput(inode);
                        return error;
                }
                error = 0;
        }

        info = SHMEM_I(inode);
        inode->i_size = len-1;
        if (len <= (char *)inode - (char *)info) {
                /* do it inline */
                memcpy(info, symname, len);
                inode->i_op = &shmem_symlink_inline_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
                        iput(inode);
                        return error;
                }
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(kaddr, symname, len);
                kunmap_atomic(kaddr, KM_USER0);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
        }
        if (dir->i_mode & S_ISGID)
                inode->i_gid = dir->i_gid;
        dir->i_size += BOGO_DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
}

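/*
 * The two follow_link flavours mirror the two storage schemes above:
 * the inline variant hands nd_set_link() the string embedded in the
 * shmem_inode_info, while the page-backed variant kmaps page 0 and
 * relies on shmem_put_link() to kunmap and release it afterwards.
 */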
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
        nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
        return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
        nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
        if (page)
                unlock_page(page);
        return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
        if (!IS_ERR(nd_get_link(nd))) {
                struct page *page = cookie;
                kunmap(page);
                mark_page_accessed(page);
                page_cache_release(page);
        }
}

static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
        .truncate       = shmem_truncate,
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link,
        .put_link       = shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
                                        size_t list_len, const char *name,
                                        size_t name_len)
{
        return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
                                    void *buffer, size_t size)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
                                    const void *value, size_t size, int flags)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .list   = shmem_xattr_security_list,
        .get    = shmem_xattr_security_get,
        .set    = shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
        &shmem_xattr_acl_access_handler,
        &shmem_xattr_acl_default_handler,
        &shmem_xattr_security_handler,
        NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
        return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
        __u32 *fh = vfh;
        __u64 inum = fh[2];
        inum = (inum << 32) | fh[1];
        return ino->i_ino == inum && fh[0] == ino->i_generation;
}

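/*
 * The file handle is three words: fh[0] holds i_generation, and fh[1]
 * and fh[2] hold the low and high 32 bits of the inode number. The
 * inode was hashed at (i_ino + i_generation) by shmem_encode_fh()
 * below, so look it up there and verify the match with shmem_match().
 */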
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        if (fh_len < 3)
                return NULL;

        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
        }

        return dentry;
}

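/*
 * Encode the three-word handle described above, hashing the inode
 * first if necessary so that shmem_fh_to_dentry() can find it again.
 */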
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                                int connectable)
{
        struct inode *inode = dentry->d_inode;

        if (*len < 3)
                return 255;

        if (hlist_unhashed(&inode->i_hash)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
                 * to do it once
                 */
                static DEFINE_SPINLOCK(lock);
                spin_lock(&lock);
                if (hlist_unhashed(&inode->i_hash))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
        }

        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
        fh[2] = ((__u64)inode->i_ino) >> 32;

        *len = 3;
        return 1;
}

static const struct export_operations shmem_export_ops = {
        .get_parent     = shmem_get_parent,
        .encode_fh      = shmem_encode_fh,
        .fh_to_dentry   = shmem_fh_to_dentry,
};

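/*
 * Parse the comma-separated mount options into *sbinfo. A "size" value
 * may carry a '%' suffix, meaning a percentage of total RAM. So, for
 * example (an illustrative invocation, not taken from this file):
 *
 *      mount -t tmpfs -o size=50%,nr_inodes=100000,mode=1777 tmpfs /mnt
 *
 * is handled here as size -> max_blocks, nr_inodes -> max_inodes, and
 * mode -> the octal permission bits of the root directory.
 */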
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                               bool remount)
{
        char *this_char, *value, *rest;

        while (options != NULL) {
                this_char = options;
                for (;;) {
                        /*
                         * NUL-terminate this option: unfortunately,
                         * mount options form a comma-separated list,
                         * but mpol's nodelist may also contain commas.
                         */
                        options = strchr(options, ',');
                        if (options == NULL)
                                break;
                        options++;
                        if (!isdigit(*options)) {
                                options[-1] = '\0';
                                break;
                        }
                }
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char,'=')) != NULL) {
                        *value++ = 0;
                } else {
                        printk(KERN_ERR
                            "tmpfs: No value for mount option '%s'\n",
                            this_char);
                        return 1;
                }

                if (!strcmp(this_char,"size")) {
                        unsigned long long size;
                        size = memparse(value,&rest);
                        if (*rest == '%') {
                                size <<= PAGE_SHIFT;
                                size *= totalram_pages;
                                do_div(size, 100);
                                rest++;
                        }
                        if (*rest)
                                goto bad_val;
                        sbinfo->max_blocks =
                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
                        sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        sbinfo->max_inodes = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (remount)
                                continue;
                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
                        if (remount)
                                continue;
                        sbinfo->uid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
                        if (remount)
                                continue;
                        sbinfo->gid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
                        if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                               this_char);
                        return 1;
                }
        }
        return 0;

bad_val:
        printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
               value, this_char);
        return 1;
}

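/*
 * Remount with the new limits, refusing any change that cannot be
 * honoured: a limit below current usage, or a switch from unlimited
 * to limited while the filesystem is in use.
 */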
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct shmem_sb_info config = *sbinfo;
        unsigned long blocks;
        unsigned long inodes;
        int error = -EINVAL;

        if (shmem_parse_options(data, &config, true))
                return error;

        spin_lock(&sbinfo->stat_lock);
        blocks = sbinfo->max_blocks - sbinfo->free_blocks;
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
        if (config.max_blocks < blocks)
                goto out;
        if (config.max_inodes < inodes)
                goto out;
        /*
         * Those tests also disallow limited->unlimited while any are in
         * use, so i_blocks will always be zero when max_blocks is zero;
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
        if (config.max_blocks && !sbinfo->max_blocks)
                goto out;
        if (config.max_inodes && !sbinfo->max_inodes)
                goto out;

        error = 0;
        sbinfo->max_blocks = config.max_blocks;
        sbinfo->free_blocks = config.max_blocks - blocks;
        sbinfo->max_inodes = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;

        mpol_put(sbinfo->mpol);
        sbinfo->mpol = config.mpol;     /* transfers initial ref */
out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
}

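/*
 * Show only the options that differ from the defaults chosen in
 * shmem_fill_super(), mirroring what shmem_parse_options() accepts.
 */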
static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
                seq_printf(seq, ",mode=%03o", sbinfo->mode);
        if (sbinfo->uid != 0)
                seq_printf(seq, ",uid=%u", sbinfo->uid);
        if (sbinfo->gid != 0)
                seq_printf(seq, ",gid=%u", sbinfo->gid);
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
}

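/*
 * Set up a new tmpfs superblock: apply the default limits, parse any
 * mount options over them, then allocate the root inode and dentry.
 */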
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        struct shmem_sb_info *sbinfo;
        int err = -ENOMEM;

        /* Round up to L1_CACHE_BYTES to resist false sharing */
        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                                L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;

        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current_fsuid();
        sbinfo->gid = current_fsgid();
        sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
        /*
         * By default we only allow half of the physical ram per
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
        if (!(sb->s_flags & MS_NOUSER)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
                        err = -EINVAL;
                        goto failed;
                }
        }
        sb->s_export_op = &shmem_export_ops;
#else
        sb->s_flags |= MS_NOUSER;
#endif

        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->free_blocks = sbinfo->max_blocks;
        sbinfo->free_inodes = sbinfo->max_inodes;

        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_xattr = shmem_xattr_handlers;
        sb->s_flags |= MS_POSIXACL;
#endif

        inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
                goto failed;
        inode->i_uid = sbinfo->uid;
        inode->i_gid = sbinfo->gid;
        root = d_alloc_root(inode);
        if (!root)
                goto failed_iput;
        sb->s_root = root;
        return 0;

failed_iput:
        iput(inode);
failed:
        shmem_put_super(sb);
        return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *p;
        p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!p)
                return NULL;
        return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
        if ((inode->i_mode & S_IFMT) == S_IFREG) {
                /* only struct inode is valid if it's an inline symlink */
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
        }
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo)
{
        struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static int init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC, init_once);
        return 0;
}

static void destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
        .readpage       = shmem_readpage,
        .write_begin    = shmem_write_begin,
        .write_end      = shmem_write_end,
#endif
        .migratepage    = migrate_page,
        .error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
#ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .fsync          = simple_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .truncate       = shmem_truncate,
        .setattr        = shmem_notify_change,
        .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = shmem_check_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .create         = shmem_create,
        .lookup         = simple_lookup,
        .link           = shmem_link,
        .unlink         = shmem_unlink,
        .symlink        = shmem_symlink,
        .mkdir          = shmem_mkdir,
        .rmdir          = shmem_rmdir,
        .mknod          = shmem_mknod,
        .rename         = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = shmem_check_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = shmem_check_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .destroy_inode  = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs         = shmem_statfs,
        .remount_fs     = shmem_remount_fs,
        .show_options   = shmem_show_options,
#endif
        .delete_inode   = shmem_delete_inode,
        .drop_inode     = generic_delete_inode,
        .put_super      = shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
        .fault          = shmem_fault,
#ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "tmpfs",
        .get_sb         = shmem_get_sb,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        int error;

        error = bdi_init(&shmem_backing_dev_info);
        if (error)
                goto out4;

        error = init_inodecache();
        if (error)
                goto out3;

        error = register_filesystem(&tmpfs_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
                                tmpfs_fs_type.name, NULL);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
                goto out1;
        }
        return 0;

out1:
        unregister_filesystem(&tmpfs_fs_type);
out2:
        destroy_inodecache();
out3:
        bdi_destroy(&shmem_backing_dev_info);
out4:
        shm_mnt = ERR_PTR(error);
        return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
        .name           = "tmpfs",
        .get_sb         = ramfs_get_sb,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

        shm_mnt = kern_mount(&tmpfs_fs_type);
        BUG_ON(IS_ERR(shm_mnt));

        return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        return 0;
}

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags)   ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)
#define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        int error;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr this;

        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;

        if (size < 0 || size > SHMEM_MAX_BYTES)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        error = -ENOMEM;
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
        root = shm_mnt->mnt_root;
        path.dentry = d_alloc(root, &this);
        if (!path.dentry)
                goto put_memory;
        path.mnt = mntget(shm_mnt);

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
                goto put_dentry;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
#ifndef CONFIG_MMU
        error = ramfs_nommu_expand_for_mapping(inode, size);
        if (error)
                goto put_dentry;
#endif

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (!file)
                goto put_dentry;

        return file;

put_dentry:
        path_put(&path);
put_memory:
        shmem_unacct_size(flags, size);
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
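
/*
 * Minimal usage sketch (illustrative only, not part of this file);
 * shmem_zero_setup() below is the canonical in-tree caller. The vma
 * is assumed to come from an mmap-style context:
 *
 *      file = shmem_file_setup("dev/example", size, vma->vm_flags);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 *      if (vma->vm_file)
 *              fput(vma->vm_file);
 *      vma->vm_file = file;
 *      vma->vm_ops = &shmem_vm_ops;
 */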

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        return 0;
}