/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

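/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE == 4096,
 * sizeof(unsigned long) == 4, and SHMEM_NR_DIRECT == 16 as in the
 * layout comment below, ENTRIES_PER_PAGE is 1024, so SHMEM_MAX_INDEX
 * = 16 + (1024*1024/2) * 1025 = 537,395,216 pages and SHMEM_MAX_BYTES
 * is about 2TB.  On 64-bit (8-byte longs) the same arithmetic gives
 * 512-entry index pages and roughly 256GB.
 */
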
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped	private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

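/*
 * Usage sketch (illustrative): callers treat direct and indirect swap
 * entries uniformly, pairing each lookup with shmem_swp_unmap():
 *
 *	entry = shmem_swp_entry(info, index, NULL);
 *	...use entry...
 *	shmem_swp_unmap(entry);
 *
 * For index < SHMEM_NR_DIRECT no page is really kmap'd, so
 * shmem_swp_balance_unmap() above fakes a kmap_atomic to keep the
 * preempt count balanced for the unconditional kunmap_atomic.
 */
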
static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

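/*
 * Example (illustrative, assuming 4K pages): a 70000-byte object with
 * VM_ACCOUNT set is charged VM_ACCT(70000) = 18 pages up front at
 * shmem_file_setup time; a tmpfs file with VM_ACCOUNT clear is instead
 * charged one page at a time via shmem_acct_block() as it is populated.
 */
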
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

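/*
 * Worked example (illustrative): if info->alloced is 10 but only 5
 * pages remain in the page cache and 2 are swapped out, the mm has
 * reclaimed 10 - 2 - 5 = 3 clean hole pages behind our back;
 * shmem_recalc_inode() returns those 3 blocks to the accounting above.
 */
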
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	       	       +-> 48-51
 * 	       	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

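/*
 * A minimal sketch (illustrative only, never built): how one index maps
 * through the directory layout documented above, mirroring the
 * arithmetic in shmem_swp_entry().
 */
#if 0
static void shmem_swp_entry_example(void)
{
	/*
	 * With the artificial ENTRIES_PER_PAGE == 4 and
	 * SHMEM_NR_DIRECT == 16 pictured above, page index 30
	 * decomposes like this:
	 */
	unsigned long index = 30 - 16;		/* 14: beyond i_direct */
	unsigned long offset = index % 4;	/* 2: slot in the leaf */
	index /= 4;				/* 3: which leaf */
	/* 3 >= ENTRIES_PER_PAGE/2, so it is in the triple-indirect half */
	index -= 4/2;				/* 1 */
	/* topdir slot = 4/2 + 1/4 = 2, i.e. dir2 in the diagram;
	 * leaf = dir2[1 % 4] = dir2[1], the block holding pages 28-31;
	 * entry = leaf[2], i.e. page 30. */
}
#endif
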
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
		kmap_atomic_to_page(entry)->nr_swapped += incdec;
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page) {
			page->nr_swapped = 0;
		}
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	limit = info->next_index;
	info->next_index = idx;
	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!topdir)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && subdir->nr_swapped) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
						offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			subdir->nr_swapped -= freed;
			if (offset)
				spin_unlock(&info->lock);
			BUG_ON(subdir->nr_swapped > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages(inode->i_mapping, inode->i_size);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(unsigned int __nocast gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}

static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		/* Page may still be null, but only if nonblock was set. */
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			/* No page was found just because we can't read it in
			 * now (being here implies nonblock != 0), but the page
			 * may exist, so set the PTE to fault it in later. */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	down(&inode->i_sem);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	up(&inode->i_sem);
	return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_sem protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else
			page = ZERO_PAGE(0);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
{
	char *this_char, *value, *rest;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;

}

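/*
 * Example (illustrative): the options parsed above come straight from
 * the mount command line, e.g.
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10240,mode=1777 tmpfs /tmp
 *
 * "size=50%" sizes the instance to half of totalram_pages; sizes
 * accept the usual k/m/g suffixes via memparse().
 */
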
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL,
				&max_blocks, &max_inodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid,
					&blocks, &inodes))
			return -EINVAL;
	}
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static kmem_cache_t *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}

static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 *
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}

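/*
 * A minimal usage sketch (illustrative only, never built): create an
 * unlinked one-page tmpfs file with its full size pre-accounted via
 * VM_ACCOUNT, then drop it.
 */
#if 0
static void shmem_file_setup_example(void)
{
	struct file *file;

	file = shmem_file_setup("example", PAGE_SIZE, VM_ACCOUNT);
	if (IS_ERR(file))
		return;		/* -EINVAL, -ENOMEM, -ENFILE or -ENOSPC */
	/* ...map or read/write through file->f_op... */
	fput(file);		/* unlinked: the final fput frees it all */
}
#endif
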
/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}