/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/ima.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
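
/*
 * Illustrative arithmetic for the limits above (not used by the code),
 * assuming SHMEM_NR_DIRECT == 16 as in the layout example at
 * shmem_swp_entry(): with 4kB pages on 32-bit, sizeof(unsigned long)
 * is 4, so ENTRIES_PER_PAGE == 1024 and ENTRIES_PER_PAGEPAGE == 1024*1024;
 * then SHMSWP_MAX_INDEX == 16 + (1024*1024/2)*1025, about 537 million
 * pages, i.e. just over 2TB.  On 64-bit, sizeof(unsigned long) == 8
 * halves ENTRIES_PER_PAGE to 512, giving about 67 million pages, i.e.
 * roughly 256GB: one eighth of the 32-bit figure, as noted above.
 */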

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
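
/*
 * For reference, how the modes are used in this file: shmem_fault and
 * shmem_readpage pass SGP_CACHE, shmem_write_begin passes SGP_WRITE,
 * shmem_notify_change passes SGP_READ, and do_shmem_file_read passes
 * SGP_READ (or SGP_DIRTY when reading on behalf of a stacking
 * filesystem, so that holes get allocated and dirtied).
 */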

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
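
/*
 * Note how VM_NORESERVE selects between the two schemes above: an
 * object with VM_NORESERVE clear (shared memory, /dev/zero mappings)
 * charges its whole size once via shmem_acct_size and nothing per
 * page, while one with VM_NORESERVE set (a tmpfs file) skips the
 * up-front charge and instead charges one page at a time through
 * shmem_acct_block as blocks are allocated.
 */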

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
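
/*
 * Worked example against the artificial layout above (ENTRIES_PER_PAGE
 * == 4, SHMEM_NR_DIRECT == 16), purely illustrative: for index 30 the
 * code computes index = 30 - 16 = 14, offset = 14 % 4 = 2, then
 * index = 14 / 4 = 3.  Since 3 >= 4/2 we are in the triple-indirect
 * half: index becomes 1, dir advances to the dir2 slot, and after
 * mapping dir2 we step to its second subdir (pages 28-31), where
 * offset 2 is page 30.
 */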

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
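
/*
 * Side note: page_private() of each indirect index page is used here
 * as a count of the swap entries it holds; shmem_truncate_range tests
 * it to skip subdirs with no swap entries, and decrements it as
 * shmem_map_and_free_swp frees them.
 */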

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
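
/*
 * The SGP_READ escape above returns a kmap of ZERO_PAGE(0) rather than
 * NULL: the entry read through it is all zeroes (no swap), and handing
 * back a mapped page keeps the caller's eventual shmem_swp_unmap
 * balanced, just as shmem_swp_balance_unmap does for i_direct entries.
 */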

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:	pointer to the directory
 * @edir:	pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to the caller) when swap accounting is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
	/*
	 * Can some race bring us here?  We've been holding page lock,
	 * so I think not; but would rather try again later than BUG()
	 */
	unlock_page(page);
	page_cache_release(page);
out:
	return (found < 0) ? found : 0;
}
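
/*
 * Return convention, for reference: shmem_unuse_inode returns 1 when
 * the entry was found (the page is now back in the page cache), 0 to
 * keep scanning, or a negative errno; shmem_unuse maps that to 0 for
 * both the found and not-found cases, passing only errors back up.
 */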

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_shmem_alloc(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		swap_writepage(page, wbc);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
1215 | */ | |
1216 | static int shmem_getpage(struct inode *inode, unsigned long idx, | |
1217 | struct page **pagep, enum sgp_type sgp, int *type) | |
1218 | { | |
1219 | struct address_space *mapping = inode->i_mapping; | |
1220 | struct shmem_inode_info *info = SHMEM_I(inode); | |
1221 | struct shmem_sb_info *sbinfo; | |
1222 | struct page *filepage = *pagep; | |
1223 | struct page *swappage; | |
1224 | swp_entry_t *entry; | |
1225 | swp_entry_t swap; | |
1226 | gfp_t gfp; | |
1227 | int error; | |
1228 | ||
1229 | if (idx >= SHMEM_MAX_INDEX) | |
1230 | return -EFBIG; | |
1231 | ||
1232 | if (type) | |
1233 | *type = 0; | |
1234 | ||
1235 | /* | |
1236 | * Normally, filepage is NULL on entry, and either found | |
1237 | * uptodate immediately, or allocated and zeroed, or read | |
1238 | * in under swappage, which is then assigned to filepage. | |
1239 | * But shmem_readpage (required for splice) passes in a locked | |
1240 | * filepage, which may be found not uptodate by other callers | |
1241 | * too, and may need to be copied from the swappage read in. | |
1242 | */ | |
1243 | repeat: | |
1244 | if (!filepage) | |
1245 | filepage = find_lock_page(mapping, idx); | |
1246 | if (filepage && PageUptodate(filepage)) | |
1247 | goto done; | |
1248 | error = 0; | |
1249 | gfp = mapping_gfp_mask(mapping); | |
1250 | if (!filepage) { | |
1251 | /* | |
1252 | * Try to preload while we can wait, to not make a habit of | |
1253 | * draining atomic reserves; but don't latch on to this cpu. | |
1254 | */ | |
1255 | error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | |
1256 | if (error) | |
1257 | goto failed; | |
1258 | radix_tree_preload_end(); | |
1259 | } | |
1260 | ||
1261 | spin_lock(&info->lock); | |
1262 | shmem_recalc_inode(inode); | |
1263 | entry = shmem_swp_alloc(info, idx, sgp); | |
1264 | if (IS_ERR(entry)) { | |
1265 | spin_unlock(&info->lock); | |
1266 | error = PTR_ERR(entry); | |
1267 | goto failed; | |
1268 | } | |
1269 | swap = *entry; | |
1270 | ||
1271 | if (swap.val) { | |
1272 | /* Look it up and read it in.. */ | |
1273 | swappage = lookup_swap_cache(swap); | |
1274 | if (!swappage) { | |
1275 | shmem_swp_unmap(entry); | |
1276 | /* here we actually do the io */ | |
1277 | if (type && !(*type & VM_FAULT_MAJOR)) { | |
1278 | __count_vm_event(PGMAJFAULT); | |
1279 | *type |= VM_FAULT_MAJOR; | |
1280 | } | |
1281 | spin_unlock(&info->lock); | |
1282 | swappage = shmem_swapin(swap, gfp, info, idx); | |
1283 | if (!swappage) { | |
1284 | spin_lock(&info->lock); | |
1285 | entry = shmem_swp_alloc(info, idx, sgp); | |
1286 | if (IS_ERR(entry)) | |
1287 | error = PTR_ERR(entry); | |
1288 | else { | |
1289 | if (entry->val == swap.val) | |
1290 | error = -ENOMEM; | |
1291 | shmem_swp_unmap(entry); | |
1292 | } | |
1293 | spin_unlock(&info->lock); | |
1294 | if (error) | |
1295 | goto failed; | |
1296 | goto repeat; | |
1297 | } | |
1298 | wait_on_page_locked(swappage); | |
1299 | page_cache_release(swappage); | |
1300 | goto repeat; | |
1301 | } | |
1302 | ||
1303 | /* We have to do this with page locked to prevent races */ | |
1304 | if (!trylock_page(swappage)) { | |
1305 | shmem_swp_unmap(entry); | |
1306 | spin_unlock(&info->lock); | |
1307 | wait_on_page_locked(swappage); | |
1308 | page_cache_release(swappage); | |
1309 | goto repeat; | |
1310 | } | |
1311 | if (PageWriteback(swappage)) { | |
1312 | shmem_swp_unmap(entry); | |
1313 | spin_unlock(&info->lock); | |
1314 | wait_on_page_writeback(swappage); | |
1315 | unlock_page(swappage); | |
1316 | page_cache_release(swappage); | |
1317 | goto repeat; | |
1318 | } | |
1319 | if (!PageUptodate(swappage)) { | |
1320 | shmem_swp_unmap(entry); | |
1321 | spin_unlock(&info->lock); | |
1322 | unlock_page(swappage); | |
1323 | page_cache_release(swappage); | |
1324 | error = -EIO; | |
1325 | goto failed; | |
1326 | } | |
1327 | ||
1328 | if (filepage) { | |
1329 | shmem_swp_set(info, entry, 0); | |
1330 | shmem_swp_unmap(entry); | |
1331 | delete_from_swap_cache(swappage); | |
1332 | spin_unlock(&info->lock); | |
1333 | copy_highpage(filepage, swappage); | |
1334 | unlock_page(swappage); | |
1335 | page_cache_release(swappage); | |
1336 | flush_dcache_page(filepage); | |
1337 | SetPageUptodate(filepage); | |
1338 | set_page_dirty(filepage); | |
1339 | swap_free(swap); | |
1340 | } else if (!(error = add_to_page_cache_locked(swappage, mapping, | |
1341 | idx, GFP_NOWAIT))) { | |
1342 | info->flags |= SHMEM_PAGEIN; | |
1343 | shmem_swp_set(info, entry, 0); | |
1344 | shmem_swp_unmap(entry); | |
1345 | delete_from_swap_cache(swappage); | |
1346 | spin_unlock(&info->lock); | |
1347 | filepage = swappage; | |
1348 | set_page_dirty(filepage); | |
1349 | swap_free(swap); | |
1350 | } else { | |
1351 | shmem_swp_unmap(entry); | |
1352 | spin_unlock(&info->lock); | |
1353 | if (error == -ENOMEM) { | |
1354 | /* | |
1355 | * reclaim from proper memory cgroup and | |
1356 | * call memcg's OOM if needed. | |
1357 | */ | |
1358 | error = mem_cgroup_shmem_charge_fallback( | |
1359 | swappage, | |
1360 | current->mm, | |
1361 | gfp); | |
1362 | if (error) { | |
1363 | unlock_page(swappage); | |
1364 | page_cache_release(swappage); | |
1365 | goto failed; | |
1366 | } | |
1367 | } | |
1368 | unlock_page(swappage); | |
1369 | page_cache_release(swappage); | |
1370 | goto repeat; | |
1371 | } | |
1372 | } else if (sgp == SGP_READ && !filepage) { | |
1373 | shmem_swp_unmap(entry); | |
1374 | filepage = find_get_page(mapping, idx); | |
1375 | if (filepage && | |
1376 | (!PageUptodate(filepage) || !trylock_page(filepage))) { | |
1377 | spin_unlock(&info->lock); | |
1378 | wait_on_page_locked(filepage); | |
1379 | page_cache_release(filepage); | |
1380 | filepage = NULL; | |
1381 | goto repeat; | |
1382 | } | |
1383 | spin_unlock(&info->lock); | |
1384 | } else { | |
1385 | shmem_swp_unmap(entry); | |
1386 | sbinfo = SHMEM_SB(inode->i_sb); | |
1387 | if (sbinfo->max_blocks) { | |
1388 | spin_lock(&sbinfo->stat_lock); | |
1389 | if (sbinfo->free_blocks == 0 || | |
1390 | shmem_acct_block(info->flags)) { | |
1391 | spin_unlock(&sbinfo->stat_lock); | |
1392 | spin_unlock(&info->lock); | |
1393 | error = -ENOSPC; | |
1394 | goto failed; | |
1395 | } | |
1396 | sbinfo->free_blocks--; | |
1397 | inode->i_blocks += BLOCKS_PER_PAGE; | |
1398 | spin_unlock(&sbinfo->stat_lock); | |
1399 | } else if (shmem_acct_block(info->flags)) { | |
1400 | spin_unlock(&info->lock); | |
1401 | error = -ENOSPC; | |
1402 | goto failed; | |
1403 | } | |
1404 | ||
1405 | if (!filepage) { | |
1406 | int ret; | |
1407 | ||
1408 | spin_unlock(&info->lock); | |
1409 | filepage = shmem_alloc_page(gfp, info, idx); | |
1410 | if (!filepage) { | |
1411 | shmem_unacct_blocks(info->flags, 1); | |
1412 | shmem_free_blocks(inode, 1); | |
1413 | error = -ENOMEM; | |
1414 | goto failed; | |
1415 | } | |
1416 | SetPageSwapBacked(filepage); | |
1417 | ||
1418 | /* Precharge page while we can wait, compensate after */ | |
1419 | error = mem_cgroup_cache_charge(filepage, current->mm, | |
1420 | GFP_KERNEL); | |
1421 | if (error) { | |
1422 | page_cache_release(filepage); | |
1423 | shmem_unacct_blocks(info->flags, 1); | |
1424 | shmem_free_blocks(inode, 1); | |
1425 | filepage = NULL; | |
1426 | goto failed; | |
1427 | } | |
1428 | ||
1429 | spin_lock(&info->lock); | |
1430 | entry = shmem_swp_alloc(info, idx, sgp); | |
1431 | if (IS_ERR(entry)) | |
1432 | error = PTR_ERR(entry); | |
1433 | else { | |
1434 | swap = *entry; | |
1435 | shmem_swp_unmap(entry); | |
1436 | } | |
1437 | ret = error || swap.val; | |
1438 | if (ret) | |
1439 | mem_cgroup_uncharge_cache_page(filepage); | |
1440 | else | |
1441 | ret = add_to_page_cache_lru(filepage, mapping, | |
1442 | idx, GFP_NOWAIT); | |
1443 | /* | |
1444 | * At add_to_page_cache_lru() failure, uncharge will | |
1445 | * be done automatically. | |
1446 | */ | |
1447 | if (ret) { | |
1448 | spin_unlock(&info->lock); | |
1449 | page_cache_release(filepage); | |
1450 | shmem_unacct_blocks(info->flags, 1); | |
1451 | shmem_free_blocks(inode, 1); | |
1452 | filepage = NULL; | |
1453 | if (error) | |
1454 | goto failed; | |
1455 | goto repeat; | |
1456 | } | |
1457 | info->flags |= SHMEM_PAGEIN; | |
1458 | } | |
1459 | ||
1460 | info->alloced++; | |
1461 | spin_unlock(&info->lock); | |
1462 | clear_highpage(filepage); | |
1463 | flush_dcache_page(filepage); | |
1464 | SetPageUptodate(filepage); | |
1465 | if (sgp == SGP_DIRTY) | |
1466 | set_page_dirty(filepage); | |
1467 | } | |
1468 | done: | |
1469 | *pagep = filepage; | |
1470 | return 0; | |
1471 | ||
1472 | failed: | |
1473 | if (*pagep != filepage) { | |
1474 | unlock_page(filepage); | |
1475 | page_cache_release(filepage); | |
1476 | } | |
1477 | return error; | |
1478 | } | |
1479 | ||
1480 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
1481 | { | |
1482 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | |
1483 | int error; | |
1484 | int ret; | |
1485 | ||
1486 | if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | |
1487 | return VM_FAULT_SIGBUS; | |
1488 | ||
1489 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); | |
1490 | if (error) | |
1491 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); | |
1492 | ||
1493 | return ret | VM_FAULT_LOCKED; | |
1494 | } | |
1495 | ||
1496 | #ifdef CONFIG_NUMA | |
1497 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) | |
1498 | { | |
1499 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | |
1500 | return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); | |
1501 | } | |
1502 | ||
1503 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | |
1504 | unsigned long addr) | |
1505 | { | |
1506 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | |
1507 | unsigned long idx; | |
1508 | ||
1509 | idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | |
1510 | return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); | |
1511 | } | |
1512 | #endif | |
1513 | ||
1514 | int shmem_lock(struct file *file, int lock, struct user_struct *user) | |
1515 | { | |
1516 | struct inode *inode = file->f_path.dentry->d_inode; | |
1517 | struct shmem_inode_info *info = SHMEM_I(inode); | |
1518 | int retval = -ENOMEM; | |
1519 | ||
1520 | spin_lock(&info->lock); | |
1521 | if (lock && !(info->flags & VM_LOCKED)) { | |
1522 | if (!user_shm_lock(inode->i_size, user)) | |
1523 | goto out_nomem; | |
1524 | info->flags |= VM_LOCKED; | |
1525 | mapping_set_unevictable(file->f_mapping); | |
1526 | } | |
1527 | if (!lock && (info->flags & VM_LOCKED) && user) { | |
1528 | user_shm_unlock(inode->i_size, user); | |
1529 | info->flags &= ~VM_LOCKED; | |
1530 | mapping_clear_unevictable(file->f_mapping); | |
1531 | scan_mapping_unevictable_pages(file->f_mapping); | |
1532 | } | |
1533 | retval = 0; | |
1534 | ||
1535 | out_nomem: | |
1536 | spin_unlock(&info->lock); | |
1537 | return retval; | |
1538 | } | |
1539 | ||
1540 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) | |
1541 | { | |
1542 | file_accessed(file); | |
1543 | vma->vm_ops = &shmem_vm_ops; | |
1544 | vma->vm_flags |= VM_CAN_NONLINEAR; | |
1545 | return 0; | |
1546 | } | |
1547 | ||
1548 | static struct inode *shmem_get_inode(struct super_block *sb, int mode, | |
1549 | dev_t dev, unsigned long flags) | |
1550 | { | |
1551 | struct inode *inode; | |
1552 | struct shmem_inode_info *info; | |
1553 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | |
1554 | ||
1555 | if (shmem_reserve_inode(sb)) | |
1556 | return NULL; | |
1557 | ||
1558 | inode = new_inode(sb); | |
1559 | if (inode) { | |
1560 | inode->i_mode = mode; | |
1561 | inode->i_uid = current_fsuid(); | |
1562 | inode->i_gid = current_fsgid(); | |
1563 | inode->i_blocks = 0; | |
1564 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; | |
1565 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | |
1566 | inode->i_generation = get_seconds(); | |
1567 | info = SHMEM_I(inode); | |
1568 | memset(info, 0, (char *)inode - (char *)info); | |
1569 | spin_lock_init(&info->lock); | |
1570 | info->flags = flags & VM_NORESERVE; | |
1571 | INIT_LIST_HEAD(&info->swaplist); | |
1572 | cache_no_acl(inode); | |
1573 | ||
1574 | switch (mode & S_IFMT) { | |
1575 | default: | |
1576 | inode->i_op = &shmem_special_inode_operations; | |
1577 | init_special_inode(inode, mode, dev); | |
1578 | break; | |
1579 | case S_IFREG: | |
1580 | inode->i_mapping->a_ops = &shmem_aops; | |
1581 | inode->i_op = &shmem_inode_operations; | |
1582 | inode->i_fop = &shmem_file_operations; | |
1583 | mpol_shared_policy_init(&info->policy, | |
1584 | shmem_get_sbmpol(sbinfo)); | |
1585 | break; | |
1586 | case S_IFDIR: | |
1587 | inc_nlink(inode); | |
1588 | /* Some things misbehave if size == 0 on a directory */ | |
1589 | inode->i_size = 2 * BOGO_DIRENT_SIZE; | |
1590 | inode->i_op = &shmem_dir_inode_operations; | |
1591 | inode->i_fop = &simple_dir_operations; | |
1592 | break; | |
1593 | case S_IFLNK: | |
1594 | /* | |
1595 | * Must not load anything in the rbtree, | |
1596 | * mpol_free_shared_policy will not be called. | |
1597 | */ | |
1598 | mpol_shared_policy_init(&info->policy, NULL); | |
1599 | break; | |
1600 | } | |
1601 | } else | |
1602 | shmem_free_inode(sb); | |
1603 | return inode; | |
1604 | } | |
1605 | ||
1606 | #ifdef CONFIG_TMPFS | |
1607 | static const struct inode_operations shmem_symlink_inode_operations; | |
1608 | static const struct inode_operations shmem_symlink_inline_operations; | |
1609 | ||
1610 | /* | |
1611 | * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin; | |
1612 | * but providing them allows a tmpfs file to be used for splice, sendfile, and | |
1613 | * below the loop driver, in the generic fashion that many filesystems support. | |
1614 | */ | |
1615 | static int shmem_readpage(struct file *file, struct page *page) | |
1616 | { | |
1617 | struct inode *inode = page->mapping->host; | |
1618 | int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL); | |
1619 | unlock_page(page); | |
1620 | return error; | |
1621 | } | |
1622 | ||
1623 | static int | |
1624 | shmem_write_begin(struct file *file, struct address_space *mapping, | |
1625 | loff_t pos, unsigned len, unsigned flags, | |
1626 | struct page **pagep, void **fsdata) | |
1627 | { | |
1628 | struct inode *inode = mapping->host; | |
1629 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | |
1630 | *pagep = NULL; | |
1631 | return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); | |
1632 | } | |
1633 | ||
1634 | static int | |
1635 | shmem_write_end(struct file *file, struct address_space *mapping, | |
1636 | loff_t pos, unsigned len, unsigned copied, | |
1637 | struct page *page, void *fsdata) | |
1638 | { | |
1639 | struct inode *inode = mapping->host; | |
1640 | ||
1641 | if (pos + copied > inode->i_size) | |
1642 | i_size_write(inode, pos + copied); | |
1643 | ||
1644 | set_page_dirty(page); | |
1645 | unlock_page(page); | |
1646 | page_cache_release(page); | |
1647 | ||
1648 | return copied; | |
1649 | } | |
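Note how shmem_write_end() advances i_size whenever a write ends past the current EOF, so a pwrite() beyond the end grows the file with no explicit truncate. A small sketch, again assuming a tmpfs at /dev/shm (path and offset are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/shm/shmem-size-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
        if (fd < 0) { perror("open"); return 1; }
        pwrite(fd, "x", 1, 10000);              /* pos + copied = 10001 > i_size */
        struct stat st;
        fstat(fd, &st);
        printf("st_size = %lld\n", (long long)st.st_size);      /* 10001 */
        close(fd);
        unlink("/dev/shm/shmem-size-demo");
        return 0;
}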
1650 | ||
1651 | static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) | |
1652 | { | |
1653 | struct inode *inode = filp->f_path.dentry->d_inode; | |
1654 | struct address_space *mapping = inode->i_mapping; | |
1655 | unsigned long index, offset; | |
1656 | enum sgp_type sgp = SGP_READ; | |
1657 | ||
1658 | /* | |
1659 | * Might this read be for a stacking filesystem? Then when reading | |
1660 | * holes of a sparse file, we actually need to allocate those pages, | |
1661 | * and even mark them dirty, so it cannot exceed the max_blocks limit. | |
1662 | */ | |
1663 | if (segment_eq(get_fs(), KERNEL_DS)) | |
1664 | sgp = SGP_DIRTY; | |
1665 | ||
1666 | index = *ppos >> PAGE_CACHE_SHIFT; | |
1667 | offset = *ppos & ~PAGE_CACHE_MASK; | |
1668 | ||
1669 | for (;;) { | |
1670 | struct page *page = NULL; | |
1671 | unsigned long end_index, nr, ret; | |
1672 | loff_t i_size = i_size_read(inode); | |
1673 | ||
1674 | end_index = i_size >> PAGE_CACHE_SHIFT; | |
1675 | if (index > end_index) | |
1676 | break; | |
1677 | if (index == end_index) { | |
1678 | nr = i_size & ~PAGE_CACHE_MASK; | |
1679 | if (nr <= offset) | |
1680 | break; | |
1681 | } | |
1682 | ||
1683 | desc->error = shmem_getpage(inode, index, &page, sgp, NULL); | |
1684 | if (desc->error) { | |
1685 | if (desc->error == -EINVAL) | |
1686 | desc->error = 0; | |
1687 | break; | |
1688 | } | |
1689 | if (page) | |
1690 | unlock_page(page); | |
1691 | ||
1692 | /* | |
1693 | * We must evaluate after, since reads (unlike writes) | |
1694 | * are called without i_mutex protection against truncate | |
1695 | */ | |
1696 | nr = PAGE_CACHE_SIZE; | |
1697 | i_size = i_size_read(inode); | |
1698 | end_index = i_size >> PAGE_CACHE_SHIFT; | |
1699 | if (index == end_index) { | |
1700 | nr = i_size & ~PAGE_CACHE_MASK; | |
1701 | if (nr <= offset) { | |
1702 | if (page) | |
1703 | page_cache_release(page); | |
1704 | break; | |
1705 | } | |
1706 | } | |
1707 | nr -= offset; | |
1708 | ||
1709 | if (page) { | |
1710 | /* | |
1711 | * If users can be writing to this page using arbitrary | |
1712 | * virtual addresses, take care about potential aliasing | |
1713 | * before reading the page on the kernel side. | |
1714 | */ | |
1715 | if (mapping_writably_mapped(mapping)) | |
1716 | flush_dcache_page(page); | |
1717 | /* | |
1718 | * Mark the page accessed if we read the beginning. | |
1719 | */ | |
1720 | if (!offset) | |
1721 | mark_page_accessed(page); | |
1722 | } else { | |
1723 | page = ZERO_PAGE(0); | |
1724 | page_cache_get(page); | |
1725 | } | |
1726 | ||
1727 | /* | |
1728 | * Ok, we have the page, and it's up-to-date, so | |
1729 | * now we can copy it to user space... | |
1730 | * | |
1731 | * The actor routine returns how many bytes were actually used.. | |
1732 | * NOTE! This may not be the same as how much of a user buffer | |
1733 | * we filled up (we may be padding etc), so we can only update | |
1734 | * "pos" here (the actor routine has to update the user buffer | |
1735 | * pointers and the remaining count). | |
1736 | */ | |
1737 | ret = actor(desc, page, offset, nr); | |
1738 | offset += ret; | |
1739 | index += offset >> PAGE_CACHE_SHIFT; | |
1740 | offset &= ~PAGE_CACHE_MASK; | |
1741 | ||
1742 | page_cache_release(page); | |
1743 | if (ret != nr || !desc->count) | |
1744 | break; | |
1745 | ||
1746 | cond_resched(); | |
1747 | } | |
1748 | ||
1749 | *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; | |
1750 | file_accessed(filp); | |
1751 | } | |
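One observable consequence of the SGP_READ path above: reading a hole copies data from ZERO_PAGE(0) and allocates no backing pages. A sketch (tmpfs path and hole offset are illustrative; the no-allocation behaviour is internal and taken on trust from the code above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/shm/shmem-hole-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
        if (fd < 0) { perror("open"); return 1; }
        ftruncate(fd, 65536);                   /* sparse: no pages allocated yet */
        char buf[16];
        ssize_t n = pread(fd, buf, sizeof(buf), 4096);
        printf("read %zd bytes, first byte %d\n", n, buf[0]);   /* 16 bytes of 0 */
        close(fd);
        unlink("/dev/shm/shmem-hole-demo");
        return 0;
}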
1752 | ||
1753 | static ssize_t shmem_file_aio_read(struct kiocb *iocb, | |
1754 | const struct iovec *iov, unsigned long nr_segs, loff_t pos) | |
1755 | { | |
1756 | struct file *filp = iocb->ki_filp; | |
1757 | ssize_t retval; | |
1758 | unsigned long seg; | |
1759 | size_t count; | |
1760 | loff_t *ppos = &iocb->ki_pos; | |
1761 | ||
1762 | retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); | |
1763 | if (retval) | |
1764 | return retval; | |
1765 | ||
1766 | for (seg = 0; seg < nr_segs; seg++) { | |
1767 | read_descriptor_t desc; | |
1768 | ||
1769 | desc.written = 0; | |
1770 | desc.arg.buf = iov[seg].iov_base; | |
1771 | desc.count = iov[seg].iov_len; | |
1772 | if (desc.count == 0) | |
1773 | continue; | |
1774 | desc.error = 0; | |
1775 | do_shmem_file_read(filp, ppos, &desc, file_read_actor); | |
1776 | retval += desc.written; | |
1777 | if (desc.error) { | |
1778 | retval = retval ?: desc.error; | |
1779 | break; | |
1780 | } | |
1781 | if (desc.count > 0) | |
1782 | break; | |
1783 | } | |
1784 | return retval; | |
1785 | } | |
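The loop above drives do_shmem_file_read() once per iovec segment, which readv(2) exercises directly. An illustrative sketch (file name and layout are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/shm/shmem-readv-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
        if (fd < 0) { perror("open"); return 1; }
        write(fd, "abcdefgh", 8);
        lseek(fd, 0, SEEK_SET);
        char a[4], b[4];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        ssize_t n = readv(fd, iov, 2);          /* two desc loops, 4 bytes each */
        printf("readv returned %zd\n", n);
        close(fd);
        unlink("/dev/shm/shmem-readv-demo");
        return 0;
}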
1786 | ||
1787 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) | |
1788 | { | |
1789 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); | |
1790 | ||
1791 | buf->f_type = TMPFS_MAGIC; | |
1792 | buf->f_bsize = PAGE_CACHE_SIZE; | |
1793 | buf->f_namelen = NAME_MAX; | |
1794 | spin_lock(&sbinfo->stat_lock); | |
1795 | if (sbinfo->max_blocks) { | |
1796 | buf->f_blocks = sbinfo->max_blocks; | |
1797 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; | |
1798 | } | |
1799 | if (sbinfo->max_inodes) { | |
1800 | buf->f_files = sbinfo->max_inodes; | |
1801 | buf->f_ffree = sbinfo->free_inodes; | |
1802 | } | |
1803 | /* else leave those fields 0 like simple_statfs */ | |
1804 | spin_unlock(&sbinfo->stat_lock); | |
1805 | return 0; | |
1806 | } | |
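From userspace, statfs(2) reports exactly what shmem_statfs() fills in; when the mount has no size= limit, f_blocks and friends stay 0, as with simple_statfs. A sketch (the path is illustrative):

#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
        struct statfs buf;
        if (statfs("/dev/shm", &buf) < 0) { perror("statfs"); return 1; }
        printf("type=%#lx bsize=%ld blocks=%llu bfree=%llu files=%llu\n",
               (unsigned long)buf.f_type, (long)buf.f_bsize,
               (unsigned long long)buf.f_blocks,
               (unsigned long long)buf.f_bfree,
               (unsigned long long)buf.f_files);
        return 0;
}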
1807 | ||
1808 | /* | |
1809 | * File creation. Allocate an inode, and we're done.. | |
1810 | */ | |
1811 | static int | |
1812 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | |
1813 | { | |
1814 | struct inode *inode; | |
1815 | int error = -ENOSPC; | |
1816 | ||
1817 | inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE); | |
1818 | if (inode) { | |
1819 | error = security_inode_init_security(inode, dir, NULL, NULL, | |
1820 | NULL); | |
1821 | if (error) { | |
1822 | if (error != -EOPNOTSUPP) { | |
1823 | iput(inode); | |
1824 | return error; | |
1825 | } | |
1826 | } | |
1827 | error = shmem_acl_init(inode, dir); | |
1828 | if (error) { | |
1829 | iput(inode); | |
1830 | return error; | |
1831 | } | |
1832 | if (dir->i_mode & S_ISGID) { | |
1833 | inode->i_gid = dir->i_gid; | |
1834 | if (S_ISDIR(mode)) | |
1835 | inode->i_mode |= S_ISGID; | |
1836 | } | |
1837 | dir->i_size += BOGO_DIRENT_SIZE; | |
1838 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1839 | d_instantiate(dentry, inode); | |
1840 | dget(dentry); /* Extra count - pin the dentry in core */ | |
1841 | } | |
1842 | return error; | |
1843 | } | |
1844 | ||
1845 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |
1846 | { | |
1847 | int error; | |
1848 | ||
1849 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) | |
1850 | return error; | |
1851 | inc_nlink(dir); | |
1852 | return 0; | |
1853 | } | |
1854 | ||
1855 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, | |
1856 | struct nameidata *nd) | |
1857 | { | |
1858 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); | |
1859 | } | |
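shmem_mkdir() bumps the parent's link count for the new directory's ".." entry, which is visible as st_nlink on the parent. A sketch (paths assume a writable tmpfs at /dev/shm):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat st;
        mkdir("/dev/shm/shmem-dir-demo", 0700);
        stat("/dev/shm", &st);
        printf("parent nlink after mkdir: %lu\n", (unsigned long)st.st_nlink);
        rmdir("/dev/shm/shmem-dir-demo");       /* shmem_rmdir() drops it again */
        return 0;
}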
1860 | ||
1861 | /* | |
1862 | * Link a file.. | |
1863 | */ | |
1864 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) | |
1865 | { | |
1866 | struct inode *inode = old_dentry->d_inode; | |
1867 | int ret; | |
1868 | ||
1869 | /* | |
1870 | * No ordinary (disk based) filesystem counts links as inodes; | |
1871 | * but each new link needs a new dentry, pinning lowmem, and | |
1872 | * tmpfs dentries cannot be pruned until they are unlinked. | |
1873 | */ | |
1874 | ret = shmem_reserve_inode(inode->i_sb); | |
1875 | if (ret) | |
1876 | goto out; | |
1877 | ||
1878 | dir->i_size += BOGO_DIRENT_SIZE; | |
1879 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1880 | inc_nlink(inode); | |
1881 | atomic_inc(&inode->i_count); /* New dentry reference */ | |
1882 | dget(dentry); /* Extra pinning count for the created dentry */ | |
1883 | d_instantiate(dentry, inode); | |
1884 | out: | |
1885 | return ret; | |
1886 | } | |
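As the comment above explains, each hard link is charged against the tmpfs inode budget because its dentry stays pinned. A sketch; the names are illustrative, and the ENOSPC case only triggers once nr_inodes is exhausted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/shm/shmem-link-a", O_CREAT | O_RDWR, 0600);
        if (fd < 0) { perror("open"); return 1; }
        close(fd);
        if (link("/dev/shm/shmem-link-a", "/dev/shm/shmem-link-b") < 0)
                perror("link");                 /* ENOSPC once nr_inodes is exhausted */
        struct stat st;
        stat("/dev/shm/shmem-link-a", &st);
        printf("nlink = %lu\n", (unsigned long)st.st_nlink);    /* 2 */
        unlink("/dev/shm/shmem-link-a");
        unlink("/dev/shm/shmem-link-b");
        return 0;
}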
1887 | ||
1888 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) | |
1889 | { | |
1890 | struct inode *inode = dentry->d_inode; | |
1891 | ||
1892 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) | |
1893 | shmem_free_inode(inode->i_sb); | |
1894 | ||
1895 | dir->i_size -= BOGO_DIRENT_SIZE; | |
1896 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1897 | drop_nlink(inode); | |
1898 | dput(dentry); /* Undo the count from "create" - this does all the work */ | |
1899 | return 0; | |
1900 | } | |
1901 | ||
1902 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) | |
1903 | { | |
1904 | if (!simple_empty(dentry)) | |
1905 | return -ENOTEMPTY; | |
1906 | ||
1907 | drop_nlink(dentry->d_inode); | |
1908 | drop_nlink(dir); | |
1909 | return shmem_unlink(dir, dentry); | |
1910 | } | |
1911 | ||
1912 | /* | |
1913 | * The VFS layer already does all the dentry stuff for rename; | |
1914 | * we just have to decrement the usage count for the target if | |
1915 | * it exists, so that the VFS layer correctly frees it when it | |
1916 | * gets overwritten. | |
1917 | */ | |
1918 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) | |
1919 | { | |
1920 | struct inode *inode = old_dentry->d_inode; | |
1921 | int they_are_dirs = S_ISDIR(inode->i_mode); | |
1922 | ||
1923 | if (!simple_empty(new_dentry)) | |
1924 | return -ENOTEMPTY; | |
1925 | ||
1926 | if (new_dentry->d_inode) { | |
1927 | (void) shmem_unlink(new_dir, new_dentry); | |
1928 | if (they_are_dirs) | |
1929 | drop_nlink(old_dir); | |
1930 | } else if (they_are_dirs) { | |
1931 | drop_nlink(old_dir); | |
1932 | inc_nlink(new_dir); | |
1933 | } | |
1934 | ||
1935 | old_dir->i_size -= BOGO_DIRENT_SIZE; | |
1936 | new_dir->i_size += BOGO_DIRENT_SIZE; | |
1937 | old_dir->i_ctime = old_dir->i_mtime = | |
1938 | new_dir->i_ctime = new_dir->i_mtime = | |
1939 | inode->i_ctime = CURRENT_TIME; | |
1940 | return 0; | |
1941 | } | |
1942 | ||
1943 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |
1944 | { | |
1945 | int error; | |
1946 | int len; | |
1947 | struct inode *inode; | |
1948 | struct page *page = NULL; | |
1949 | char *kaddr; | |
1950 | struct shmem_inode_info *info; | |
1951 | ||
1952 | len = strlen(symname) + 1; | |
1953 | if (len > PAGE_CACHE_SIZE) | |
1954 | return -ENAMETOOLONG; | |
1955 | ||
1956 | inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); | |
1957 | if (!inode) | |
1958 | return -ENOSPC; | |
1959 | ||
1960 | error = security_inode_init_security(inode, dir, NULL, NULL, | |
1961 | NULL); | |
1962 | if (error) { | |
1963 | if (error != -EOPNOTSUPP) { | |
1964 | iput(inode); | |
1965 | return error; | |
1966 | } | |
1967 | error = 0; | |
1968 | } | |
1969 | ||
1970 | info = SHMEM_I(inode); | |
1971 | inode->i_size = len-1; | |
1972 | if (len <= (char *)inode - (char *)info) { | |
1973 | /* do it inline */ | |
1974 | memcpy(info, symname, len); | |
1975 | inode->i_op = &shmem_symlink_inline_operations; | |
1976 | } else { | |
1977 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); | |
1978 | if (error) { | |
1979 | iput(inode); | |
1980 | return error; | |
1981 | } | |
1982 | inode->i_mapping->a_ops = &shmem_aops; | |
1983 | inode->i_op = &shmem_symlink_inode_operations; | |
1984 | kaddr = kmap_atomic(page, KM_USER0); | |
1985 | memcpy(kaddr, symname, len); | |
1986 | kunmap_atomic(kaddr, KM_USER0); | |
1987 | set_page_dirty(page); | |
1988 | unlock_page(page); | |
1989 | page_cache_release(page); | |
1990 | } | |
1991 | if (dir->i_mode & S_ISGID) | |
1992 | inode->i_gid = dir->i_gid; | |
1993 | dir->i_size += BOGO_DIRENT_SIZE; | |
1994 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1995 | d_instantiate(dentry, inode); | |
1996 | dget(dentry); | |
1997 | return 0; | |
1998 | } | |
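Symlink targets short enough to fit in the shmem inode are stored inline; longer ones go through shmem_getpage() into a data page. From userspace the two are indistinguishable, which this sketch illustrates (the 200-byte target is just an arbitrarily long example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char target[201], buf[256];
        memset(target, 'a', 200);
        target[200] = '\0';
        symlink("short", "/dev/shm/shmem-sym-inline");  /* inline in the inode */
        symlink(target, "/dev/shm/shmem-sym-page");     /* stored in a page */
        ssize_t n = readlink("/dev/shm/shmem-sym-page", buf, sizeof(buf) - 1);
        if (n >= 0) {
                buf[n] = '\0';
                printf("long target resolved, %zd bytes\n", n);
        }
        unlink("/dev/shm/shmem-sym-inline");
        unlink("/dev/shm/shmem-sym-page");
        return 0;
}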
1999 | ||
2000 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) | |
2001 | { | |
2002 | nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); | |
2003 | return NULL; | |
2004 | } | |
2005 | ||
2006 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) | |
2007 | { | |
2008 | struct page *page = NULL; | |
2009 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); | |
2010 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); | |
2011 | if (page) | |
2012 | unlock_page(page); | |
2013 | return page; | |
2014 | } | |
2015 | ||
2016 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) | |
2017 | { | |
2018 | if (!IS_ERR(nd_get_link(nd))) { | |
2019 | struct page *page = cookie; | |
2020 | kunmap(page); | |
2021 | mark_page_accessed(page); | |
2022 | page_cache_release(page); | |
2023 | } | |
2024 | } | |
2025 | ||
2026 | static const struct inode_operations shmem_symlink_inline_operations = { | |
2027 | .readlink = generic_readlink, | |
2028 | .follow_link = shmem_follow_link_inline, | |
2029 | }; | |
2030 | ||
2031 | static const struct inode_operations shmem_symlink_inode_operations = { | |
2032 | .truncate = shmem_truncate, | |
2033 | .readlink = generic_readlink, | |
2034 | .follow_link = shmem_follow_link, | |
2035 | .put_link = shmem_put_link, | |
2036 | }; | |
2037 | ||
2038 | #ifdef CONFIG_TMPFS_POSIX_ACL | |
2039 | /* | |
2040 | * Superblocks without xattr inode operations will get security.* xattr | |
2041 | * support from the VFS "for free". As soon as we have any other xattrs | |
2042 | * like ACLs, we also need to implement the security.* handlers at | |
2043 | * filesystem level, though. | |
2044 | */ | |
2045 | ||
2046 | static size_t shmem_xattr_security_list(struct inode *inode, char *list, | |
2047 | size_t list_len, const char *name, | |
2048 | size_t name_len) | |
2049 | { | |
2050 | return security_inode_listsecurity(inode, list, list_len); | |
2051 | } | |
2052 | ||
2053 | static int shmem_xattr_security_get(struct inode *inode, const char *name, | |
2054 | void *buffer, size_t size) | |
2055 | { | |
2056 | if (strcmp(name, "") == 0) | |
2057 | return -EINVAL; | |
2058 | return xattr_getsecurity(inode, name, buffer, size); | |
2059 | } | |
2060 | ||
2061 | static int shmem_xattr_security_set(struct inode *inode, const char *name, | |
2062 | const void *value, size_t size, int flags) | |
2063 | { | |
2064 | if (strcmp(name, "") == 0) | |
2065 | return -EINVAL; | |
2066 | return security_inode_setsecurity(inode, name, value, size, flags); | |
2067 | } | |
2068 | ||
2069 | static struct xattr_handler shmem_xattr_security_handler = { | |
2070 | .prefix = XATTR_SECURITY_PREFIX, | |
2071 | .list = shmem_xattr_security_list, | |
2072 | .get = shmem_xattr_security_get, | |
2073 | .set = shmem_xattr_security_set, | |
2074 | }; | |
2075 | ||
2076 | static struct xattr_handler *shmem_xattr_handlers[] = { | |
2077 | &shmem_xattr_acl_access_handler, | |
2078 | &shmem_xattr_acl_default_handler, | |
2079 | &shmem_xattr_security_handler, | |
2080 | NULL | |
2081 | }; | |
2082 | #endif | |
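With these handlers, tmpfs of this vintage serves only security.* and POSIX ACL attributes; there is no user.* handler, so setting one should fail with EOPNOTSUPP. A sketch, assuming /dev/shm is tmpfs (names are illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/shm/shmem-xattr-demo", O_CREAT | O_RDWR, 0600);
        if (fd < 0) { perror("open"); return 1; }
        if (fsetxattr(fd, "user.demo", "1", 1, 0) < 0)
                printf("user.* xattr rejected: %s\n", strerror(errno));
        close(fd);
        unlink("/dev/shm/shmem-xattr-demo");
        return 0;
}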
2083 | ||
2084 | static struct dentry *shmem_get_parent(struct dentry *child) | |
2085 | { | |
2086 | return ERR_PTR(-ESTALE); | |
2087 | } | |
2088 | ||
2089 | static int shmem_match(struct inode *ino, void *vfh) | |
2090 | { | |
2091 | __u32 *fh = vfh; | |
2092 | __u64 inum = fh[2]; | |
2093 | inum = (inum << 32) | fh[1]; | |
2094 | return ino->i_ino == inum && fh[0] == ino->i_generation; | |
2095 | } | |
2096 | ||
2097 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, | |
2098 | struct fid *fid, int fh_len, int fh_type) | |
2099 | { | |
2100 | struct inode *inode; | |
2101 | struct dentry *dentry = NULL; | |
2102 | u64 inum = fid->raw[2]; | |
2103 | inum = (inum << 32) | fid->raw[1]; | |
2104 | ||
2105 | if (fh_len < 3) | |
2106 | return NULL; | |
2107 | ||
2108 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), | |
2109 | shmem_match, fid->raw); | |
2110 | if (inode) { | |
2111 | dentry = d_find_alias(inode); | |
2112 | iput(inode); | |
2113 | } | |
2114 | ||
2115 | return dentry; | |
2116 | } | |
2117 | ||
2118 | static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, | |
2119 | int connectable) | |
2120 | { | |
2121 | struct inode *inode = dentry->d_inode; | |
2122 | ||
2123 | if (*len < 3) | |
2124 | return 255; | |
2125 | ||
2126 | if (hlist_unhashed(&inode->i_hash)) { | |
2127 | /* Unfortunately insert_inode_hash is not idempotent, | |
2128 | * so as we hash inodes here rather than at creation | |
2129 | * time, we need a lock to ensure we only try | |
2130 | * to do it once | |
2131 | */ | |
2132 | static DEFINE_SPINLOCK(lock); | |
2133 | spin_lock(&lock); | |
2134 | if (hlist_unhashed(&inode->i_hash)) | |
2135 | __insert_inode_hash(inode, | |
2136 | inode->i_ino + inode->i_generation); | |
2137 | spin_unlock(&lock); | |
2138 | } | |
2139 | ||
2140 | fh[0] = inode->i_generation; | |
2141 | fh[1] = inode->i_ino; | |
2142 | fh[2] = ((__u64)inode->i_ino) >> 32; | |
2143 | ||
2144 | *len = 3; | |
2145 | return 1; | |
2146 | } | |
2147 | ||
2148 | static const struct export_operations shmem_export_ops = { | |
2149 | .get_parent = shmem_get_parent, | |
2150 | .encode_fh = shmem_encode_fh, | |
2151 | .fh_to_dentry = shmem_fh_to_dentry, | |
2152 | }; | |
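These export ops exist so that knfsd can hand out stable file handles built from i_ino plus i_generation, as encoded above. On kernels newer than this code, name_to_handle_at(2) exposes the same encode path from userspace; a sketch under that assumption:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
        int mount_id;
        fh->handle_bytes = MAX_HANDLE_SZ;
        if (name_to_handle_at(AT_FDCWD, "/dev/shm", fh, &mount_id, 0) < 0)
                perror("name_to_handle_at");
        else
                printf("handle: %u bytes, type %d\n", fh->handle_bytes,
                       fh->handle_type);
        free(fh);
        return 0;
}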
2153 | ||
2154 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | |
2155 | bool remount) | |
2156 | { | |
2157 | char *this_char, *value, *rest; | |
2158 | ||
2159 | while (options != NULL) { | |
2160 | this_char = options; | |
2161 | for (;;) { | |
2162 | /* | |
2163 | * NUL-terminate this option: unfortunately, | |
2164 | * mount options form a comma-separated list, | |
2165 | * but mpol's nodelist may also contain commas. | |
2166 | */ | |
2167 | options = strchr(options, ','); | |
2168 | if (options == NULL) | |
2169 | break; | |
2170 | options++; | |
2171 | if (!isdigit(*options)) { | |
2172 | options[-1] = '\0'; | |
2173 | break; | |
2174 | } | |
2175 | } | |
2176 | if (!*this_char) | |
2177 | continue; | |
2178 | if ((value = strchr(this_char,'=')) != NULL) { | |
2179 | *value++ = 0; | |
2180 | } else { | |
2181 | printk(KERN_ERR | |
2182 | "tmpfs: No value for mount option '%s'\n", | |
2183 | this_char); | |
2184 | return 1; | |
2185 | } | |
2186 | ||
2187 | if (!strcmp(this_char,"size")) { | |
2188 | unsigned long long size; | |
2189 | size = memparse(value,&rest); | |
2190 | if (*rest == '%') { | |
2191 | size <<= PAGE_SHIFT; | |
2192 | size *= totalram_pages; | |
2193 | do_div(size, 100); | |
2194 | rest++; | |
2195 | } | |
2196 | if (*rest) | |
2197 | goto bad_val; | |
2198 | sbinfo->max_blocks = | |
2199 | DIV_ROUND_UP(size, PAGE_CACHE_SIZE); | |
2200 | } else if (!strcmp(this_char,"nr_blocks")) { | |
2201 | sbinfo->max_blocks = memparse(value, &rest); | |
2202 | if (*rest) | |
2203 | goto bad_val; | |
2204 | } else if (!strcmp(this_char,"nr_inodes")) { | |
2205 | sbinfo->max_inodes = memparse(value, &rest); | |
2206 | if (*rest) | |
2207 | goto bad_val; | |
2208 | } else if (!strcmp(this_char,"mode")) { | |
2209 | if (remount) | |
2210 | continue; | |
2211 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; | |
2212 | if (*rest) | |
2213 | goto bad_val; | |
2214 | } else if (!strcmp(this_char,"uid")) { | |
2215 | if (remount) | |
2216 | continue; | |
2217 | sbinfo->uid = simple_strtoul(value, &rest, 0); | |
2218 | if (*rest) | |
2219 | goto bad_val; | |
2220 | } else if (!strcmp(this_char,"gid")) { | |
2221 | if (remount) | |
2222 | continue; | |
2223 | sbinfo->gid = simple_strtoul(value, &rest, 0); | |
2224 | if (*rest) | |
2225 | goto bad_val; | |
2226 | } else if (!strcmp(this_char,"mpol")) { | |
2227 | if (mpol_parse_str(value, &sbinfo->mpol, 1)) | |
2228 | goto bad_val; | |
2229 | } else { | |
2230 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", | |
2231 | this_char); | |
2232 | return 1; | |
2233 | } | |
2234 | } | |
2235 | return 0; | |
2236 | ||
2237 | bad_val: | |
2238 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | |
2239 | value, this_char); | |
2240 | return 1; | |
2242 | } | |
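The options parsed here arrive as the data string of mount(2); size= accepts the k/m/g suffixes understood by memparse() or a trailing % of total RAM. An illustrative sketch (mount point and values are arbitrary; CAP_SYS_ADMIN is required):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("tmpfs", "/mnt", "tmpfs", 0,
                  "size=50%,nr_inodes=8192,mode=700,uid=0,gid=0") < 0)
                perror("mount");
        return 0;
}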
2243 | ||
2244 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) | |
2245 | { | |
2246 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | |
2247 | struct shmem_sb_info config = *sbinfo; | |
2248 | unsigned long blocks; | |
2249 | unsigned long inodes; | |
2250 | int error = -EINVAL; | |
2251 | ||
2252 | if (shmem_parse_options(data, &config, true)) | |
2253 | return error; | |
2254 | ||
2255 | spin_lock(&sbinfo->stat_lock); | |
2256 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; | |
2257 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; | |
2258 | if (config.max_blocks < blocks) | |
2259 | goto out; | |
2260 | if (config.max_inodes < inodes) | |
2261 | goto out; | |
2262 | /* | |
2263 | * Those tests also disallow limited->unlimited while any are in | |
2264 | * use, so i_blocks will always be zero when max_blocks is zero; | |
2265 | * but we must separately disallow unlimited->limited, because | |
2266 | * in that case we have no record of how much is already in use. | |
2267 | */ | |
2268 | if (config.max_blocks && !sbinfo->max_blocks) | |
2269 | goto out; | |
2270 | if (config.max_inodes && !sbinfo->max_inodes) | |
2271 | goto out; | |
2272 | ||
2273 | error = 0; | |
2274 | sbinfo->max_blocks = config.max_blocks; | |
2275 | sbinfo->free_blocks = config.max_blocks - blocks; | |
2276 | sbinfo->max_inodes = config.max_inodes; | |
2277 | sbinfo->free_inodes = config.max_inodes - inodes; | |
2278 | ||
2279 | mpol_put(sbinfo->mpol); | |
2280 | sbinfo->mpol = config.mpol; /* transfers initial ref */ | |
2281 | out: | |
2282 | spin_unlock(&sbinfo->stat_lock); | |
2283 | return error; | |
2284 | } | |
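A remount that would shrink a limit below current usage, or turn an unlimited mount into a limited one, is rejected with EINVAL by the checks above. A sketch (values illustrative, CAP_SYS_ADMIN required):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("tmpfs", "/mnt", "tmpfs", MS_REMOUNT, "size=64m") < 0)
                perror("remount");              /* EINVAL if 64m < blocks in use */
        return 0;
}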
2285 | ||
2286 | static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) | |
2287 | { | |
2288 | struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); | |
2289 | ||
2290 | if (sbinfo->max_blocks != shmem_default_max_blocks()) | |
2291 | seq_printf(seq, ",size=%luk", | |
2292 | sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); | |
2293 | if (sbinfo->max_inodes != shmem_default_max_inodes()) | |
2294 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); | |
2295 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) | |
2296 | seq_printf(seq, ",mode=%03o", sbinfo->mode); | |
2297 | if (sbinfo->uid != 0) | |
2298 | seq_printf(seq, ",uid=%u", sbinfo->uid); | |
2299 | if (sbinfo->gid != 0) | |
2300 | seq_printf(seq, ",gid=%u", sbinfo->gid); | |
2301 | shmem_show_mpol(seq, sbinfo->mpol); | |
2302 | return 0; | |
2303 | } | |
2304 | #endif /* CONFIG_TMPFS */ | |
2305 | ||
2306 | static void shmem_put_super(struct super_block *sb) | |
2307 | { | |
2308 | kfree(sb->s_fs_info); | |
2309 | sb->s_fs_info = NULL; | |
2310 | } | |
2311 | ||
2312 | int shmem_fill_super(struct super_block *sb, void *data, int silent) | |
2313 | { | |
2314 | struct inode *inode; | |
2315 | struct dentry *root; | |
2316 | struct shmem_sb_info *sbinfo; | |
2317 | int err = -ENOMEM; | |
2318 | ||
2319 | /* Round up to L1_CACHE_BYTES to resist false sharing */ | |
2320 | sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), | |
2321 | L1_CACHE_BYTES), GFP_KERNEL); | |
2322 | if (!sbinfo) | |
2323 | return -ENOMEM; | |
2324 | ||
2325 | sbinfo->mode = S_IRWXUGO | S_ISVTX; | |
2326 | sbinfo->uid = current_fsuid(); | |
2327 | sbinfo->gid = current_fsgid(); | |
2328 | sb->s_fs_info = sbinfo; | |
2329 | ||
2330 | #ifdef CONFIG_TMPFS | |
2331 | /* | |
2332 | * By default we only allow half of the physical RAM per | |
2333 | * tmpfs instance, limiting inodes to one per page of lowmem; | |
2334 | * but the internal instance is left unlimited. | |
2335 | */ | |
2336 | if (!(sb->s_flags & MS_NOUSER)) { | |
2337 | sbinfo->max_blocks = shmem_default_max_blocks(); | |
2338 | sbinfo->max_inodes = shmem_default_max_inodes(); | |
2339 | if (shmem_parse_options(data, sbinfo, false)) { | |
2340 | err = -EINVAL; | |
2341 | goto failed; | |
2342 | } | |
2343 | } | |
2344 | sb->s_export_op = &shmem_export_ops; | |
2345 | #else | |
2346 | sb->s_flags |= MS_NOUSER; | |
2347 | #endif | |
2348 | ||
2349 | spin_lock_init(&sbinfo->stat_lock); | |
2350 | sbinfo->free_blocks = sbinfo->max_blocks; | |
2351 | sbinfo->free_inodes = sbinfo->max_inodes; | |
2352 | ||
2353 | sb->s_maxbytes = SHMEM_MAX_BYTES; | |
2354 | sb->s_blocksize = PAGE_CACHE_SIZE; | |
2355 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | |
2356 | sb->s_magic = TMPFS_MAGIC; | |
2357 | sb->s_op = &shmem_ops; | |
2358 | sb->s_time_gran = 1; | |
2359 | #ifdef CONFIG_TMPFS_POSIX_ACL | |
2360 | sb->s_xattr = shmem_xattr_handlers; | |
2361 | sb->s_flags |= MS_POSIXACL; | |
2362 | #endif | |
2363 | ||
2364 | inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); | |
2365 | if (!inode) | |
2366 | goto failed; | |
2367 | inode->i_uid = sbinfo->uid; | |
2368 | inode->i_gid = sbinfo->gid; | |
2369 | root = d_alloc_root(inode); | |
2370 | if (!root) | |
2371 | goto failed_iput; | |
2372 | sb->s_root = root; | |
2373 | return 0; | |
2374 | ||
2375 | failed_iput: | |
2376 | iput(inode); | |
2377 | failed: | |
2378 | shmem_put_super(sb); | |
2379 | return err; | |
2380 | } | |
2381 | ||
2382 | static struct kmem_cache *shmem_inode_cachep; | |
2383 | ||
2384 | static struct inode *shmem_alloc_inode(struct super_block *sb) | |
2385 | { | |
2386 | struct shmem_inode_info *p; | |
2387 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); | |
2388 | if (!p) | |
2389 | return NULL; | |
2390 | return &p->vfs_inode; | |
2391 | } | |
2392 | ||
2393 | static void shmem_destroy_inode(struct inode *inode) | |
2394 | { | |
2395 | if ((inode->i_mode & S_IFMT) == S_IFREG) { | |
2396 | /* only struct inode is valid if it's an inline symlink */ | |
2397 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); | |
2398 | } | |
2399 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); | |
2400 | } | |
2401 | ||
2402 | static void init_once(void *foo) | |
2403 | { | |
2404 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | |
2405 | ||
2406 | inode_init_once(&p->vfs_inode); | |
2407 | } | |
2408 | ||
2409 | static int init_inodecache(void) | |
2410 | { | |
2411 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | |
2412 | sizeof(struct shmem_inode_info), | |
2413 | 0, SLAB_PANIC, init_once); | |
2414 | return 0; | |
2415 | } | |
2416 | ||
2417 | static void destroy_inodecache(void) | |
2418 | { | |
2419 | kmem_cache_destroy(shmem_inode_cachep); | |
2420 | } | |
2421 | ||
2422 | static const struct address_space_operations shmem_aops = { | |
2423 | .writepage = shmem_writepage, | |
2424 | .set_page_dirty = __set_page_dirty_no_writeback, | |
2425 | #ifdef CONFIG_TMPFS | |
2426 | .readpage = shmem_readpage, | |
2427 | .write_begin = shmem_write_begin, | |
2428 | .write_end = shmem_write_end, | |
2429 | #endif | |
2430 | .migratepage = migrate_page, | |
2431 | .error_remove_page = generic_error_remove_page, | |
2432 | }; | |
2433 | ||
2434 | static const struct file_operations shmem_file_operations = { | |
2435 | .mmap = shmem_mmap, | |
2436 | #ifdef CONFIG_TMPFS | |
2437 | .llseek = generic_file_llseek, | |
2438 | .read = do_sync_read, | |
2439 | .write = do_sync_write, | |
2440 | .aio_read = shmem_file_aio_read, | |
2441 | .aio_write = generic_file_aio_write, | |
2442 | .fsync = simple_sync_file, | |
2443 | .splice_read = generic_file_splice_read, | |
2444 | .splice_write = generic_file_splice_write, | |
2445 | #endif | |
2446 | }; | |
2447 | ||
2448 | static const struct inode_operations shmem_inode_operations = { | |
2449 | .truncate = shmem_truncate, | |
2450 | .setattr = shmem_notify_change, | |
2451 | .truncate_range = shmem_truncate_range, | |
2452 | #ifdef CONFIG_TMPFS_POSIX_ACL | |
2453 | .setxattr = generic_setxattr, | |
2454 | .getxattr = generic_getxattr, | |
2455 | .listxattr = generic_listxattr, | |
2456 | .removexattr = generic_removexattr, | |
2457 | .check_acl = shmem_check_acl, | |
2458 | #endif | |
2459 | ||
2460 | }; | |
2461 | ||
2462 | static const struct inode_operations shmem_dir_inode_operations = { | |
2463 | #ifdef CONFIG_TMPFS | |
2464 | .create = shmem_create, | |
2465 | .lookup = simple_lookup, | |
2466 | .link = shmem_link, | |
2467 | .unlink = shmem_unlink, | |
2468 | .symlink = shmem_symlink, | |
2469 | .mkdir = shmem_mkdir, | |
2470 | .rmdir = shmem_rmdir, | |
2471 | .mknod = shmem_mknod, | |
2472 | .rename = shmem_rename, | |
2473 | #endif | |
2474 | #ifdef CONFIG_TMPFS_POSIX_ACL | |
2475 | .setattr = shmem_notify_change, | |
2476 | .setxattr = generic_setxattr, | |
2477 | .getxattr = generic_getxattr, | |
2478 | .listxattr = generic_listxattr, | |
2479 | .removexattr = generic_removexattr, | |
2480 | .check_acl = shmem_check_acl, | |
2481 | #endif | |
2482 | }; | |
2483 | ||
2484 | static const struct inode_operations shmem_special_inode_operations = { | |
2485 | #ifdef CONFIG_TMPFS_POSIX_ACL | |
2486 | .setattr = shmem_notify_change, | |
2487 | .setxattr = generic_setxattr, | |
2488 | .getxattr = generic_getxattr, | |
2489 | .listxattr = generic_listxattr, | |
2490 | .removexattr = generic_removexattr, | |
2491 | .check_acl = shmem_check_acl, | |
2492 | #endif | |
2493 | }; | |
2494 | ||
2495 | static const struct super_operations shmem_ops = { | |
2496 | .alloc_inode = shmem_alloc_inode, | |
2497 | .destroy_inode = shmem_destroy_inode, | |
2498 | #ifdef CONFIG_TMPFS | |
2499 | .statfs = shmem_statfs, | |
2500 | .remount_fs = shmem_remount_fs, | |
2501 | .show_options = shmem_show_options, | |
2502 | #endif | |
2503 | .delete_inode = shmem_delete_inode, | |
2504 | .drop_inode = generic_delete_inode, | |
2505 | .put_super = shmem_put_super, | |
2506 | }; | |
2507 | ||
2508 | static const struct vm_operations_struct shmem_vm_ops = { | |
2509 | .fault = shmem_fault, | |
2510 | #ifdef CONFIG_NUMA | |
2511 | .set_policy = shmem_set_policy, | |
2512 | .get_policy = shmem_get_policy, | |
2513 | #endif | |
2514 | }; | |
2515 | ||
2516 | ||
2517 | static int shmem_get_sb(struct file_system_type *fs_type, | |
2518 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) | |
2519 | { | |
2520 | return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); | |
2521 | } | |
2522 | ||
2523 | static struct file_system_type tmpfs_fs_type = { | |
2524 | .owner = THIS_MODULE, | |
2525 | .name = "tmpfs", | |
2526 | .get_sb = shmem_get_sb, | |
2527 | .kill_sb = kill_litter_super, | |
2528 | }; | |
2529 | ||
2530 | int __init init_tmpfs(void) | |
2531 | { | |
2532 | int error; | |
2533 | ||
2534 | error = bdi_init(&shmem_backing_dev_info); | |
2535 | if (error) | |
2536 | goto out4; | |
2537 | ||
2538 | error = init_inodecache(); | |
2539 | if (error) | |
2540 | goto out3; | |
2541 | ||
2542 | error = register_filesystem(&tmpfs_fs_type); | |
2543 | if (error) { | |
2544 | printk(KERN_ERR "Could not register tmpfs\n"); | |
2545 | goto out2; | |
2546 | } | |
2547 | ||
2548 | shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, | |
2549 | tmpfs_fs_type.name, NULL); | |
2550 | if (IS_ERR(shm_mnt)) { | |
2551 | error = PTR_ERR(shm_mnt); | |
2552 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | |
2553 | goto out1; | |
2554 | } | |
2555 | return 0; | |
2556 | ||
2557 | out1: | |
2558 | unregister_filesystem(&tmpfs_fs_type); | |
2559 | out2: | |
2560 | destroy_inodecache(); | |
2561 | out3: | |
2562 | bdi_destroy(&shmem_backing_dev_info); | |
2563 | out4: | |
2564 | shm_mnt = ERR_PTR(error); | |
2565 | return error; | |
2566 | } | |
2567 | ||
2568 | #else /* !CONFIG_SHMEM */ | |
2569 | ||
2570 | /* | |
2571 | * tiny-shmem: simple shmemfs and tmpfs using ramfs code | |
2572 | * | |
2573 | * This is intended for small systems where the benefits of the full | |
2574 | * shmem code (swap-backed and resource-limited) are outweighed by | |
2575 | * its complexity. On systems without swap this code should be | |
2576 | * effectively equivalent, but much lighter weight. | |
2577 | */ | |
2578 | ||
2579 | #include <linux/ramfs.h> | |
2580 | ||
2581 | static struct file_system_type tmpfs_fs_type = { | |
2582 | .name = "tmpfs", | |
2583 | .get_sb = ramfs_get_sb, | |
2584 | .kill_sb = kill_litter_super, | |
2585 | }; | |
2586 | ||
2587 | int __init init_tmpfs(void) | |
2588 | { | |
2589 | BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); | |
2590 | ||
2591 | shm_mnt = kern_mount(&tmpfs_fs_type); | |
2592 | BUG_ON(IS_ERR(shm_mnt)); | |
2593 | ||
2594 | return 0; | |
2595 | } | |
2596 | ||
2597 | int shmem_unuse(swp_entry_t entry, struct page *page) | |
2598 | { | |
2599 | return 0; | |
2600 | } | |
2601 | ||
2602 | int shmem_lock(struct file *file, int lock, struct user_struct *user) | |
2603 | { | |
2604 | return 0; | |
2605 | } | |
2606 | ||
2607 | #define shmem_vm_ops generic_file_vm_ops | |
2608 | #define shmem_file_operations ramfs_file_operations | |
2609 | #define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev) | |
2610 | #define shmem_acct_size(flags, size) 0 | |
2611 | #define shmem_unacct_size(flags, size) do {} while (0) | |
2612 | #define SHMEM_MAX_BYTES MAX_LFS_FILESIZE | |
2613 | ||
2614 | #endif /* CONFIG_SHMEM */ | |
2615 | ||
2616 | /* common code */ | |
2617 | ||
2618 | /** | |
2619 | * shmem_file_setup - get an unlinked file living in tmpfs | |
2620 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | |
2621 | * @size: size to be set for the file | |
2622 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size | |
2623 | */ | |
2624 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) | |
2625 | { | |
2626 | int error; | |
2627 | struct file *file; | |
2628 | struct inode *inode; | |
2629 | struct dentry *dentry, *root; | |
2630 | struct qstr this; | |
2631 | ||
2632 | if (IS_ERR(shm_mnt)) | |
2633 | return (void *)shm_mnt; | |
2634 | ||
2635 | if (size < 0 || size > SHMEM_MAX_BYTES) | |
2636 | return ERR_PTR(-EINVAL); | |
2637 | ||
2638 | if (shmem_acct_size(flags, size)) | |
2639 | return ERR_PTR(-ENOMEM); | |
2640 | ||
2641 | error = -ENOMEM; | |
2642 | this.name = name; | |
2643 | this.len = strlen(name); | |
2644 | this.hash = 0; /* will go */ | |
2645 | root = shm_mnt->mnt_root; | |
2646 | dentry = d_alloc(root, &this); | |
2647 | if (!dentry) | |
2648 | goto put_memory; | |
2649 | ||
2650 | error = -ENFILE; | |
2651 | file = get_empty_filp(); | |
2652 | if (!file) | |
2653 | goto put_dentry; | |
2654 | ||
2655 | error = -ENOSPC; | |
2656 | inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags); | |
2657 | if (!inode) | |
2658 | goto close_file; | |
2659 | ||
2660 | d_instantiate(dentry, inode); | |
2661 | inode->i_size = size; | |
2662 | inode->i_nlink = 0; /* It is unlinked */ | |
2663 | init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, | |
2664 | &shmem_file_operations); | |
2665 | ||
2666 | #ifndef CONFIG_MMU | |
2667 | error = ramfs_nommu_expand_for_mapping(inode, size); | |
2668 | if (error) | |
2669 | goto close_file; | |
2670 | #endif | |
2671 | ima_counts_get(file); | |
2672 | return file; | |
2673 | ||
2674 | close_file: | |
2675 | put_filp(file); | |
2676 | put_dentry: | |
2677 | dput(dentry); | |
2678 | put_memory: | |
2679 | shmem_unacct_size(flags, size); | |
2680 | return ERR_PTR(error); | |
2681 | } | |
2682 | EXPORT_SYMBOL_GPL(shmem_file_setup); | |
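A hedged sketch of how in-kernel code might consume this exported helper; the fragment assumes a kernel build context, is not part of the original file, and the helper name is hypothetical:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver helper: an unlinked, swap-backed scratch file. */
static struct file *demo_shmem_buffer(loff_t size)
{
        struct file *filp = shmem_file_setup("demo-buffer", size, VM_NORESERVE);

        if (IS_ERR(filp))               /* ENOMEM, EINVAL or ENOSPC from above */
                return filp;
        /* filp->f_mapping now points at a shmem inode of the given size */
        return filp;
}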
2683 | ||
2684 | /** | |
2685 | * shmem_zero_setup - setup a shared anonymous mapping | |
2686 | * @vma: the vma to be mmapped is prepared by do_mmap_pgoff | |
2687 | */ | |
2688 | int shmem_zero_setup(struct vm_area_struct *vma) | |
2689 | { | |
2690 | struct file *file; | |
2691 | loff_t size = vma->vm_end - vma->vm_start; | |
2692 | ||
2693 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); | |
2694 | if (IS_ERR(file)) | |
2695 | return PTR_ERR(file); | |
2696 | ||
2697 | if (vma->vm_file) | |
2698 | fput(vma->vm_file); | |
2699 | vma->vm_file = file; | |
2700 | vma->vm_ops = &shmem_vm_ops; | |
2701 | return 0; | |
2702 | } |
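shmem_zero_setup() is what backs MAP_SHARED | MAP_ANONYMOUS mappings with an unlinked "dev/zero" tmpfs file, and that shared file is why such memory stays coherent across fork(). A closing userspace sketch:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        if (fork() == 0) {
                strcpy(p, "written by child");
                _exit(0);
        }
        wait(NULL);
        printf("parent sees: %s\n", p); /* shared via the shmem file */
        munmap(p, 4096);
        return 0;
}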