/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;
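
/*
 * struct file objects are freed through RCU (see file_free_rcu() below)
 * so that the lockless fd lookups in fget() and fget_light() can safely
 * examine a file that is concurrently on its way to being freed.
 */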

static inline void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle the nr_files sysctl: refresh files_stat.nr_files from the
 * percpu counter before the table is copied out by proc_dointvec().
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write, struct file *filp,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write, struct file *filp,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
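
/*
 * Illustrative note (the numbers below are made up): the handler above
 * backs /proc/sys/fs/file-nr, so a read such as
 *
 *	$ cat /proc/sys/fs/file-nr
 *	1952	0	102762
 *
 * reports the allocated file count, the historical "free files" slot,
 * and files_stat.max_files (also visible as /proc/sys/fs/file-max).
 */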

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static int old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	f->f_cred = get_cred(cred);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
		fmode_t mode, const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	init_file(file, mnt, dentry, mode, fop);
	return file;
}
EXPORT_SYMBOL(alloc_file);
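
/*
 * Illustrative sketch, not part of this file: a pseudo-filesystem that
 * hands out anonymous files typically pairs alloc_file() with a dentry
 * it has just instantiated.  The names examplefs_mnt, examplefs_dentry
 * and examplefs_fops below are hypothetical:
 *
 *	struct file *file;
 *
 *	file = alloc_file(examplefs_mnt, examplefs_dentry,
 *			  FMODE_READ | FMODE_WRITE, &examplefs_fops);
 *	if (!file)
 *		return ERR_PTR(-ENFILE);
 */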

/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to be initialized
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly.  Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface.  It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier.  All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
	   fmode_t mode, const struct file_operations *fop)
{
	int error = 0;
	file->f_path.dentry = dentry;
	file->f_path.mnt = mntget(mnt);
	file->f_mapping = dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput().
	 */
	if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
		file_take_write(file);
		error = mnt_clone_write(mnt);
		WARN_ON(error);
	}
	return error;
}
EXPORT_SYMBOL(init_file);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);
	file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_kill(file);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		if (!atomic_long_inc_not_zero(&file->f_count)) {
			/* File object ref couldn't be taken */
			rcu_read_unlock();
			return NULL;
		}
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget);
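
/*
 * Illustrative pairing, not part of this file: a typical fd-based
 * syscall resolves its descriptor with fget() and drops the reference
 * with fput() when done.  The fd variable and the -EBADF convention
 * below follow common syscall practice:
 *
 *	struct file *file = fget(fd);
 *
 *	if (!file)
 *		return -EBADF;
 *	... operate on file->f_op, file->f_path, etc. ...
 *	fput(file);
 */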

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely(atomic_read(&files->count) == 1)) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone freed it */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}
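
/*
 * Illustrative pairing, not part of this file: callers must hand the
 * flag filled in by fget_light() back to the matching fput_light():
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (!file)
 *		return -EBADF;
 *	... fast-path access to the file ...
 *	fput_light(file, fput_needed);
 */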
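
/*
 * put_filp() drops a 'struct file' that was never fully set up (no
 * dentry or vfsmount attached yet, e.g. an error path after
 * get_empty_filp()), so unlike fput() it skips the full __fput()
 * teardown.
 */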
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);
		file_free(file);
	}
}

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_move(&file->f_u.fu_list, list);
	file_list_unlock();
}

void file_kill(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		file_list_lock();
		list_del_init(&file->f_u.fu_list);
		file_list_unlock();
	}
}

int fs_may_remount_ro(struct super_block *sb)
{
	struct file *file;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
		struct inode *inode = file->f_path.dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	file_list_lock();
	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		f->f_mode &= ~FMODE_WRITE;
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);
		file_list_unlock();
		/*
		 * This can sleep, so we can't hold
		 * the file_list_lock() spinlock.
		 */
		mnt_drop_write(mnt);
		mntput(mnt);
		goto retry;
	}
	file_list_unlock();
}

void __init files_init(unsigned long mempages)
{
	int n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * By default, don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
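	/*
	 * Worked example with illustrative numbers: on a machine with
	 * 4 KiB pages and 1 GiB of RAM, mempages is 262144, so
	 * n = (262144 * (4096 / 1024)) / 10 = 104857 files, i.e. about
	 * 10% of memory at roughly 1K per open file.
	 */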
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
	files_defer_init();
	percpu_counter_init(&nr_files, 0);
}