/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <asm/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
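
/*
 * For reference, a sketch of how this handler is typically wired into a
 * sysctl table.  The real entry lives in kernel/sysctl.c; the field
 * values below are illustrative and should be checked against that file:
 *
 *      {
 *              .procname       = "file-nr",
 *              .data           = &files_stat,
 *              .maxlen         = sizeof(files_stat),
 *              .mode           = 0444,
 *              .proc_handler   = proc_nr_files,
 *      },
 */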

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_cred = get_cred(cred);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (vfsmount, dentry) pair the new file will refer to
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is a
 * preferred interface to using init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        file->f_path = *path;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        ima_counts_get(file);
        return file;
}
EXPORT_SYMBOL(alloc_file);
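
/*
 * Illustrative caller sketch (the my_mnt/my_dentry/my_fops names are
 * hypothetical and not defined in this file): the path references are
 * donated to the new file and dropped again at __fput() time, so on
 * failure the caller must release them itself.
 *
 *      struct path path = { .mnt = mntget(my_mnt), .dentry = dget(my_dentry) };
 *      struct file *file = alloc_file(&path, FMODE_READ, &my_fops);
 *      if (!file)
 *              path_put(&path);        // nobody took ownership of the references
 */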

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);
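
/*
 * For context, a sketch of the pairing this function undoes.  The
 * open-time side lives in fs/open.c, not in this file, so treat the
 * exact call sites as illustrative:
 *
 *      get_write_access(inode);        // balanced by put_write_access() above
 *      mnt_want_write(mnt);            // balanced by mnt_drop_write() above
 */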

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        ima_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_sb_list_del(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);

struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_long_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);
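
/*
 * Typical use from in-kernel code (sketch): fget() takes a reference on
 * the file behind the descriptor, which must later be dropped with fput().
 *
 *      struct file *file = fget(fd);
 *      if (!file)
 *              return -EBADF;
 *      // ... operate on the file ...
 *      fput(file);
 */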

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely(atomic_read(&files->count) == 1)) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
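
/*
 * Sketch of the pattern described above (fput_light() is a small helper
 * declared in <linux/file.h>):
 *
 *      int fput_needed;
 *      struct file *file = fget_light(fd, &fput_needed);
 *      if (!file)
 *              return -EBADF;
 *      // ... use the file, without returning to userspace ...
 *      fput_light(file, fput_needed);
 */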

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_sb_list_del(file);
                file_free(file);
        }
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
        return file->f_sb_list_cpu;
#else
        return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
        struct list_head *list;
#ifdef CONFIG_SMP
        int cpu;
        cpu = smp_processor_id();
        file->f_sb_list_cpu = cpu;
        list = per_cpu_ptr(sb->s_files, cpu);
#else
        list = &sb->s_files;
#endif
        list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
        lg_local_lock(files_lglock);
        __file_sb_list_add(file, sb);
        lg_local_unlock(files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from the file list of the
 * superblock it was added to with file_sb_list_add().
 */
void file_sb_list_del(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                lg_local_lock_cpu(files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
                lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
        }
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)               \
{                                                               \
        int i;                                                  \
        for_each_possible_cpu(i) {                              \
                struct list_head *list;                         \
                list = per_cpu_ptr((__sb)->s_files, i);         \
                list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry                          \
        }                                                       \
}

#else

#define do_file_list_for_each_entry(__sb, __file)               \
{                                                               \
        struct list_head *list;                                 \
        list = &(__sb)->s_files;                                \
        list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry                          \
}

#endif

int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;
        /* Check that no files are currently opened for writing. */
        lg_global_lock(files_lglock);
        do_file_list_for_each_entry(sb, file) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        } while_file_list_for_each_entry;
        lg_global_unlock(files_lglock);
        return 1; /* Tis' cool bro. */
too_bad:
        lg_global_unlock(files_lglock);
        return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

retry:
        lg_global_lock(files_lglock);
        do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                spin_lock(&f->f_lock);
                f->f_mode &= ~FMODE_WRITE;
                spin_unlock(&f->f_lock);
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                /* This can sleep, so we can't hold the spinlock. */
                lg_global_unlock(files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        } while_file_list_for_each_entry;
        lg_global_unlock(files_lglock);
}

void __init files_init(unsigned long mempages)
{
        unsigned long n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * By default, don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
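
        /*
         * Worked example of the sizing above (illustrative numbers only):
         * with 4 GiB of RAM and 4 KiB pages, mempages is about 1048576,
         * so n = (1048576 * 4) / 10 = 419430 files, well above the
         * NR_FILE floor of 8192.
         */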
        files_defer_init();
        lg_lock_init(files_lglock);
        percpu_counter_init(&nr_files, 0);
}