/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays.  By keeping a per-cpu list we avoid having to embed
 * the work_struct in the fdtable itself, which would grow this
 * per-task structure by 64 bytes on i386.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
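
/*
 * Illustrative note (not in the original source): vfree() may not be
 * called from the softirq context in which RCU callbacks run, so
 * free_fdtable_rcu() below chains vmalloc'ed tables onto this per-cpu
 * list and lets free_fdtable_work() release them from process context.
 */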

static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}

static inline void free_fdarr(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}

static inline void free_fdset(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}
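
/*
 * Illustrative note (not in the original source): free_fdset()'s threshold
 * mirrors alloc_fdtable() below, where open_fds and close_on_exec share one
 * allocation of 2 * nr / BITS_PER_BYTE bytes; that buffer came from
 * kmalloc() iff nr <= PAGE_SIZE * BITS_PER_BYTE / 2.
 */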

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the existing fd arrays and fd sets into a freshly allocated fdtable,
 * zeroing the tail that the old table did not cover.  Called with the files
 * spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if
	 * sysctl_nr_open was lowered between the check in expand_files()
	 * and here.  Deal with that in the caller - it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG -
	 * otherwise the bitmap handling below becomes unpleasant, to put
	 * it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
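
	/*
	 * Worked example (illustrative, not in the original source): on
	 * 64-bit, sizeof(struct file *) == 8, so each 1024-byte chunk holds
	 * 128 slots.  A request for nr = 300 gives 300 / 128 = 2,
	 * roundup_pow_of_two(3) = 4, then 4 * 128 = 512 slots, i.e. a 4KB
	 * fd array.
	 */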

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Extremely unlikely race - sysctl_nr_open was lowered between the
	 * check in the caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}
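
/*
 * Illustrative note (not in the original source): publishing the new table
 * with rcu_assign_pointer() and retiring the old one via free_fdtable(),
 * which defers the actual release to an RCU callback, lets lockless readers
 * that fetch files->fdt with rcu_dereference() keep using the old table
 * safely until a grace period has elapsed.
 */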

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EMFILE;

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
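
/*
 * Usage note (illustrative, not in the original source): the 0-vs-1 return
 * distinction matters to callers such as alloc_fd() below, which must
 * re-fetch the fdtable pointer and retry whenever expansion (return 1) may
 * have dropped and retaken files->file_lock.
 */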

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}
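
/*
 * Worked example (illustrative, not in the original source): with
 * BITS_PER_LONG == 64 and the highest open fd being 70, the last nonzero
 * bitmap word is word 1, so this returns (1 + 1) * 64 = 128 - the result is
 * rounded up to a whole bitmap word, not the exact number of open fds.
 */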

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&new_fdt->rcu);
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab) {
			free_fdarr(new_fdt);
			free_fdset(new_fdt);
			kfree(new_fdt);
		}

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			free_fdarr(new_fdt);
			free_fdset(new_fdt);
			kfree(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; the
		 * table may have grown again while we dropped the lock, so we
		 * need the latest pointer (and the latest open count).
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
	       old_fdt->open_fds->fds_bits, open_files / 8);
	memcpy(new_fdt->close_on_exec->fds_bits,
	       old_fdt->close_on_exec->fds_bits, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
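
/*
 * Usage note (illustrative, not in the original source): dup_fd() is the
 * fork-time path - copy_files() in kernel/fork.c calls it when a child is
 * created without CLONE_FILES, giving the child a private copy of the
 * parent's descriptor table.
 */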

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);

	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;

	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}
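
/*
 * Worked example (illustrative, not in the original source): the cap keeps
 * nr * sizeof(void *) from overflowing size_t and rounds down to a multiple
 * of BITS_PER_LONG.  On 64-bit, INT_MAX is the smaller bound, giving
 * INT_MAX & -64 = 2147483584; on 32-bit, ~0 / 4 = 1073741823 is smaller,
 * giving 1073741823 & -32 = 1073741792.
 */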

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= (fd_set *)&init_files.close_on_exec_init,
		.open_fds	= (fd_set *)&init_files.open_fds_init,
		.rcu		= RCU_HEAD_INIT,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * Allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds->fds_bits,
					fdt->max_fds, fd);

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int get_unused_fd(void)
{
	return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);
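
/*
 * Usage sketch (illustrative, not in the original source): callers reserve
 * a slot with get_unused_fd(), create a struct file, and publish it with
 * fd_install(); if creating the file fails, the slot must be returned with
 * put_unused_fd().  some_file_constructor() is a hypothetical helper.
 *
 *	int fd = get_unused_fd();
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_constructor();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */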