/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}
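
/*
 * Illustrative user-space sketch (not part of this file): the bit
 * toggled above is reached from applications via the F_GETFD/F_SETFD
 * fcntl commands.  "fd" is a placeholder descriptor.
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 *
 * Afterwards the descriptor is closed automatically across execve().
 */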

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fds in the range
	 * orig_start..fdt->next_fd
	 */
	start = orig_start;
	if (start < fdt->next_fd)
		start = fdt->next_fd;

	newfd = start;
	if (start < fdt->max_fdset) {
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	/*
	 * We reacquired files_lock, so we are safe as long as
	 * we reacquire the fdtable pointer and use it while holding
	 * the lock; no one can free it during that time.
	 */
	fdt = files_fdtable(files);
	if (start <= fdt->next_fd)
		fdt->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file *file, *tofree;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done. --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}
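
/*
 * Illustrative user-space sketch (not part of this file): the classic
 * use of dup2() is redirecting a standard descriptor, e.g. sending
 * stdout to a log file before exec.  "logfd" and the path are
 * placeholders.
 *
 *	int logfd = open("/tmp/out.log", O_WRONLY | O_CREAT, 0644);
 *	if (logfd >= 0 && dup2(logfd, STDOUT_FILENO) == STDOUT_FILENO)
 *		close(logfd);	// fd 1 now refers to the log file
 */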

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}
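
/*
 * Illustrative user-space sketch (not part of this file): setfl() is
 * what services a read-modify-write of the status flags, e.g. making
 * a descriptor non-blocking.  "fd" is a placeholder.
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */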

static void f_modown(struct file *filp, unsigned long pid,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}
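
/*
 * Illustrative user-space sketch (not part of this file): the pid
 * stored above follows the F_SETOWN convention - a positive value
 * names a process, a negative value names a process group.
 *
 *	fcntl(sock, F_SETOWN, getpid());	// deliver SIGIO to this process
 *	fcntl(sock, F_SETOWN, -getpgrp());	// ...or to the whole group
 */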

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
		case F_DUPFD:
			get_file(filp);
			err = dupfd(filp, arg);
			break;
		case F_GETFD:
			err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
			break;
		case F_SETFD:
			err = 0;
			set_close_on_exec(fd, arg & FD_CLOEXEC);
			break;
		case F_GETFL:
			err = filp->f_flags;
			break;
		case F_SETFL:
			err = setfl(fd, filp, arg);
			break;
		case F_GETLK:
			err = fcntl_getlk(filp, (struct flock __user *) arg);
			break;
		case F_SETLK:
		case F_SETLKW:
			err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
			break;
		case F_GETOWN:
			/*
			 * XXX If f_owner is a process group, the
			 * negative return value will get converted
			 * into an error.  Oops.  If we keep the
			 * current syscall conventions, the only way
			 * to fix this will be in libc.
			 */
			err = filp->f_owner.pid;
			force_successful_syscall_return();
			break;
		case F_SETOWN:
			err = f_setown(filp, arg, 1);
			break;
		case F_GETSIG:
			err = filp->f_owner.signum;
			break;
		case F_SETSIG:
			/* arg == 0 restores default behaviour. */
			if (!valid_signal(arg)) {
				break;
			}
			err = 0;
			filp->f_owner.signum = arg;
			break;
		case F_GETLEASE:
			err = fcntl_getlease(filp);
			break;
		case F_SETLEASE:
			err = fcntl_setlease(fd, filp, arg);
			break;
		case F_NOTIFY:
			err = fcntl_dirnotify(fd, filp, arg);
			break;
		default:
			break;
	}
	return err;
}
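
/*
 * Illustrative user-space sketch (not part of this file): the usual
 * way to combine the commands above for signal-driven I/O.  "fd" is a
 * placeholder; SIGRTMIN + 1 is an arbitrary choice of realtime signal
 * (F_SETSIG is Linux-specific).
 *
 *	fcntl(fd, F_SETOWN, getpid());		// route signals here
 *	fcntl(fd, F_SETSIG, SIGRTMIN + 1);	// queued rt signal w/ siginfo
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */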

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}
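
/*
 * Illustrative user-space sketch (not part of this file): the
 * F_SETLK path dispatched above is driven by a struct flock that
 * describes the byte range, e.g. a whole-file write lock:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "to end of file"
 *	};
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		perror("lock held elsewhere; F_SETLKW would block instead");
 */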

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
		case F_GETLK64:
			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
			break;
		case F_SETLK64:
		case F_SETLKW64:
			err = fcntl_setlk64(fd, filp, cmd,
					(struct flock64 __user *) arg);
			break;
		default:
			err = do_fcntl(fd, cmd, arg, filp);
			break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			if ((reason & __SI_MASK) != __SI_POLL)
				BUG();
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigio_to_task(p, fown, fd, band);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}
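
/*
 * Illustrative user-space sketch (not part of this file): when a
 * realtime signal was selected with F_SETSIG, the siginfo filled in
 * above is visible to an SA_SIGINFO handler.  Names are placeholders.
 *
 *	static void on_io(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si->si_fd is the ready descriptor,
 *		// si->si_band holds the POLL* bits from band_table
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = on_io,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGRTMIN + 1, &sa, NULL);
 */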

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigurg_to_task(p, fown);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
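
/*
 * Illustrative user-space sketch (not part of this file): SIGURG is
 * raised via send_sigurg() when out-of-band data arrives on a socket
 * whose owner has been set.  "handle_urgent" is a placeholder handler.
 *
 *	signal(SIGURG, handle_urgent);
 *	fcntl(sock, F_SETOWN, getpid());
 *	// later: recv(sock, buf, len, MSG_OOB) fetches the urgent byte
 */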

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it made
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
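
/*
 * Illustrative driver-side sketch (not part of this file): a character
 * driver typically wraps fasync_helper() in its file_operations
 * ->fasync hook.  "mydev_queue" and "mydev_fasync" are placeholder
 * names, not symbols defined anywhere in the kernel.
 *
 *	static struct fasync_struct *mydev_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_queue);
 *	}
 */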

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);
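
/*
 * Illustrative driver-side sketch (not part of this file): the
 * counterpart to the ->fasync hook is notifying the queue when data
 * arrives, usually from an interrupt handler or bottom half.
 * "mydev_queue" is the placeholder list from the sketch above.
 *
 *	// new data is available for readers:
 *	kill_fasync(&mydev_queue, SIGIO, POLL_IN);
 */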

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)