/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
	return res;
}

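/*
 * Illustrative sketch (userspace, not part of this file): the
 * close-on-exec bit manipulated above is what programs reach through
 * fcntl(F_GETFD)/fcntl(F_SETFD):
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 *
 * After this, fd is closed automatically across execve().
 */
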
/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with
 * files->file_lock held.
 */

static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	fdt = files_fdtable(files);
repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..fdt->next_fd
	 */
	start = orig_start;
	if (start < fdt->next_fd)
		start = fdt->next_fd;

	newfd = start;
	if (start < fdt->max_fdset) {
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
			fdt->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= fdt->next_fd)
		fdt->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

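/*
 * Illustrative sketch (userspace, not part of this file): dupfd() above
 * backs fcntl(F_DUPFD), which allocates the lowest free descriptor at or
 * above the requested start:
 *
 *	int newfd = fcntl(oldfd, F_DUPFD, 10);	/* lowest free fd >= 10 */
 *
 * dup() is the special case with a start of 0.
 */
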
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done. --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	fdt->fd[newfd] = file;
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

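/*
 * Illustrative sketch (userspace, not part of this file): a typical use
 * of the dup2() semantics implemented above is redirecting a standard
 * descriptor; the path below is hypothetical:
 *
 *	int fd = open("/tmp/out.log", O_WRONLY | O_CREAT, 0644);
 *	if (fd >= 0) {
 *		dup2(fd, STDOUT_FILENO);  /* stdout now refers to the file */
 *		close(fd);
 *	}
 *
 * If fd already equals STDOUT_FILENO, dup2() returns it unchanged,
 * matching the newfd == oldfd short-circuit above.
 */
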
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
	unlock_kernel();
	return error;
}

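/*
 * Illustrative sketch (userspace, not part of this file): setfl() above
 * is reached via fcntl(F_SETFL); only the bits in SETFL_MASK may change.
 * The usual read-modify-write pattern to enable non-blocking I/O:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */
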
static void f_modown(struct file *filp, unsigned long pid,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

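/*
 * Illustrative sketch (userspace, not part of this file): f_setown()
 * above is what fcntl(F_SETOWN) ends up calling.  Together with FASYNC
 * (O_ASYNC in userspace) and F_SETSIG it arms signal-driven I/O:
 *
 *	fcntl(fd, F_SETOWN, getpid());		/* deliver to this process */
 *	fcntl(fd, F_SETSIG, SIGRTMIN + 1);	/* queued rt signal, si_fd set */
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * A negative F_SETOWN argument names a process group instead (see the
 * pid > 0 / pid < 0 split in send_sigio() below).
 */
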
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
		case F_GETLK64:
			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
			break;
		case F_SETLK64:
		case F_SETLKW64:
			err = fcntl_setlk64(fd, filp, cmd,
					(struct flock64 __user *) arg);
			break;
		default:
			err = do_fcntl(fd, cmd, arg, filp);
			break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			if ((reason & __SI_MASK) != __SI_POLL)
				BUG();
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!send_group_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigio_to_task(p, fown, fd, band);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p) {
			send_sigurg_to_task(p, fown);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns a negative value on error, 0
 * if it made no change, and a positive value if it added or deleted the
 * entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

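/*
 * Illustrative sketch (not part of this file): a character device driver
 * would typically keep a queue head and wire it up like this; the names
 * are hypothetical:
 *
 *	static struct fasync_struct *mydev_async_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_async_queue);
 *	}
 *
 * and, when new data arrives:
 *
 *	kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
 */
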
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)