2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "xfs_trans.h"
27 #include "xfs_dmapi.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_btree.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dir_sf.h"
36 #include "xfs_dir2_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_error.h"
41 #include "xfs_ioctl32.h"
43 #include <linux/dcache.h>
44 #include <linux/smp_lock.h>
/*
 * Forward declarations for the vm_operations tables defined at the
 * bottom of this file; needed because linvfs_file_mmap() references
 * them before their definitions.
 */
46 static struct vm_operations_struct linvfs_file_vm_ops;
47 #ifdef CONFIG_XFS_DMAPI
48 static struct vm_operations_struct linvfs_dmapi_file_vm_ops;
/*
 * __linvfs_read(): common helper behind the aio read entry points.
 * Wraps the single user buffer in a one-segment iovec and hands it to
 * the vnode layer via VOP_READ, adding IO_ISDIRECT when the file was
 * opened O_DIRECT.
 * NOTE(review): this extract is incomplete -- the function signature,
 * some locals and the return statement are not visible here.
 */
59 struct iovec iov = {buf, count};
60 struct file *file = iocb->ki_filp;
61 vnode_t *vp = LINVFS_GET_VP(file->f_dentry->d_inode);
/* Callers must keep pos in step with the iocb's position. */
64 BUG_ON(iocb->ki_pos != pos);
66 if (unlikely(file->f_flags & O_DIRECT))
67 ioflags |= IO_ISDIRECT;
68 VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
/* linvfs_aio_read(): normal aio read -- delegates with IO_ISAIO set. */
80 return __linvfs_read(iocb, buf, IO_ISAIO, count, pos);
/*
 * linvfs_aio_read_invis(): "invisible" aio read (IO_INVIS).  Presumably
 * this suppresses DMAPI events/timestamp updates in the VOP_READ path --
 * TODO confirm against the vnode layer.
 */
84 linvfs_aio_read_invis(
90 return __linvfs_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
/*
 * __linvfs_write(): common helper behind the aio write entry points;
 * mirrors __linvfs_read() but issues VOP_WRITE.
 * NOTE(review): extract is incomplete -- signature head, locals and
 * return statements are missing from view.
 */
97 const char __user *buf,
/* Cast away const: struct iovec's base pointer is non-const even for writes. */
102 struct iovec iov = {(void __user *)buf, count};
103 struct file *file = iocb->ki_filp;
104 struct inode *inode = file->f_mapping->host;
105 vnode_t *vp = LINVFS_GET_VP(inode);
108 BUG_ON(iocb->ki_pos != pos);
109 if (unlikely(file->f_flags & O_DIRECT))
110 ioflags |= IO_ISDIRECT;
112 VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
/* linvfs_aio_write(): normal aio write path. */
120 const char __user *buf,
124 return __linvfs_write(iocb, buf, IO_ISAIO, count, pos);
/*
 * linvfs_aio_write_invis(): IO_INVIS variant -- presumably bypasses
 * DMAPI event generation; verify against the VOP_WRITE implementation.
 */
128 linvfs_aio_write_invis(
130 const char __user *buf,
134 return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
/*
 * __linvfs_readv(): synchronous vectored read.  Heap-allocates a kiocb,
 * initialises it as a synchronous iocb for this file, issues VOP_READ
 * over the caller's iovec array, then copies the advanced file position
 * back to *ppos.
 * NOTE(review): lines missing from this extract -- the error return for
 * a failed kmalloc and the kfree()/return tail are not visible, so the
 * kiocb cleanup path cannot be confirmed here.
 */
138 STATIC inline ssize_t
141 const struct iovec *iov,
143 unsigned long nr_segs,
146 struct inode *inode = file->f_mapping->host;
147 vnode_t *vp = LINVFS_GET_VP(inode);
/* kiocb is kmalloc'd rather than stack-allocated -- presumably to keep
 * this inline helper's stack footprint small; TODO confirm rationale. */
151 kiocb = kmalloc(sizeof(*kiocb), GFP_KERNEL);
152 if (unlikely(!kiocb))
155 init_sync_kiocb(kiocb, file);
156 kiocb->ki_pos = *ppos;
158 if (unlikely(file->f_flags & O_DIRECT))
159 ioflags |= IO_ISDIRECT;
160 VOP_READ(vp, kiocb, iov, nr_segs, &kiocb->ki_pos, ioflags, NULL, rval);
/* Propagate the position the vnode layer advanced to. */
162 *ppos = kiocb->ki_pos;
/* linvfs_readv(): plain vectored read, no extra ioflags. */
170 const struct iovec *iov,
171 unsigned long nr_segs,
174 return __linvfs_readv(file, iov, 0, nr_segs, ppos);
/* linvfs_readv_invis(): IO_INVIS vectored read variant. */
180 const struct iovec *iov,
181 unsigned long nr_segs,
184 return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos);
/*
 * __linvfs_writev(): synchronous vectored write; structurally identical
 * to __linvfs_readv() but calls VOP_WRITE.
 * NOTE(review): incomplete extract -- allocation-failure return and the
 * kfree()/return tail are not visible here.
 */
188 STATIC inline ssize_t
191 const struct iovec *iov,
193 unsigned long nr_segs,
196 struct inode *inode = file->f_mapping->host;
197 vnode_t *vp = LINVFS_GET_VP(inode);
201 kiocb = kmalloc(sizeof(*kiocb), GFP_KERNEL);
202 if (unlikely(!kiocb))
205 init_sync_kiocb(kiocb, file);
206 kiocb->ki_pos = *ppos;
207 if (unlikely(file->f_flags & O_DIRECT))
208 ioflags |= IO_ISDIRECT;
210 VOP_WRITE(vp, kiocb, iov, nr_segs, &kiocb->ki_pos, ioflags, NULL, rval);
/* Write back the position advanced by the vnode layer. */
212 *ppos = kiocb->ki_pos;
/* linvfs_writev(): plain vectored write, no extra ioflags. */
221 const struct iovec *iov,
222 unsigned long nr_segs,
225 return __linvfs_writev(file, iov, 0, nr_segs, ppos);
/* linvfs_writev_invis(): IO_INVIS vectored write variant. */
231 const struct iovec *iov,
232 unsigned long nr_segs,
235 return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos);
/*
 * linvfs_sendfile(): sendfile(2) entry point -- thin pass-through to
 * VOP_SENDFILE on the file's vnode.  NOTE(review): signature and return
 * are missing from this extract.
 */
246 vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
249 VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval);
/*
 * linvfs_open(): open(2) entry point.  Rejects opening a file larger
 * than MAX_NON_LFS without O_LARGEFILE (error value not visible in this
 * extract -- presumably -EFBIG; confirm), then calls VOP_OPEN.
 */
259 vnode_t *vp = LINVFS_GET_VP(inode);
262 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
266 VOP_OPEN(vp, NULL, error);
/*
 * linvfs_release(): last-close release hook -- delegates to VOP_RELEASE.
 * NOTE(review): signature and return lines are missing here.
 */
276 vnode_t *vp = LINVFS_GET_VP(inode);
280 VOP_RELEASE(vp, error);
/*
 * linvfs_fsync(): fsync(2) entry point.  Always requests a waited sync
 * (FSYNC_WAIT) over the whole file range [0, -1] via VOP_FSYNC.
 * NOTE(review): lines between the flag setup and the VOP call are
 * missing -- presumably the datasync flag is folded into 'flags' there;
 * confirm against the full source.
 */
288 struct dentry *dentry,
291 struct inode *inode = dentry->d_inode;
292 vnode_t *vp = LINVFS_GET_VP(inode);
294 int flags = FSYNC_WAIT;
300 VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
305 * linvfs_readdir maps to VOP_READDIR().
306 * We need to build a uio, cred, ...
/* Advance to the next on-disk dirent using its self-describing record
 * length (d_reclen). */
309 #define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
311 #ifdef CONFIG_XFS_DMAPI
/*
 * linvfs_filemap_nopage(): DMAPI-aware page fault handler.  Sends a
 * DMAPI mmap event (XFS_SEND_MMAP) so offline data can be recalled
 * before falling through to the generic filemap_nopage().
 * NOTE(review): the error-handling lines between the send and the
 * fallthrough are missing from this extract.
 */
314 linvfs_filemap_nopage(
315 struct vm_area_struct *area,
316 unsigned long address,
319 struct inode *inode = area->vm_file->f_dentry->d_inode;
320 vnode_t *vp = LINVFS_GET_VP(inode);
321 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
/* This handler is only installed on DMAPI-managed filesystems. */
324 ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
326 error = XFS_SEND_MMAP(mp, area, 0);
330 return filemap_nopage(area, address, type);
333 #endif /* CONFIG_XFS_DMAPI */
/*
 * linvfs_readdir(): readdir(2) entry point.  Builds a kernel-space uio
 * over a temporary buffer, fills it via VOP_READDIR, then walks the
 * returned xfs_dirent records feeding each name to filldir().
 * Offsets handed to the VFS are masked to 31 bits (& 0x7fffffff);
 * presumably because the VFS f_pos was treated as a 31-bit cookie here,
 * with 0x7fffffff serving as the EOF sentinel -- TODO confirm.
 * NOTE(review): this extract is missing the signature, the filldir
 * error/break handling, the loop framing and the buffer kfree/return
 * tail.
 */
348 int namelen, size = 0;
349 size_t rlen = PAGE_CACHE_SIZE;
350 xfs_off_t start_offset, curr_offset;
351 xfs_dirent_t *dbp = NULL;
353 vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
356 /* Try fairly hard to get memory */
/* Back off the buffer size (presumably halving; the retry line is not
 * visible) until kmalloc succeeds or we drop below 1 KiB. */
358 if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
361 } while (rlen >= 1024);
363 if (read_buf == NULL)
367 uio.uio_segflg = UIO_SYSSPACE;
368 curr_offset = filp->f_pos;
/* 0x7fffffff is the saved EOF position; map it to the vnode layer's
 * own end marker. */
369 if (filp->f_pos != 0x7fffffff)
370 uio.uio_offset = filp->f_pos;
372 uio.uio_offset = 0xffffffff;
375 uio.uio_resid = iov.iov_len = rlen;
376 iov.iov_base = read_buf;
379 start_offset = uio.uio_offset;
381 VOP_READDIR(vp, &uio, NULL, &eof, error);
/* No forward progress (or error) means we are done. */
382 if ((uio.uio_offset == start_offset) || error) {
/* Number of bytes of dirents actually produced this round. */
387 size = rlen - uio.uio_resid;
388 dbp = (xfs_dirent_t *)read_buf;
390 namelen = strlen(dbp->d_name);
392 if (filldir(dirent, dbp->d_name, namelen,
393 (loff_t) curr_offset & 0x7fffffff,
398 size -= dbp->d_reclen;
399 curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
/* On EOF record the masked final offset; otherwise resume from the
 * last entry consumed. */
406 filp->f_pos = uio.uio_offset & 0x7fffffff;
408 filp->f_pos = curr_offset;
/*
 * linvfs_file_mmap(): mmap(2) entry point.  Installs the plain vm_ops
 * table, switching to the DMAPI-aware table on DMI-managed mounts, and
 * refreshes the inode's access time via a VOP_SETATTR(XFS_AT_UPDATIME)
 * plus __vn_revalidate().
 * NOTE(review): extract is missing the signature head, the DMAPI
 * XFS_SEND_MMAP call inside the VFS_DMI branch, the vattr error return
 * and the kfree/return tail.
 */
419 struct vm_area_struct *vma)
421 struct inode *ip = filp->f_dentry->d_inode;
422 vnode_t *vp = LINVFS_GET_VP(ip);
426 vma->vm_ops = &linvfs_file_vm_ops;
428 #ifdef CONFIG_XFS_DMAPI
429 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
430 vma->vm_ops = &linvfs_dmapi_file_vm_ops;
432 #endif /* CONFIG_XFS_DMAPI */
/* vattr is heap-allocated -- presumably to avoid a large stack object
 * in the mmap path; TODO confirm. */
434 vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
435 if (unlikely(!vattr))
437 vattr->va_mask = XFS_AT_UPDATIME;
438 VOP_SETATTR(vp, vattr, XFS_AT_UPDATIME, NULL, error);
440 __vn_revalidate(vp, vattr); /* update flags */
/*
 * linvfs_ioctl(): unlocked_ioctl entry point -- forwards to VOP_IOCTL
 * with no extra ioflags.  NOTE(review): signature and return lines are
 * missing from this extract.
 */
453 struct inode *inode = filp->f_dentry->d_inode;
454 vnode_t *vp = LINVFS_GET_VP(inode);
456 VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);
459 /* NOTE: some of the ioctl's return positive #'s as a
460 * byte count indicating success, such as
461 * readlink_by_handle. So we don't "sign flip"
462 * like most other routines. This means true
463 * errors need to be returned as a negative value.
/*
 * linvfs_ioctl_invis(): same as above but passes IO_INVIS so the vnode
 * layer treats the operation as invisible I/O.
 */
475 struct inode *inode = filp->f_dentry->d_inode;
476 vnode_t *vp = LINVFS_GET_VP(inode);
479 VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
482 /* NOTE: some of the ioctl's return positive #'s as a
483 * byte count indicating success, such as
484 * readlink_by_handle. So we don't "sign flip"
485 * like most other routines. This means true
486 * errors need to be returned as a negative value.
491 #ifdef CONFIG_XFS_DMAPI
492 #ifdef HAVE_VMOP_MPROTECT
/*
 * linvfs_mprotect(): DMAPI hook for mprotect().  When a shared mapping
 * on a DMI-managed mount gains write permission, send a DMAPI mmap
 * event (XFS_SEND_MMAP with VM_WRITE) first, so managed data can be
 * made resident before writes occur.  NOTE(review): signature and
 * return lines are missing from this extract.
 */
495 struct vm_area_struct *vma,
496 unsigned int newflags)
498 vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode);
501 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
/* Only the shared, newly-writable transition is interesting. */
502 if ((vma->vm_flags & VM_MAYSHARE) &&
503 (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
504 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
506 error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
511 #endif /* HAVE_VMOP_MPROTECT */
512 #endif /* CONFIG_XFS_DMAPI */
514 #ifdef HAVE_FOP_OPEN_EXEC
515 /* If the user is attempting to execute a file that is offline then
516 * we have to trigger a DMAPI READ event before the file is marked as busy
517 * otherwise the invisible I/O will not be able to write to the file to bring
/*
 * linvfs_open_exec(): open-for-exec hook.  On DMI-managed mounts with
 * the READ event enabled, synchronously send DM_EVENT_READ so offline
 * data is recalled before exec marks the file busy (see comment above).
 * NOTE(review): signature, the negated-error convention context and the
 * return tail are missing from this extract.
 */
524 vnode_t *vp = LINVFS_GET_VP(inode);
525 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
529 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
535 if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
/* Negate: XFS_SEND_DATA returns positive errno; VFS wants negative. */
536 error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
543 #endif /* HAVE_FOP_OPEN_EXEC */
/*
 * Regular-file operations table: generic llseek, sync read/write built
 * on the aio entry points via do_sync_read/do_sync_write, plus the
 * linvfs vectored/sendfile/ioctl/mmap/fsync hooks defined above.
 * NOTE(review): some designators (and the closing brace) fall in lines
 * missing from this extract.
 */
545 struct file_operations linvfs_file_operations = {
546 .llseek = generic_file_llseek,
547 .read = do_sync_read,
548 .write = do_sync_write,
549 .readv = linvfs_readv,
550 .writev = linvfs_writev,
551 .aio_read = linvfs_aio_read,
552 .aio_write = linvfs_aio_write,
553 .sendfile = linvfs_sendfile,
554 .unlocked_ioctl = linvfs_ioctl,
556 .compat_ioctl = linvfs_compat_ioctl,
558 .mmap = linvfs_file_mmap,
560 .release = linvfs_release,
561 .fsync = linvfs_fsync,
562 #ifdef HAVE_FOP_OPEN_EXEC
563 .open_exec = linvfs_open_exec,
/*
 * "Invisible" file operations table, used for the special I/O paths
 * (handle-based access): identical to linvfs_file_operations except the
 * read/write/ioctl hooks are the *_invis variants that pass IO_INVIS.
 * NOTE(review): closing brace and some designators are in lines missing
 * from this extract.
 */
567 struct file_operations linvfs_invis_file_operations = {
568 .llseek = generic_file_llseek,
569 .read = do_sync_read,
570 .write = do_sync_write,
571 .readv = linvfs_readv_invis,
572 .writev = linvfs_writev_invis,
573 .aio_read = linvfs_aio_read_invis,
574 .aio_write = linvfs_aio_write_invis,
575 .sendfile = linvfs_sendfile,
576 .unlocked_ioctl = linvfs_ioctl_invis,
578 .compat_ioctl = linvfs_compat_invis_ioctl,
580 .mmap = linvfs_file_mmap,
582 .release = linvfs_release,
583 .fsync = linvfs_fsync,
/*
 * Directory operations table: generic_read_dir rejects plain read(2)
 * on directories; readdir and ioctl go through the linvfs hooks above.
 */
587 struct file_operations linvfs_dir_operations = {
588 .read = generic_read_dir,
589 .readdir = linvfs_readdir,
590 .unlocked_ioctl = linvfs_ioctl,
592 .compat_ioctl = linvfs_compat_ioctl,
594 .fsync = linvfs_fsync,
/*
 * Default mmap vm_ops: plain page-cache fault handling.
 */
597 static struct vm_operations_struct linvfs_file_vm_ops = {
598 .nopage = filemap_nopage,
599 .populate = filemap_populate,
602 #ifdef CONFIG_XFS_DMAPI
/*
 * DMAPI-aware mmap vm_ops: faults go through linvfs_filemap_nopage()
 * (which raises the DMAPI mmap event first), and mprotect is hooked
 * where the kernel supports it.
 */
603 static struct vm_operations_struct linvfs_dmapi_file_vm_ops = {
604 .nopage = linvfs_filemap_nopage,
605 .populate = filemap_populate,
606 #ifdef HAVE_VMOP_MPROTECT
607 .mprotect = linvfs_mprotect,
610 #endif /* CONFIG_XFS_DMAPI */