/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count)  /* size of data to zero         */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}
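/*
 * Illustrative example (added for clarity, not part of the original source):
 * assuming PAGE_CACHE_SIZE is 4096, a call such as xfs_iozero(ip, 6144, 8192)
 * loops three times: 2048 bytes at offset 2048 within the first page, a full
 * 4096-byte page, then the remaining 2048 bytes at offset 0 of the last page.
 * Each iteration goes through pagecache_write_begin()/pagecache_write_end(),
 * so unallocated blocks are allocated and partially overwritten blocks are
 * read in first, as described in the comment above.
 */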

STATIC int
xfs_file_fsync(
        struct file             *file,
        int                     datasync)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_trans        *tp;
        int                     error = 0;
        int                     log_flushed = 0;

        xfs_itrace_entry(ip);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -XFS_ERROR(EIO);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        xfs_ioend_wait(ip);

        /*
         * We always need to make sure that the required inode state is safe on
         * disk.  The inode might be clean but we still might need to force the
         * log because of committed transactions that haven't hit the disk yet.
         * Likewise, there could be unflushed non-transactional changes to the
         * inode core that have to go to disk and this requires us to issue
         * a synchronous transaction to capture these changes correctly.
         *
         * This code relies on the assumption that if the i_update_core field
         * of the inode is clear and the inode is unpinned then it is clean
         * and no action is required.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);

        /*
         * First check if the VFS inode is marked dirty.  All the dirtying
         * of non-transactional updates now goes through mark_inode_dirty*,
         * which allows us to distinguish between pure timestamp updates
         * and i_size updates which need to be caught for fdatasync.
         * After that also check for the dirty state in the XFS inode, which
         * might get cleared when the inode gets written out via the AIL
         * or xfs_iflush_cluster.
         */
        if (((inode->i_state & I_DIRTY_DATASYNC) ||
            ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
            ip->i_update_core) {
                /*
                 * Kick off a transaction to log the inode core to get the
                 * updates.  The sync transaction will also force the log.
                 */
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
                error = xfs_trans_reserve(tp, 0,
                                XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return -error;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);

                /*
                 * Note - it's possible that we might have pushed ourselves out
                 * of the way during trans_reserve which would flush the inode.
                 * But there's no guarantee that the inode buffer has actually
                 * gone out yet (it's delwri).  Plus the buffer could be pinned
                 * anyway if it's part of an inode in another recent
                 * transaction.  So we play it safe and fire off the
                 * transaction anyway.
                 */
                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
                xfs_trans_ihold(tp, ip);
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                xfs_trans_set_sync(tp);
                error = _xfs_trans_commit(tp, 0, &log_flushed);

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        } else {
                /*
                 * Timestamps/size haven't changed since last inode flush or
                 * inode transaction commit.  That means either nothing got
                 * written or a transaction committed which caught the updates.
                 * If the latter happened and the transaction hasn't hit the
                 * disk yet, the inode will still be pinned.  If it is,
                 * force the log.
                 */
                if (xfs_ipincount(ip)) {
                        error = _xfs_log_force_lsn(ip->i_mount,
                                        ip->i_itemp->ili_last_lsn,
                                        XFS_LOG_SYNC, &log_flushed);
                }
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
        }

        if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
                /*
                 * If the log write didn't issue an ordered tag we need
                 * to flush the disk cache for the data device now.
                 */
                if (!log_flushed)
                        xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

                /*
                 * If this inode is on the RT dev we need to flush that
                 * cache as well.
                 */
                if (XFS_IS_REALTIME_INODE(ip))
                        xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
        }

        return -error;
}
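/*
 * Worked example of the logic above (added for clarity, not in the original
 * source): for fdatasync() of an inode whose only pending change is a
 * timestamp update, I_DIRTY_DATASYNC is clear and the datasync argument
 * suppresses the I_DIRTY_SYNC check, so no transaction is started; if a
 * previous transaction covering the inode has not yet reached the log, the
 * inode is still pinned and only a log force up to ili_last_lsn is issued.
 */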

STATIC ssize_t
xfs_file_aio_read(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        int                     ioflags = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        BUG_ON(iocb->ki_pos != pos);

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (iocb->ki_pos == ip->i_size)
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
        if (n <= 0 || size == 0)
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages) {
                        ret = -xfs_flushinval_pages(ip,
                                        (iocb->ki_pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                }
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

        ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}
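/*
 * Example of the O_DIRECT alignment check above (illustrative, not in the
 * original source): bt_smask is the sector size mask of the backing buftarg,
 * e.g. 0x1ff for 512-byte sectors.  A direct read of 3000 bytes at offset
 * 1024 fails the check (3000 & 0x1ff != 0) and returns EINVAL, unless the
 * request starts exactly at ip->i_size, in which case 0 is returned to
 * signal end of file.
 */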

STATIC ssize_t
xfs_file_splice_read(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        unsigned int            flags)
{
        struct xfs_inode        *ip = XFS_I(infilp->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);

        if (infilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }

        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

STATIC ssize_t
xfs_file_splice_write(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        unsigned int            flags)
{
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fsize_t             isize, new_size;
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_write_calls);

        if (outfilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}
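/*
 * Example of the partial-block arithmetic above (illustrative, not in the
 * original source): with 4096-byte filesystem blocks and isize = 10000,
 * XFS_B_FSB_OFFSET() yields zero_offset = 1808, so zero_len starts as
 * 4096 - 1808 = 2288 bytes and is then capped at offset - isize if the new
 * write begins inside the same block.
 */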

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This loop handles initializing pages that were
                         * partially initialized by the code below this
                         * loop. It basically zeroes the part of the page
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}
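/*
 * Example of the block range computed above (illustrative, not in the
 * original source): with 4096-byte blocks, isize = 5000 and a new write
 * starting at offset = 20000, last_fsb = 1, start_zero_fsb = 2 and
 * end_zero_fsb = 4.  Block 1's tail was already zeroed by
 * xfs_zero_last_block(), so the loop maps blocks 2-4 and calls xfs_iozero()
 * only for the extents that are neither holes nor unwritten.
 */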

STATIC ssize_t
xfs_file_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0, error = 0;
        int                     ioflags = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
        int                     eventsent = 0;
        size_t                  ocount = 0, count;
        int                     need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        BUG_ON(iocb->ki_pos != pos);

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        if (count == 0)
                return 0;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
                                      pos, count, dmflags, &iolock);
                if (error) {
                        goto out_unlock_internal;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != ip->i_size)
                        goto start;
        }

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
                        xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;

        if (likely(!(ioflags & IO_INVIS)))
                file_update_time(file);

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > ip->i_size) {
                error = xfs_zero_eof(ip, pos, ip->i_size);
                if (error) {
                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */
        error = -file_remove_suid(file);
        if (unlikely(error))
                goto out_unlock_internal;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if ((ioflags & IO_ISDIRECT)) {
                if (mapping->nrpages) {
                        WARN_ON(need_i_mutex == 0);
                        error = xfs_flushinval_pages(ip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        need_i_mutex = 0;
                }

                trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &nr_segs, pos, &iocb->ki_pos, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(ip, iolock);
                        goto relock;
                }
        } else {
                int enospc = 0;
                ssize_t ret2 = 0;

write_retry:
                trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
                ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
                                pos, &iocb->ki_pos, count, ret);
                /*
                 * if we just got an ENOSPC, flush the inode now we
                 * aren't holding any page locks and retry *once*
                 */
                if (ret2 == -ENOSPC && !enospc) {
                        error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
                        if (error)
                                goto out_unlock_internal;
                        enospc = 1;
                        goto write_retry;
                }
                ret = ret2;
        }

        current->backing_dev_info = NULL;

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize))
                iocb->ki_pos = isize;

        if (iocb->ki_pos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (iocb->ki_pos > ip->i_size)
                        ip->i_size = iocb->ki_pos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(ip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
                                DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(ip, iolock);
                if (error)
                        goto out_unlock_internal;
                goto start;
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                loff_t end = pos + ret - 1;
                int error2;

                xfs_iunlock(ip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);

                error2 = filemap_write_and_wait_range(mapping, pos, end);
                if (!error)
                        error = error2;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(ip, iolock);

                error2 = -xfs_file_fsync(file,
                                        (file->f_flags & __O_SYNC) ? 0 : 1);
                if (!error)
                        error = error2;
        }

 out_unlock_internal:
        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}
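/*
 * Note on the error convention above (added for clarity, not in the original
 * source): "error" mostly carries positive XFS-style error codes, while
 * "ret" uses the negative errno convention of the generic write helpers.
 * Setting error = -ret before the final "return -error" means a successful
 * write returns the byte count and a failed one returns a negative errno,
 * as the VFS expects.
 */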

STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *file)
{
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
        return 0;
}

STATIC int
xfs_dir_open(
        struct inode    *inode,
        struct file     *file)
{
        struct xfs_inode *ip = XFS_I(inode);
        int             mode;
        int             error;

        error = xfs_file_open(inode, file);
        if (error)
                return error;

        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * certain to have the next operation be a read there.
         */
        mode = xfs_ilock_map_shared(ip);
        if (ip->i_d.di_nextents > 0)
                xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
        xfs_iunlock(ip, mode);
        return 0;
}

STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
        struct file     *filp,
        void            *dirent,
        filldir_t       filldir)
{
        struct inode    *inode = filp->f_path.dentry->d_inode;
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;

        /*
         * The Linux API doesn't pass the total size of the buffer
         * we read into down to the filesystem.  With the filldir concept
         * it's not needed for correct information, but the XFS dir2 leaf
         * code wants an estimate of the buffer size to calculate its
         * readahead window and size the buffers used for mapping to
         * physical blocks.
         *
         * Try to give it an estimate that's good enough, maybe at some
         * point we can change the ->readdir prototype to include the
         * buffer size.  For now we use the current glibc buffer size.
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

        error = xfs_readdir(ip, dirent, bufsize,
                                (xfs_off_t *)&filp->f_pos, filldir);
        if (error)
                return -error;
        return 0;
}

STATIC int
xfs_file_mmap(
        struct file     *filp,
        struct vm_area_struct *vma)
{
        vma->vm_ops = &xfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        file_accessed(filp);
        return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable. We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
{
        return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = xfs_file_aio_read,
        .aio_write      = xfs_file_aio_write,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
        .open_exec      = xfs_file_open_exec,
#endif
};

const struct file_operations xfs_dir_file_operations = {
        .open           = xfs_dir_open,
        .read           = generic_read_dir,
        .readdir        = xfs_file_readdir,
        .llseek         = generic_file_llseek,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .fsync          = xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = xfs_vm_page_mkwrite,
};