/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int                     tag,
        xfs_inode_t             *ip,
        void                    *data,
        size_t                  segs,
        loff_t                  offset,
        int                     ioflags)
{
        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}

void
xfs_inval_cached_trace(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{
        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 *      xfs_iozero
 *
 *      xfs_iozero zeroes the specified range of the file, and marks all
 *      the affected blocks as valid and modified.  If an affected block
 *      is not allocated, it will be allocated.  If an affected block is
 *      not completely overwritten, and is not valid before the operation,
 *      it will be read from disk before being partially zeroed.
 */
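/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096):
 * zeroing count == 6000 bytes starting at pos == 3000 is split by the
 * loop below into page-sized chunks:
 *
 *      pos == 3000: offset == 3000 & 4095 == 3000, bytes == 4096 - 3000 == 1096
 *      pos == 4096: offset == 0, bytes == 4096
 *      pos == 8192: offset == 0, bytes == min(4096, 808) == 808
 *
 * i.e. a partial leading page, full middle pages, then a partial
 * trailing page (1096 + 4096 + 808 == 6000 bytes in total).
 */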
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count)  /* size of data to zero         */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = ip->i_vnode->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}

ssize_t                 /* bytes read, or (-)  error */
xfs_read(
        xfs_inode_t             *ip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        bhv_vnode_t             *vp = XFS_ITOV(ip);
        xfs_mount_t             *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size)
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
        }
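        /*
         * Example for the alignment check above (illustrative, assuming
         * a 512-byte sector device, i.e. bt_smask == 0x1ff): a direct
         * read of size 1024 at *offset 3072 is accepted since both are
         * multiples of 512; *offset 3000 or size 1000 would fail with
         * EINVAL, unless *offset lies exactly at EOF, where the read
         * returns 0 instead.
         */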

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *offset, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (VN_CACHED(vp))
                        ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                    -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, ip,
                                (void *)iovp, segs, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

ssize_t
xfs_splice_read(
        xfs_inode_t             *ip,
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        bhv_vnode_t             *vp = XFS_ITOV(ip);
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

ssize_t
xfs_splice_write(
        xfs_inode_t             *ip,
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        bhv_vnode_t             *vp = XFS_ITOV(ip);
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;
        struct inode            *inode = outfilp->f_mapping->host;
        xfs_fsize_t             isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
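/*
 * Worked example (illustrative, assuming 4096-byte filesystem blocks):
 * for isize == 10000 the last block covers bytes 8192..12287, and
 * zero_offset below is 10000 % 4096 == 1808.  The 2288 bytes from
 * offset 10000 to the end of that block (capped at the new write
 * offset) are zeroed through xfs_iozero().
 */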
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error)
                return error;
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK)
                return 0;
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
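/*
 * Worked example (illustrative, assuming 4096-byte blocks): growing a
 * file from isize == 10000 to offset == 20000 first zeroes the tail of
 * block 2 (bytes 10000..12287) via xfs_zero_last_block(), then walks
 * blocks 3 (start_zero_fsb) through 4 (end_zero_fsb).  Blocks that are
 * allocated and written are zeroed with xfs_iozero(); holes and
 * unwritten extents are skipped.
 */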
int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
        ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
                ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
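        /*
         * Boundary example (illustrative, assuming 4096-byte blocks):
         * for isize == 8192 the last byte of data is at offset 8191,
         * which lives in block 1; truncating (8192 - 1) gives
         * last_fsb == 1, whereas truncating 8192 directly would point
         * at block 2, which holds no data yet.
         */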
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
                        ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This loop handles initializing pages that were
                         * partially initialized by the code below this
                         * loop. It basically zeroes the part of the page
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error)
                        goto out_lock;

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

ssize_t                         /* bytes written, or (-) error */
xfs_write(
        struct xfs_inode        *xip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        bhv_vnode_t             *vp = XFS_ITOV(xip);
        unsigned long           segs = nsegs;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
        int                     eventsent = 0;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        mp = xip->i_mount;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if (DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
                                      pos, count, dmflags, &iolock);
                if (error)
                        goto out_unlock_internal;
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(xip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (VN_CACHED(vp) || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > xip->i_size)
                xip->i_new_size = new_size;

        if (likely(!(ioflags & IO_INVIS))) {
                file_update_time(file);
                xfs_ichgtime_fast(xip, inode,
                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > xip->i_size) {
                error = xfs_zero_eof(xip, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */

        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_path.dentry);
                if (unlikely(error))
                        goto out_unlock_internal;
        }

retry:
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if (ioflags & IO_ISDIRECT) {
                if (VN_CACHED(vp)) {
                        WARN_ON(need_i_mutex == 0);
                        xfs_inval_cached_trace(xip, pos, -1,
                                        (pos & PAGE_CACHE_MASK), -1);
                        error = xfs_flushinval_pages(xip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        need_i_mutex = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
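                /*
                 * Example (illustrative): an 8192-byte direct write that
                 * reaches a hole after 4096 bytes returns ret == 4096
                 * here; pos is advanced and count reduced by 4096, and
                 * the remainder is retried through the buffered path
                 * after relocking.
                 */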
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
                                DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                if (error)
                        goto out_unlock_internal;
                pos = xip->i_size;
                ret = 0;
                goto retry;
        }

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                int error2;

                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error2 = sync_page_range(inode, mapping, pos, ret);
                if (!error)
                        error = error2;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                error2 = xfs_write_sync_logforce(mp, xip);
                if (!error)
                        error = error2;
        }

 out_unlock_internal:
        if (xip->i_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                xip->i_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(xip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer after prematurely
 * unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk
 * in case we are shutting down the filesystem.  Typically user data
 * goes through this path; one of the exceptions is the superblock.
 */
int
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                /* Grio redirection would go here
                 * if (XFS_BUF_IS_GRIO(bp)) {
                 */
                xfs_buf_iorequest(bp);
                return 0;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}