/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/pipe_fs_i.h>
#include <linux/mount.h>
#include <linux/writeback.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"

#include "buffer_head_io.h"

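/*
 * Write back the inode's dirty pages and any associated metadata
 * buffers. Helper for ocfs2_sync_file() below.
 */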
static int ocfs2_sync_inode(struct inode *inode)
{
        filemap_fdatawrite(inode->i_mapping);
        return sync_mapping_buffers(inode->i_mapping);
}

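/*
 * ->open() for regular files. Under ip_lock, make sure another node
 * hasn't wiped the inode from disk, note O_DIRECT openers and bump
 * the open count.
 */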
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
                   file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't then we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);
        status = 0;
leave:
        mlog_exit(status);
        return status;
}

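/*
 * ->release() counterpart to ocfs2_file_open(): drop the open count
 * and, once the last opener is gone, clear the O_DIRECT flag.
 */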
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
                   file->f_path.dentry->d_name.len,
                   file->f_path.dentry->d_name.name);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
        spin_unlock(&oi->ip_lock);

        mlog_exit(0);

        return 0;
}

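/*
 * ->fsync(): flush the inode's data pages, then force a journal
 * commit so the metadata changes hit disk too. Note that the
 * datasync argument is currently ignored.
 */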
static int ocfs2_sync_file(struct file *file,
                           struct dentry *dentry,
                           int datasync)
{
        int err = 0;
        journal_t *journal;
        struct inode *inode = dentry->d_inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
                   dentry->d_name.len, dentry->d_name.name);

        err = ocfs2_sync_inode(dentry->d_inode);
        if (err)
                goto bail;

        journal = osb->journal->j_journal;
        err = journal_force_commit(journal);

bail:
        mlog_exit(err);

        return (err < 0) ? -EIO : 0;
}

int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different than touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = CURRENT_TIME;
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;
        else
                return 1;
}

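/*
 * Update i_atime and push the change into the dinode within a small
 * transaction. Callers are expected to hold the appropriate cluster
 * lock and pass in the inode's dinode buffer.
 */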
int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;

        mlog_entry_void();

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        inode->i_atime = CURRENT_TIME;
        ret = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        mlog_exit(ret);
        return ret;
}

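/*
 * Set the new in-memory i_size (along with i_blocks and c/mtime) and
 * write it into the dinode under the given transaction handle. Shared
 * by the truncate and extend paths.
 */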
int ocfs2_set_inode_size(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *fe_bh,
                         u64 new_i_size)
{
        int status;

        mlog_entry_void();
        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        mlog_exit(status);
        return status;
}

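/*
 * Wrapper around ocfs2_set_inode_size() which starts and commits its
 * own transaction, for callers that only need the size change.
 */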
static int ocfs2_simple_size_update(struct inode *inode,
                                    struct buffer_head *di_bh,
                                    u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;

        mlog_entry_void();

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_set_inode_size(handle, inode, fe_bh, new_i_size);
        if (status < 0)
                mlog_errno(status);

        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

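/*
 * Shrink an inode to new_i_size. Equal sizes are a no-op handled
 * before any further cluster locking; a real allocation change is
 * staged - i_size is first updated via ocfs2_orphan_for_truncate()
 * so that recovery can finish the job, then the truncate itself is
 * prepared and committed.
 */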
static int ocfs2_truncate_file(struct inode *inode,
                               struct buffer_head *di_bh,
                               u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_truncate_context *tc = NULL;

        mlog_entry("(inode = %llu, new_i_size = %llu)\n",
                   (unsigned long long)OCFS2_I(inode)->ip_blkno,
                   (unsigned long long)new_i_size);

        truncate_inode_pages(inode->i_mapping, new_i_size);

        fe = (struct ocfs2_dinode *) di_bh->b_data;
        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
                status = -EIO;
                goto bail;
        }

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
                     (unsigned long long)le64_to_cpu(fe->i_size),
                     (unsigned long long)new_i_size);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
             (unsigned long long)le64_to_cpu(fe->i_blkno),
             (unsigned long long)le64_to_cpu(fe->i_size),
             (unsigned long long)new_i_size);

        /* let's handle the simple truncate cases before doing any more
         * cluster locking. */
        if (new_i_size == le64_to_cpu(fe->i_size))
                goto bail;

        /* This forces other nodes to sync and drop their pages. Do
         * this even if we have a truncate without allocation change -
         * ocfs2 cluster sizes can be much greater than page size, so
         * we have to truncate them anyway. */
        status = ocfs2_data_lock(inode, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        ocfs2_data_unlock(inode, 1);

        /* alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        /* TODO: orphan dir cleanup here. */
bail:

        mlog_exit(status);
        return status;
}

/*
 * extend allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
                               struct inode *inode,
                               u32 *logical_offset,
                               u32 clusters_to_add,
                               struct buffer_head *fe_bh,
                               handle_t *handle,
                               struct ocfs2_alloc_context *data_ac,
                               struct ocfs2_alloc_context *meta_ac,
                               enum ocfs2_alloc_restarted *reason_ret)
{
        int status = 0;
        int free_extents;
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
        enum ocfs2_alloc_restarted reason = RESTART_NONE;
        u32 bit_off, num_bits;
        u64 block;

        BUG_ON(!clusters_to_add);

        free_extents = ocfs2_num_free_extents(osb, inode, fe);
        if (free_extents < 0) {
                status = free_extents;
                mlog_errno(status);
                goto leave;
        }

        /* there are two cases which could cause us to EAGAIN in the
         * we-need-more-metadata case:
         * 1) we haven't reserved *any*
         * 2) we are so fragmented, we've needed to add metadata too
         *    many times. */
        if (!free_extents && !meta_ac) {
                mlog(0, "we haven't reserved any metadata!\n");
                status = -EAGAIN;
                reason = RESTART_META;
                goto leave;
        } else if ((!free_extents)
                   && (ocfs2_alloc_context_bits_left(meta_ac)
                       < ocfs2_extend_meta_needed(fe))) {
                mlog(0, "filesystem is really fragmented...\n");
                status = -EAGAIN;
                reason = RESTART_META;
                goto leave;
        }

        status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
                                      &bit_off, &num_bits);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }

        BUG_ON(num_bits > clusters_to_add);

        /* reserve our write early -- insert_extent may update the inode */
        status = ocfs2_journal_access(handle, inode, fe_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
        mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
             num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
        status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
                                     *logical_offset, block, num_bits,
                                     meta_ac);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        status = ocfs2_journal_dirty(handle, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        clusters_to_add -= num_bits;
        *logical_offset += num_bits;

        if (clusters_to_add) {
                mlog(0, "need to alloc once more, clusters = %u, wanted = "
                     "%u\n", fe->i_clusters, clusters_to_add);
                status = -EAGAIN;
                reason = RESTART_TRANS;
        }

leave:
        mlog_exit(status);
        if (reason_ret)
                *reason_ret = reason;
        return status;
}

/*
 * For a given allocation, determine which allocators will need to be
 * accessed, and lock them, reserving the appropriate number of bits.
 *
 * Called from ocfs2_extend_allocation() for file systems which don't
 * support holes, and from ocfs2_write() for file systems which
 * understand sparse inodes.
 */
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
                          u32 clusters_to_add,
                          struct ocfs2_alloc_context **data_ac,
                          struct ocfs2_alloc_context **meta_ac)
{
        int ret, num_free_extents;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        *meta_ac = NULL;
        *data_ac = NULL;

        mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
             "clusters_to_add = %u\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
             le32_to_cpu(di->i_clusters), clusters_to_add);

        num_free_extents = ocfs2_num_free_extents(osb, inode, di);
        if (num_free_extents < 0) {
                ret = num_free_extents;
                mlog_errno(ret);
                goto out;
        }

        /*
         * Sparse allocation file systems need to be more conservative
         * with reserving room for expansion - the actual allocation
         * happens while we've got a journal handle open so re-taking
         * a cluster lock (because we ran out of room for another
         * extent) will violate ordering rules.
         *
         * Most of the time we'll only be seeing this 1 cluster at a time
         * anyway.
         */
        if (!num_free_extents ||
            (ocfs2_sparse_alloc(osb) && num_free_extents < clusters_to_add)) {
                ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
        if (ret < 0) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
                goto out;
        }

out:
        if (ret) {
                if (*meta_ac) {
                        ocfs2_free_alloc_context(*meta_ac);
                        *meta_ac = NULL;
                }

                /*
                 * We cannot have an error and a non null *data_ac.
                 */
        }

        return ret;
}

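/*
 * Grow an inode's data allocation by clusters_to_add clusters, for
 * file systems without sparse file support. Wraps the
 * lock-allocators / start-transaction / do-extend cycle, extending
 * the transaction on RESTART_TRANS and redoing the whole reservation
 * on RESTART_META, as directed by ocfs2_do_extend_allocation().
 */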
static int ocfs2_extend_allocation(struct inode *inode,
                                   u32 clusters_to_add)
{
        int status = 0;
        int restart_func = 0;
        int drop_alloc_sem = 0;
        int credits;
        u32 prev_clusters, logical_start;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);

        /*
         * This function only exists for file systems which don't
         * support holes.
         */
        BUG_ON(ocfs2_sparse_alloc(osb));

        status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
                                  OCFS2_BH_CACHED, inode);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        fe = (struct ocfs2_dinode *) bh->b_data;
        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
                status = -EIO;
                goto leave;
        }

        logical_start = OCFS2_I(inode)->ip_clusters;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        /* blocks people in read/write from reading our allocation
         * until we're done changing it. We depend on i_mutex to block
         * other extend/truncate calls while we're here. Ordering wrt
         * start_trans is important here -- always do it before! */
        down_write(&OCFS2_I(inode)->ip_alloc_sem);
        drop_alloc_sem = 1;

        status = ocfs2_lock_allocators(inode, fe, clusters_to_add, &data_ac,
                                       &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        /* reserve a write to the file entry early on - that way if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access(handle, inode, bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_do_extend_allocation(osb,
                                            inode,
                                            &logical_start,
                                            clusters_to_add,
                                            bh,
                                            handle,
                                            data_ac,
                                            meta_ac,
                                            &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }

        status = ocfs2_journal_dirty(handle, bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        mlog(0, "restarting function.\n");
                        restart_func = 1;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        mlog(0, "restarting transaction.\n");
                        /* TODO: This can be more intelligent. */
                        credits = ocfs2_calc_extend_credits(osb->sb,
                                                            fe,
                                                            clusters_to_add);
                        status = ocfs2_extend_trans(handle, credits);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                status = -ENOMEM;
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
             fe->i_clusters, (unsigned long long)fe->i_size);
        mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
             OCFS2_I(inode)->ip_clusters, i_size_read(inode));

leave:
        if (drop_alloc_sem) {
                up_write(&OCFS2_I(inode)->ip_alloc_sem);
                drop_alloc_sem = 0;
        }
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        if (bh) {
                brelse(bh);
                bh = NULL;
        }

        mlog_exit(status);
        return status;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->prepare_write() and
 * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
                                 u64 size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index;
        unsigned int offset;
        handle_t *handle = NULL;
        int ret;

        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
        /* ugh.  in prepare/commit_write, if from==to==start of block, we
        ** skip the prepare.  make sure we never send an offset for the start
        ** of a block
        */
        if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
                offset++;
        }
        index = size >> PAGE_CACHE_SHIFT;

        page = grab_cache_page(mapping, index);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_unlock;
        }

        if (ocfs2_should_order_data(inode)) {
                handle = ocfs2_start_walk_page_trans(inode, page, offset,
                                                     offset);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        handle = NULL;
                        goto out_unlock;
                }
        }

        /* must not update i_size! */
        ret = block_commit_write(page, offset, offset);
        if (ret < 0)
                mlog_errno(ret);
        else
                ret = 0;

        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_unlock:
        unlock_page(page);
        page_cache_release(page);
out:
        return ret;
}

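/*
 * Zero from the block-aligned old i_size up to zero_to_size, one
 * block at a time via ocfs2_write_zero_page(). The cond_resched()
 * keeps very large extends from monopolizing the cpu.
 */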
static int ocfs2_zero_extend(struct inode *inode,
                             u64 zero_to_size)
{
        int ret = 0;
        u64 start_off;
        struct super_block *sb = inode->i_sb;

        start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        while (start_off < zero_to_size) {
                ret = ocfs2_write_zero_page(inode, start_off);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                start_off += sb->s_blocksize;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

out:
        return ret;
}

/*
 * A tail_to_skip value > 0 indicates that we're being called from
 * ocfs2_file_aio_write(). This has the following implications:
 *
 * - we don't want to update i_size
 * - di_bh will be NULL, which is fine because it's only used in the
 *   case where we want to update i_size.
 * - ocfs2_zero_extend() will then only be filling the hole created
 *   between i_size and the start of the write.
 */
static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size,
                             size_t tail_to_skip)
{
        int ret = 0;
        u32 clusters_to_add = 0;

        BUG_ON(!tail_to_skip && !di_bh);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
                BUG_ON(tail_to_skip != 0);
                goto out_update_size;
        }

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
                          OCFS2_I(inode)->ip_clusters;

        /*
         * protect the pages that ocfs2_zero_extend is going to be
         * pulling into the page cache.. we do this before the
         * metadata extend so that we don't get into the situation
         * where we've extended the metadata but can't get the data
         * lock to zero.
         */
        ret = ocfs2_data_lock(inode, 1);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        if (clusters_to_add) {
                ret = ocfs2_extend_allocation(inode, clusters_to_add);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_unlock;
        }

out_update_size:
        if (!tail_to_skip) {
                /* We're being called from ocfs2_setattr() which wants
                 * us to update i_size */
                ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
                if (ret < 0)
                        mlog_errno(ret);
        }

out_unlock:
        if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ocfs2_data_unlock(inode, 1);

out:
        return ret;
}

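/*
 * ->setattr() entry point. Size changes take the rw and meta cluster
 * locks and go through ocfs2_truncate_file() or ocfs2_extend_file();
 * the remaining attributes are applied with inode_setattr() and the
 * dinode is marked dirty in a small transaction.
 */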
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;

        mlog_entry("(0x%p, '%.*s')\n", dentry,
                   dentry->d_name.len, dentry->d_name.name);

        if (attr->ia_valid & ATTR_MODE)
                mlog(0, "mode change: %d\n", attr->ia_mode);
        if (attr->ia_valid & ATTR_UID)
                mlog(0, "uid change: %d\n", attr->ia_uid);
        if (attr->ia_valid & ATTR_GID)
                mlog(0, "gid change: %d\n", attr->ia_gid);
        if (attr->ia_valid & ATTR_SIZE)
                mlog(0, "size change...\n");
        if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
                mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
                mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
                return 0;
        }

        status = inode_change_ok(inode, attr);
        if (status)
                return status;

        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = ocfs2_meta_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail_unlock_rw;
        }

        if (size_change && attr->ia_size != i_size_read(inode)) {
                if (i_size_read(inode) > attr->ia_size)
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto bail_unlock;
        }

        status = inode_setattr(inode, attr);
        if (status < 0) {
                mlog_errno(status);
                goto bail_commit;
        }

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock:
        ocfs2_meta_unlock(inode, 1);
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:
        if (bh)
                brelse(bh);

        mlog_exit(status);
        return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
                  struct dentry *dentry,
                  struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = dentry->d_inode->i_sb;
        struct ocfs2_super *osb = sb->s_fs_info;
        int err;

        mlog_entry_void();

        err = ocfs2_inode_revalidate(dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        mlog_exit(err);

        return err;
}

int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
{
        int ret;

        mlog_entry_void();

        ret = ocfs2_meta_lock(inode, NULL, 0);
        if (ret) {
                if (ret != -ENOENT)
                        mlog_errno(ret);
                goto out;
        }

        ret = generic_permission(inode, mask, NULL);

        ocfs2_meta_unlock(inode, 0);
out:
        mlog_exit(ret);
        return ret;
}

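/*
 * Clear the suid/sgid bits on both the in-memory inode and the
 * dinode, in a transaction of our own. The write path uses this
 * instead of remove_suid() to avoid recursive cluster locking
 * through ->setattr - see ocfs2_prepare_inode_for_write() below.
 */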
static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        mlog_entry("(Inode %llu, mode 0%o)\n",
                   (unsigned long long)oi->ip_blkno, inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        ret = ocfs2_journal_access(handle, inode, bh,
                                   OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_bh;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);

        ret = ocfs2_journal_dirty(handle, bh);
        if (ret < 0)
                mlog_errno(ret);
out_bh:
        brelse(bh);
out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(ret);
        return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
                                       size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
                                         loff_t *ppos,
                                         size_t count,
                                         int appending,
                                         int *direct_io)
{
        int ret = 0, meta_level = appending;
        struct inode *inode = dentry->d_inode;
        u32 clusters;
        loff_t newsize, saved_pos;

        /*
         * We sample i_size under a read level meta lock to see if our write
         * is extending the file, if it is we back off and get a write level
         * meta lock.
         */
        for(;;) {
                ret = ocfs2_meta_lock(inode, NULL, meta_level);
                if (ret < 0) {
                        meta_level = -1;
                        mlog_errno(ret);
                        goto out;
                }

                /* Clear suid / sgid if necessary. We do this here
                 * instead of later in the write path because
                 * remove_suid() calls ->setattr without any hint that
                 * we may have already done our cluster locking. Since
                 * ocfs2_setattr() *must* take cluster locks to
                 * proceed, this will lead us to recursively lock the
                 * inode. There's also the dinode i_size state which
                 * can be lost via setattr during extending writes (we
                 * set inode->i_size at the end of a write). */
                if (should_remove_suid(dentry)) {
                        if (meta_level == 0) {
                                ocfs2_meta_unlock(inode, meta_level);
                                meta_level = 1;
                                continue;
                        }

                        ret = ocfs2_write_remove_suid(inode);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto out_unlock;
                        }
                }

                /* work on a copy of ppos until we're sure that we won't have
                 * to recalculate it due to relocking. */
                if (appending) {
                        saved_pos = i_size_read(inode);
                        mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
                } else {
                        saved_pos = *ppos;
                }

                if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
                        loff_t end = saved_pos + count;

                        /*
                         * Skip the O_DIRECT checks if we don't need
                         * them.
                         */
                        if (!direct_io || !(*direct_io))
                                break;

                        /*
                         * Allowing concurrent direct writes means
                         * i_size changes wouldn't be synchronized, so
                         * one node could wind up truncating another
                         * node's writes.
                         */
                        if (end > i_size_read(inode)) {
                                *direct_io = 0;
                                break;
                        }

                        /*
                         * We don't fill holes during direct io, so
                         * check for them here. If any are found, the
                         * caller will have to retake some cluster
                         * locks and initiate the io as buffered.
                         */
                        ret = ocfs2_check_range_for_holes(inode, saved_pos,
                                                          count);
                        if (ret == 1) {
                                *direct_io = 0;
                                ret = 0;
                        } else if (ret < 0)
                                mlog_errno(ret);
                        break;
                }

                /*
                 * The rest of this loop is concerned with legacy file
                 * systems which don't support sparse files.
                 */

                newsize = count + saved_pos;

                mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
                     (long long) saved_pos, (long long) newsize,
                     (long long) i_size_read(inode));

                /* No need for a higher level metadata lock if we're
                 * never going past i_size. */
                if (newsize <= i_size_read(inode))
                        break;

                if (meta_level == 0) {
                        ocfs2_meta_unlock(inode, meta_level);
                        meta_level = 1;
                        continue;
                }

                spin_lock(&OCFS2_I(inode)->ip_lock);
                clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
                           OCFS2_I(inode)->ip_clusters;
                spin_unlock(&OCFS2_I(inode)->ip_lock);

                mlog(0, "Writing at EOF, may need more allocation: "
                     "i_size = %lld, newsize = %lld, need %u clusters\n",
                     (long long) i_size_read(inode), (long long) newsize,
                     clusters);

                /* We only want to continue the rest of this loop if
                 * our extend will actually require more
                 * allocation. */
                if (!clusters)
                        break;

                ret = ocfs2_extend_file(inode, NULL, newsize, count);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out_unlock;
                }
                break;
        }

        if (appending)
                *ppos = saved_pos;

out_unlock:
        ocfs2_meta_unlock(inode, meta_level);

out:
        return ret;
}

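/*
 * Advance *iovp/*basep by 'bytes', stepping past any fully consumed
 * segments, much like the generic file write code does with its
 * iovec cursor.
 */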
static inline void
ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
        const struct iovec *iov = *iovp;
        size_t base = *basep;

        do {
                int copy = min(bytes, iov->iov_len - base);

                bytes -= copy;
                base += copy;
                if (iov->iov_len == base) {
                        iov++;
                        base = 0;
                }
        } while (bytes);
        *iovp = iov;
        *basep = base;
}

static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp,
                                            const struct iovec *cur_iov,
                                            size_t iov_offset)
{
        int ret;
        char *buf;
        struct page *src_page = NULL;

        buf = cur_iov->iov_base + iov_offset;

        if (!segment_eq(get_fs(), KERNEL_DS)) {
                /*
                 * Pull in the user page. We want to do this outside
                 * of the meta data locks in order to preserve locking
                 * order in case of page fault.
                 */
                ret = get_user_pages(current, current->mm,
                                     (unsigned long)buf & PAGE_CACHE_MASK, 1,
                                     0, 0, &src_page, NULL);
                if (ret == 1)
                        bp->b_src_buf = kmap(src_page);
                else
                        src_page = ERR_PTR(-EFAULT);
        } else {
                bp->b_src_buf = buf;
        }

        return src_page;
}

static void ocfs2_put_write_source(struct ocfs2_buffered_write_priv *bp,
                                   struct page *page)
{
        if (page) {
                kunmap(page);
                page_cache_release(page);
        }
}

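/*
 * Copy user data into the page cache one cluster at a time via
 * ocfs2_buffered_write_cluster(). o_direct_written tells us how much
 * a preceding partial direct write already consumed, so the iovec
 * cursor can be advanced past it first.
 */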
static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
                                         const struct iovec *iov,
                                         unsigned long nr_segs,
                                         size_t count,
                                         ssize_t o_direct_written)
{
        int ret = 0;
        ssize_t copied, total = 0;
        size_t iov_offset = 0;
        const struct iovec *cur_iov = iov;
        struct ocfs2_buffered_write_priv bp;
        struct page *page;

        /*
         * handle partial DIO write. Adjust cur_iov if needed.
         */
        ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);

        do {
                bp.b_cur_off = iov_offset;
                bp.b_cur_iov = cur_iov;

                page = ocfs2_get_write_source(&bp, cur_iov, iov_offset);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }

                copied = ocfs2_buffered_write_cluster(file, *ppos, count,
                                                      ocfs2_map_and_write_user_data,
                                                      &bp);

                ocfs2_put_write_source(&bp, page);

                if (copied < 0) {
                        mlog_errno(copied);
                        ret = copied;
                        goto out;
                }

                total += copied;
                *ppos = *ppos + copied;
                count -= copied;

                ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);
        } while(count);

out:
        return total ? total : ret;
}

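/*
 * Validate a write iovec: sum the segment lengths into *counted,
 * returning -EINVAL if any length is negative or the total wraps,
 * and -EFAULT if even the first segment fails access_ok(). Later
 * unreadable segments just trim *nr_segs.
 */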
static int ocfs2_check_iovec(const struct iovec *iov, size_t *counted,
                             unsigned long *nr_segs)
{
        size_t ocount;          /* original count */
        unsigned long seg;

        ocount = 0;
        for (seg = 0; seg < *nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                ocount += iv->iov_len;
                if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                *nr_segs = seg;
                ocount -= iv->iov_len;  /* This segment is no good */
                break;
        }

        *counted = ocount;
        return 0;
}

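/*
 * ->aio_write() entry point. Takes i_mutex, i_alloc_sem (for
 * O_DIRECT) and the rw cluster lock, then lets
 * ocfs2_prepare_inode_for_write() do any allocation. A direct I/O
 * that can't proceed as such - a hole in the range, or a
 * size-extending write - is restarted as buffered I/O via the
 * relock label.
 */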
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs,
                                    loff_t pos)
{
        int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
        int can_do_direct, sync = 0;
        ssize_t written = 0;
        size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        loff_t *ppos = &iocb->ki_pos;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode;

        mlog_entry("(0x%p, %u, '%.*s')\n", file,
                   (unsigned int)nr_segs,
                   file->f_path.dentry->d_name.len,
                   file->f_path.dentry->d_name.name);

        if (iocb->ki_left == 0)
                return 0;

        ret = ocfs2_check_iovec(iov, &ocount, &nr_segs);
        if (ret)
                return ret;

        count = ocount;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        appending = file->f_flags & O_APPEND ? 1 : 0;
        direct_io = file->f_flags & O_DIRECT ? 1 : 0;

        mutex_lock(&inode->i_mutex);

relock:
        /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
        if (direct_io) {
                down_read(&inode->i_alloc_sem);
                have_alloc_sem = 1;
        }

        /* concurrent O_DIRECT writes are allowed */
        rw_level = !direct_io;
        ret = ocfs2_rw_lock(inode, rw_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_sems;
        }

        can_do_direct = direct_io;
        ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
                                            iocb->ki_left, appending,
                                            &can_do_direct);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * We can't complete the direct I/O as requested, fall back to
         * buffered I/O.
         */
        if (direct_io && !can_do_direct) {
                ocfs2_rw_unlock(inode, rw_level);
                up_read(&inode->i_alloc_sem);

                have_alloc_sem = 0;
                rw_level = -1;

                direct_io = 0;
                sync = 1;
                goto relock;
        }

        if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
                sync = 1;

        /*
         * XXX: Is it ok to execute these checks a second time?
         */
        ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out;

        /*
         * Set pos so that sync_page_range_nolock() below understands
         * where to start from. We might've moved it around via the
         * calls above. The range we want to actually sync starts from
         * *ppos here.
         *
         */
        pos = *ppos;

        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb);

        if (direct_io) {
                written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
                                                    ppos, count, ocount);
                if (written < 0) {
                        ret = written;
                        goto out_dio;
                }
        } else {
                written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
                                                    count, written);
                if (written < 0) {
                        ret = written;
                        if (ret != -EFAULT && ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }
        }

out_dio:
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

        /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
         * function pointer which is called when o_direct io completes so that
         * it can unlock our rw lock.  (it's the clustered equivalent of
         * i_alloc_sem; protects truncate from racing with pending ios).
         * Unfortunately there are error cases which call end_io and others
         * that don't, so we don't have to unlock the rw_lock if either an
         * async dio is going to do it in the future or an end_io after an
         * error has already done it.
         */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
        }

out:
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);

out_sems:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);

        if (written > 0 && sync) {
                ssize_t err;

                err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
                if (err < 0)
                        written = err;
        }

        mutex_unlock(&inode->i_mutex);

        mlog_exit(ret);
        return written ? written : ret;
}

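/*
 * ->splice_write(). Mirrors the buffered write path: take the rw
 * cluster lock, let ocfs2_prepare_inode_for_write() handle i_size
 * and allocation, then hand off to
 * generic_file_splice_write_nolock().
 */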
static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
                                       struct file *out,
                                       loff_t *ppos,
                                       size_t len,
                                       unsigned int flags)
{
        int ret;
        struct inode *inode = out->f_path.dentry->d_inode;

        mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
                   (unsigned int)len,
                   out->f_path.dentry->d_name.len,
                   out->f_path.dentry->d_name.name);

        inode_double_lock(inode, pipe->inode);

        ret = ocfs2_rw_lock(inode, 1);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
                                            NULL);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_unlock;
        }

        /* ok, we're done with i_size and alloc work */
        ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);

out_unlock:
        ocfs2_rw_unlock(inode, 1);
out:
        inode_double_unlock(inode, pipe->inode);

        mlog_exit(ret);
        return ret;
}

static ssize_t ocfs2_file_splice_read(struct file *in,
                                      loff_t *ppos,
                                      struct pipe_inode_info *pipe,
                                      size_t len,
                                      unsigned int flags)
{
        int ret = 0;
        struct inode *inode = in->f_path.dentry->d_inode;

        mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
                   (unsigned int)len,
                   in->f_path.dentry->d_name.len,
                   in->f_path.dentry->d_name.name);

        /*
         * See the comment in ocfs2_file_aio_read()
         */
        ret = ocfs2_meta_lock(inode, NULL, 0);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }
        ocfs2_meta_unlock(inode, 0);

        ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
        mlog_exit(ret);
        return ret;
}

static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                                   const struct iovec *iov,
                                   unsigned long nr_segs,
                                   loff_t pos)
{
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = filp->f_path.dentry->d_inode;

        mlog_entry("(0x%p, %u, '%.*s')\n", filp,
                   (unsigned int)nr_segs,
                   filp->f_path.dentry->d_name.len,
                   filp->f_path.dentry->d_name.name);

        if (!inode) {
                ret = -EINVAL;
                mlog_errno(ret);
                goto bail;
        }

        /*
         * buffered reads protect themselves in ->readpage(). O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */
        if (filp->f_flags & O_DIRECT) {
                down_read(&inode->i_alloc_sem);
                have_alloc_sem = 1;

                ret = ocfs2_rw_lock(inode, 0);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto bail;
                }
                rw_level = 0;
                /* communicate with ocfs2_dio_end_io */
                ocfs2_iocb_set_rw_locked(iocb);
        }

        /*
         * We're fine letting folks race truncates and extending
         * writes with read across the cluster, just like they can
         * locally. Hence no rw_lock during read.
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
         * generic_file_aio_read() a chance of actually working.
         */
        ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }
        ocfs2_meta_unlock(inode, lock_level);

        ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
        if (ret == -EINVAL)
                mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");

        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

        /* see ocfs2_file_aio_write */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
        }

bail:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
        mlog_exit(ret);

        return ret;
}

const struct inode_operations ocfs2_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
        .permission     = ocfs2_permission,
};

const struct inode_operations ocfs2_special_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
        .permission     = ocfs2_permission,
};

const struct file_operations ocfs2_fops = {
        .read           = do_sync_read,
        .write          = do_sync_write,
        .sendfile       = generic_file_sendfile,
        .mmap           = ocfs2_mmap,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
        .aio_read       = ocfs2_file_aio_read,
        .aio_write      = ocfs2_file_aio_write,
        .ioctl          = ocfs2_ioctl,
        .splice_read    = ocfs2_file_splice_read,
        .splice_write   = ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops = {
        .read           = generic_read_dir,
        .readdir        = ocfs2_readdir,
        .fsync          = ocfs2_sync_file,
        .ioctl          = ocfs2_ioctl,
};