/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_vfsops.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb,
	int			silent)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;
	if (silent)
		args->flags |= XFSMNT_QUIET;
	args->flags |= XFSMNT_32BITINODES;

	return args;
}

__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
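
/*
 * Worked example for the arithmetic above (illustrative only): with 4k
 * pages on a 32-bit kernel without CONFIG_LBD, a 4k-block filesystem gets
 * pagefactor = 4096 and bitshift = 31, i.e. a limit of (4096 << 31) - 1,
 * which is 8TiB - 1; a 1k-block filesystem gets pagefactor = 1024 and a
 * 2TiB - 1 limit.  With CONFIG_LBD the limit becomes (4096 << 32) - 1
 * (16TiB - 1), and on 64-bit kernels it is simply 2^63 - 1.
 */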

STATIC_INLINE void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (inode->i_blocks)
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}

STATIC_INLINE void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	bhv_vnode_t		*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	xfs_iflags_clear(ip, XFS_IMODIFIED);
}

void
xfs_initialize_vnode(
	struct xfs_mount	*mp,
	bhv_vnode_t		*vp,
	struct xfs_inode	*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	if (!ip->i_vnode) {
		ip->i_vnode = vp;
		inode->i_private = ip;
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish our work.
	 */
	if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(mp, vp, ip);
		xfs_set_inodeops(inode);

		xfs_iflags_clear(ip, XFS_INEW);
		barrier();

		unlock_new_inode(inode);
	}
}

int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}

void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	bhv_vnode_t		*vp;

	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
	if (unlikely(!vp))
		return NULL;
	return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
	void			*vnode,
	kmem_zone_t		*zonep,
	unsigned long		flags)
{
	inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

STATIC int
xfs_init_zones(void)
{
	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
					KM_ZONE_SPREAD,
					xfs_fs_inode_init_once);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;
	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_vnode_zone);
	kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	int			error = 0, flags = FLUSH_INODE;

	vn_trace_entry(XFS_I(inode), __FUNCTION__,
			(inst_t *)__return_address);
	if (sync) {
		filemap_fdatawait(inode->i_mapping);
		flags |= FLUSH_SYNC;
	}
	error = xfs_inode_flush(XFS_I(inode), flags);
	if (error == EAGAIN) {
		if (sync)
			error = xfs_inode_flush(XFS_I(inode),
						flags | FLUSH_LOG);
		else
			error = 0;
	}

	return -error;
}
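
/*
 * Note on the "return -error" pattern above and in the other methods
 * below: the XFS core routines report positive errno values, while the
 * Linux VFS expects negative ones, so the glue layer negates on the way
 * out.  xfs_blkdev_get() earlier in this file does the reverse, turning
 * the negative errno from open_bdev_excl() into a positive one for its
 * callers on the XFS side.
 */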

STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	/*
	 * ip can be null when xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	if (ip) {
		vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address);

		XFS_STATS_INC(vn_rele);
		XFS_STATS_INC(vn_remove);
		XFS_STATS_INC(vn_reclaim);
		XFS_STATS_DEC(vn_active);

		xfs_inactive(ip);
		xfs_iflags_clear(ip, XFS_IMODIFIED);
		if (xfs_reclaim(ip))
			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode);
	}

	ASSERT(XFS_I(inode) == NULL);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct bhv_vfs	*vfs,
	void		*data,
	void		(*syncer)(bhv_vfs_t *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_vfs = vfs;
	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
vfs_sync_worker(
	bhv_vfs_t	*vfsp,
	void		*unused)
{
	int		error;

	if (!(XFS_VFSTOM(vfsp)->m_flags & XFS_MOUNT_RDONLY))
		error = xfs_sync(XFS_VFSTOM(vfsp), SYNC_FSDATA | SYNC_BDFLUSH | \
				SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER);
	vfsp->vfs_sync_seq++;
	wake_up(&vfsp->vfs_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	bhv_vfs_t		*vfsp = (bhv_vfs_t *) arg;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}

STATIC int
xfs_fs_start_syncd(
	bhv_vfs_t		*vfsp)
{
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
	if (IS_ERR(vfsp->vfs_sync_task))
		return -PTR_ERR(vfsp->vfs_sync_task);
	return 0;
}

STATIC void
xfs_fs_stop_syncd(
	bhv_vfs_t		*vfsp)
{
	kthread_stop(vfsp->vfs_sync_task);
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	xfs_fs_stop_syncd(vfsp);
	xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
	error = xfs_unmount(mp, 0, NULL);
	if (error) {
		printk("XFS: unmount got error=%d\n", error);
		printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
	} else {
		vfs_deallocate(vfsp);
	}
}

STATIC void
xfs_fs_write_super(
	struct super_block	*sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		xfs_sync(XFS_M(sb), SYNC_FSDATA);
	sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;
	int			flags;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
		/*
		 * First stage of freeze - no more writers will make progress
		 * now we are here, so we flush delwri and delalloc buffers
		 * here, then wait for all I/O to complete.  Data is frozen at
		 * that point. Metadata is not frozen, transactions can still
		 * occur here so don't bother flushing the buftarg (i.e
		 * SYNC_QUIESCE) because it'll just get dirty again.
		 */
		flags = SYNC_DATA_QUIESCE;
	} else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	error = xfs_sync(mp, flags);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	return -xfs_statvfs(XFS_M(dentry->d_sb), statp,
				vn_from_inode(dentry->d_inode));
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = xfs_parseargs(mp, options, args, 1);
	if (!error)
		error = xfs_mntupdate(mp, flags, args);
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC void
xfs_fs_lockfs(
	struct super_block	*sb)
{
	xfs_freeze(XFS_M(sb));
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
	struct super_block	*sb,
	int			type)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb),
				 (type == USRQUOTA) ? Q_XGETQUOTA :
				  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
				   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -XFS_QM_QUOTACTL(XFS_M(sb),
				 (type == USRQUOTA) ? Q_XSETQLIM :
				  ((type == GRPQUOTA) ? Q_XSETGQLIM :
				   Q_XSETPQLIM), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*rootvp;
	struct bhv_vfs		*vfsp = vfs_allocate(sb);
	struct xfs_mount	*mp = NULL;
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	mp = xfs_mount_init();
	mp->m_vfsp = vfsp;
	vfsp->vfs_mount = mp;

	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	error = xfs_parseargs(mp, (char *)data, args, 0);
	if (error)
		goto fail_vfsop;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = xfs_mount(mp, args, NULL);
	if (error)
		goto fail_vfsop;

	error = xfs_statvfs(mp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_root(mp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = xfs_fs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__,
			(inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	xfs_unmount(mp, 0, NULL);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}

static struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};

static struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};


STATIC int __init
init_xfs_fs( void )
{
	int			error;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	ktrace_init(64);

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	vn_init();
	xfs_init();
	uuid_init();
	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	xfs_destroy_zones();

undo_zones:
	return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_cleanup();
	xfs_buf_terminate();
	xfs_destroy_zones();
	ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");