/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

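/*
 * alloc_vfsmnt - allocate and minimally initialise a new vfsmount.
 * All the list heads start out empty and the writer count at zero;
 * the caller is responsible for wiring the mount into a namespace.
 * Returns NULL if the allocation fails.
 */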
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*.  This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

struct mnt_writer {
	/*
	 * If holding multiple instances of this lock, they
	 * must be ordered by cpu number.
	 */
	spinlock_t lock;
	struct lock_class_key lock_class; /* compiles out with !lockdep */
	unsigned long count;
	struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);

static int __init init_mnt_writers(void)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
		spin_lock_init(&writer->lock);
		lockdep_set_class(&writer->lock, &writer->lock_class);
		writer->count = 0;
	}
	return 0;
}
fs_initcall(init_mnt_writers);

static void unlock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_unlock(&cpu_writer->lock);
	}
}

static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
	if (!cpu_writer->mnt)
		return;
	/*
	 * This is in case anyone ever leaves an invalid,
	 * old ->mnt and a count of 0.
	 */
	if (!cpu_writer->count)
		return;
	atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
	cpu_writer->count = 0;
}
/*
 * must hold cpu_writer->lock
 */
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
					    struct vfsmount *mnt)
{
	if (cpu_writer->mnt == mnt)
		return;
	__clear_mnt_count(cpu_writer);
	cpu_writer->mnt = mnt;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);
	if (__mnt_is_readonly(mnt)) {
		ret = -EROFS;
		goto out;
	}
	use_cpu_writer_for_mount(cpu_writer, mnt);
	cpu_writer->count++;
out:
	spin_unlock(&cpu_writer->lock);
	put_cpu_var(mnt_writers);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

static void lock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		__clear_mnt_count(cpu_writer);
		cpu_writer->mnt = NULL;
	}
}

/*
 * These per-cpu write counts are not guaranteed to have
 * matched increments and decrements on any given cpu.
 * A file open()ed for write on one cpu and close()d on
 * another cpu will imbalance this count.  Make sure it
 * does not get too far out of whack.
 */
static void handle_write_count_underflow(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->__mnt_writers) >=
	    MNT_WRITER_UNDERFLOW_LIMIT)
		return;
	/*
	 * It isn't necessary to hold all of the locks
	 * at the same time, but doing it this way makes
	 * us share a lot more code.
	 */
	lock_mnt_writers();
	/*
	 * vfsmount_lock is for mnt_flags.
	 */
	spin_lock(&vfsmount_lock);
	/*
	 * If coalescing the per-cpu writer counts did not
	 * get us back to a positive writer count, we have
	 * a bug.
	 */
	if ((atomic_read(&mnt->__mnt_writers) < 0) &&
	    !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
		printk(KERN_DEBUG "leak detected on mount(%p) writers "
				"count: %d\n",
			mnt, atomic_read(&mnt->__mnt_writers));
		WARN_ON(1);
		/* use the flag to keep the dmesg spam down */
		mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
	}
	spin_unlock(&vfsmount_lock);
	unlock_mnt_writers();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	int must_check_underflow = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);

	use_cpu_writer_for_mount(cpu_writer, mnt);
	if (cpu_writer->count > 0) {
		cpu_writer->count--;
	} else {
		must_check_underflow = 1;
		atomic_dec(&mnt->__mnt_writers);
	}

	spin_unlock(&cpu_writer->lock);
	/*
	 * Logically, we could call this each time,
	 * but the __mnt_writers cacheline tends to
	 * be cold, and makes this expensive.
	 */
	if (must_check_underflow)
		handle_write_count_underflow(mnt);
	/*
	 * This could be done right after the spinlock
	 * is taken because the spinlock keeps us on
	 * the cpu, and disables preemption.  However,
	 * putting it here bounds the amount that
	 * __mnt_writers can underflow.  Without it,
	 * we could theoretically wrap __mnt_writers.
	 */
	put_cpu_var(mnt_writers);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

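/*
 * mnt_make_readonly - flip a mount to read-only.
 * With every per-cpu mnt_writer lock held the aggregate writer count is
 * stable; if any writers remain the transition fails with -EBUSY,
 * otherwise MNT_READONLY is set under vfsmount_lock.
 */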
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	lock_mnt_writers();
	/*
	 * With all the locks held, this value is stable
	 */
	if (atomic_read(&mnt->__mnt_writers) > 0) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * nobody can do a successful mnt_want_write() with all
	 * of the counts in MNT_DENIED_WRITE and the locks held.
	 */
	spin_lock(&vfsmount_lock);
	if (!ret)
		mnt->mnt_flags |= MNT_READONLY;
	spin_unlock(&vfsmount_lock);
out:
	unlock_mnt_writers();
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}

int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	return 0;
}

EXPORT_SYMBOL(simple_set_mnt);

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

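/*
 * Bump the namespace-wide event counter and wake anyone sleeping on the
 * namespace poll queue.  The __touch variant only repeats the wakeup for
 * an event that has already been counted.
 */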
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

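/*
 * detach_mnt - unhook @mnt from its parent and remember the old parent
 * mount and mountpoint dentry in @old_path so the caller can drop the
 * references (or reattach the mount) later.
 */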
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

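/*
 * next_mnt - depth-first traversal of the mount tree rooted at @root.
 * Returns the next mount after @p, or NULL once the whole tree has
 * been visited.
 */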
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

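/*
 * clone_mnt - create a copy of @old rooted at the dentry @root.
 * The CL_SLAVE, CL_PRIVATE, CL_PROPAGATION, CL_MAKE_SHARED and CL_EXPIRE
 * flags control how the clone is linked into the peer/slave lists and
 * whether it inherits the original's expiry list.
 */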
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;
}

static inline void __mntput(struct vfsmount *mnt)
{
	int cpu;
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * We don't have to hold all of the locks at the
	 * same time here because we know that we're the
	 * last reference to mnt and that no new writers
	 * can come in.
	 */
	for_each_possible_cpu(cpu) {
		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
		if (cpu_writer->mnt != mnt)
			continue;
		spin_lock(&cpu_writer->lock);
		atomic_add(cpu_writer->count, &mnt->__mnt_writers);
		cpu_writer->count = 0;
		/*
		 * Might as well do this so that no one
		 * ever sees the pointer and expects
		 * it to be valid.
		 */
		cpu_writer->mnt = NULL;
		spin_unlock(&cpu_writer->lock);
	}
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	WARN_ON(atomic_read(&mnt->__mnt_writers));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

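/*
 * mntput_no_expire - drop a reference without touching the expiry mark.
 * When the last reference goes away, any pinned references are folded
 * back into mnt_count, the accounting and security hooks run, and the
 * mount is finally freed via __mntput().
 */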
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options = mnt->mnt_sb->s_options;

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);

/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct mnt_namespace *n = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&n->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mnt_namespace *n = m->private;

	return seq_list_next(v, &n->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

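/*
 * show_vfsmnt - emit one line of the mounts seq_file: device, mount
 * point, filesystem type, rw/ro state and the generic flag strings,
 * followed by any filesystem-specific options via ->show_options().
 */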
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, mnt->mnt_sb->s_subtype);
	}
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	mangle(m, mnt->mnt_sb->s_type->name);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts.  If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account.  IOW, in some cases it will
 * give false negatives.  The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

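/*
 * umount_tree - unhash every mount in the tree rooted at @mnt and move
 * them onto @kill.  Called with vfsmount_lock held; the caller passes
 * @kill to release_mounts() after dropping the lock to free everything.
 */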
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	lock_kernel();
	if (sb->s_op->umount_begin)
		sb->s_op->umount_begin(mnt, flags);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.path.dentry != nd.path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(nd.path.dentry);
	mntput_no_expire(nd.path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->path.dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

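/*
 * copy_tree - clone the tree of mounts hanging below @dentry on @mnt.
 * On allocation failure everything cloned so far is torn down again and
 * NULL is returned.
 */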
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

/*
 * @source_mnt : mount tree to be attached
 * @path       : place the mount tree @source_mnt is attached
 * @parent_path: if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;

	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
		return -EINVAL;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;
}

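/*
 * graft_tree - attach @mnt at @path after the basic sanity checks:
 * no MS_NOUSER superblocks, the mount root and the mountpoint must both
 * be directories (or both non-directories), and the mountpoint dentry
 * must not be dead or unhashed.
 */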
static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->path.mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	return 0;
}

/*
 * do loopback mount.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_loopback(struct nameidata *nd, char *old_name,
				int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
		goto out;

	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
	else
		mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, &nd->path);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_nd.path);
	return err;
}

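/*
 * change_mount_flags - handle the read-only part of a per-mount remount:
 * only the MNT_READONLY flag is toggled here, via mnt_make_readonly()
 * or __mnt_unmake_readonly(), without calling into the filesystem.
 */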
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->path.mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->path.mnt))
		return -EINVAL;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(nd->path.mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->path.mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->path.mnt, flags, data);
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/*
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd;
	struct path parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->path.dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->path.dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
		goto out1;

	if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->path.mnt) &&
	    tree_contains_unbindable(old_nd.path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.path.mnt->mnt_expire);
out1:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_nd.path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->path.mnt->mnt_root == nd->path.dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, &nd->path)))
		goto unlock;

	if (fslist)	/* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(mnt->mnt_ns);
			umount_tree(mnt, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_RELATIME)
		mnt_flags |= MNT_RELATIME;
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd.path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&nd.path);
	return retval;
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget(q);
			}
			if (p == fs->altroot.mnt) {
				altrootmnt = p;
				fs->altroot.mnt = mntget(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	return new_ns;
}

213dd266 1842struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
e3222c4e 1843 struct fs_struct *new_fs)
741a2951 1844{
6b3286ed 1845 struct mnt_namespace *new_ns;
741a2951 1846
e3222c4e 1847 BUG_ON(!ns);
6b3286ed 1848 get_mnt_ns(ns);
741a2951
JD
1849
1850 if (!(flags & CLONE_NEWNS))
e3222c4e 1851 return ns;
741a2951 1852
e3222c4e 1853 new_ns = dup_mnt_ns(ns, new_fs);
741a2951 1854
6b3286ed 1855 put_mnt_ns(ns);
e3222c4e 1856 return new_ns;
1da177e4
LT
1857}
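/*
 * Editorial sketch (not kernel code): copy_mnt_ns() above is reached from
 * clone(2)/unshare(2).  Without CLONE_NEWNS the caller just takes another
 * reference on the existing namespace; with it, dup_mnt_ns() copies the
 * whole mount tree, so mounts made afterwards stay private to the caller's
 * copy.  The tmpfs mount on /tmp/private is a made-up example, and
 * CAP_SYS_ADMIN is required for both calls.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>

int example_private_mount(void)
{
	if (unshare(CLONE_NEWNS) < 0) {	/* ends up in dup_mnt_ns() */
		perror("unshare");
		return -1;
	}
	/* only visible in this process's copy of the mount namespace */
	if (mount("none", "/tmp/private", "tmpfs", 0, NULL) < 0) {
		perror("mount");
		return -1;
	}
	return 0;
}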
1858
1859asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
1860 char __user * type, unsigned long flags,
1861 void __user * data)
1862{
1863 int retval;
1864 unsigned long data_page;
1865 unsigned long type_page;
1866 unsigned long dev_page;
1867 char *dir_page;
1868
b58fed8b 1869 retval = copy_mount_options(type, &type_page);
1da177e4
LT
1870 if (retval < 0)
1871 return retval;
1872
1873 dir_page = getname(dir_name);
1874 retval = PTR_ERR(dir_page);
1875 if (IS_ERR(dir_page))
1876 goto out1;
1877
b58fed8b 1878 retval = copy_mount_options(dev_name, &dev_page);
1da177e4
LT
1879 if (retval < 0)
1880 goto out2;
1881
b58fed8b 1882 retval = copy_mount_options(data, &data_page);
1da177e4
LT
1883 if (retval < 0)
1884 goto out3;
1885
1886 lock_kernel();
b58fed8b
RP
1887 retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
1888 flags, (void *)data_page);
1da177e4
LT
1889 unlock_kernel();
1890 free_page(data_page);
1891
1892out3:
1893 free_page(dev_page);
1894out2:
1895 putname(dir_page);
1896out1:
1897 free_page(type_page);
1898 return retval;
1899}
1900
1901/*
1902 * Replace fs->root with the given path and put the old root.
1903 * It can block.
1904 */
ac748a09 1905void set_fs_root(struct fs_struct *fs, struct path *path)
1da177e4 1906{
6ac08c39
JB
1907 struct path old_root;
1908
1da177e4
LT
1909 write_lock(&fs->lock);
1910 old_root = fs->root;
ac748a09
JB
1911 fs->root = *path;
1912 path_get(path);
1da177e4 1913 write_unlock(&fs->lock);
6ac08c39
JB
1914 if (old_root.dentry)
1915 path_put(&old_root);
1da177e4
LT
1916}
1917
1918/*
1919 * Replace fs->pwd with the given path and put the old pwd.
1920 * It can block.
1921 */
ac748a09 1922void set_fs_pwd(struct fs_struct *fs, struct path *path)
1da177e4 1923{
6ac08c39 1924 struct path old_pwd;
1da177e4
LT
1925
1926 write_lock(&fs->lock);
1927 old_pwd = fs->pwd;
ac748a09
JB
1928 fs->pwd = *path;
1929 path_get(path);
1da177e4
LT
1930 write_unlock(&fs->lock);
1931
6ac08c39
JB
1932 if (old_pwd.dentry)
1933 path_put(&old_pwd);
1da177e4
LT
1934}
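/*
 * Editorial sketch (not part of this file): the typical caller pattern for
 * set_fs_root()/set_fs_pwd().  The caller resolves a path, hands it over
 * (set_fs_* takes its own reference via path_get() and drops the old one),
 * then releases its lookup reference.  example_chdir() is a made-up name;
 * the real chdir path also performs a permission check, omitted here.
 */
static int example_chdir(const char *name)
{
	struct nameidata nd;
	int error;

	error = path_lookup(name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
	if (error)
		return error;

	set_fs_pwd(current->fs, &nd.path);	/* takes its own reference */
	path_put(&nd.path);			/* drop the lookup reference */
	return 0;
}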
1935
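/*
 * Walk every task and, for each fs_struct whose root or pwd still points at
 * old_root, repoint it at new_root.  Used by sys_pivot_root() below so that
 * processes living on the old root follow it to the new one.
 */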
1a390689 1936static void chroot_fs_refs(struct path *old_root, struct path *new_root)
1da177e4
LT
1937{
1938 struct task_struct *g, *p;
1939 struct fs_struct *fs;
1940
1941 read_lock(&tasklist_lock);
1942 do_each_thread(g, p) {
1943 task_lock(p);
1944 fs = p->fs;
1945 if (fs) {
1946 atomic_inc(&fs->count);
1947 task_unlock(p);
1a390689
AV
1948 if (fs->root.dentry == old_root->dentry
1949 && fs->root.mnt == old_root->mnt)
1950 set_fs_root(fs, new_root);
1951 if (fs->pwd.dentry == old_root->dentry
1952 && fs->pwd.mnt == old_root->mnt)
1953 set_fs_pwd(fs, new_root);
1da177e4
LT
1954 put_fs_struct(fs);
1955 } else
1956 task_unlock(p);
1957 } while_each_thread(g, p);
1958 read_unlock(&tasklist_lock);
1959}
1960
1961/*
1962 * pivot_root Semantics:
1963 * Moves the root file system of the current process to the directory put_old,
1964 * makes new_root the new root file system of the current process, and sets
1965 * root/cwd of all processes which had them on the current root to new_root.
1966 *
1967 * Restrictions:
1968 * The new_root and put_old must be directories, and must not be on the
1969 * same file system as the current process root. The put_old must be
1970 * underneath new_root, i.e. adding a non-zero number of /.. to the string
1971 * pointed to by put_old must yield the same directory as new_root. No other
1972 * file system may be mounted on put_old. After all, new_root is a mountpoint.
1973 *
4a0d11fa
NB
1974 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
1975 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
1976 * in this situation.
1977 *
1da177e4
LT
1978 * Notes:
1979 * - we don't move root/cwd if they are not at the root (reason: if something
1980 * cared enough to change them, it's probably wrong to force them elsewhere)
1981 * - it's okay to pick a root that isn't the root of a file system, e.g.
1982 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
1983 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
1984 * first.  A usage sketch follows sys_pivot_root() below.
1985 */
b58fed8b
RP
1986asmlinkage long sys_pivot_root(const char __user * new_root,
1987 const char __user * put_old)
1da177e4
LT
1988{
1989 struct vfsmount *tmp;
8c3ee42e
AV
1990 struct nameidata new_nd, old_nd;
1991 struct path parent_path, root_parent, root;
1da177e4
LT
1992 int error;
1993
1994 if (!capable(CAP_SYS_ADMIN))
1995 return -EPERM;
1996
b58fed8b
RP
1997 error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
1998 &new_nd);
1da177e4
LT
1999 if (error)
2000 goto out0;
2001 error = -EINVAL;
4ac91378 2002 if (!check_mnt(new_nd.path.mnt))
1da177e4
LT
2003 goto out1;
2004
b58fed8b 2005 error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
1da177e4
LT
2006 if (error)
2007 goto out1;
2008
b5266eb4 2009 error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
1da177e4 2010 if (error) {
1d957f9b 2011 path_put(&old_nd.path);
1da177e4
LT
2012 goto out1;
2013 }
2014
2015 read_lock(&current->fs->lock);
8c3ee42e 2016 root = current->fs->root;
6ac08c39 2017 path_get(&current->fs->root);
1da177e4 2018 read_unlock(&current->fs->lock);
390c6843 2019 down_write(&namespace_sem);
4ac91378 2020 mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
1da177e4 2021 error = -EINVAL;
4ac91378
JB
2022 if (IS_MNT_SHARED(old_nd.path.mnt) ||
2023 IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
8c3ee42e 2024 IS_MNT_SHARED(root.mnt->mnt_parent))
21444403 2025 goto out2;
8c3ee42e 2026 if (!check_mnt(root.mnt))
1da177e4
LT
2027 goto out2;
2028 error = -ENOENT;
4ac91378 2029 if (IS_DEADDIR(new_nd.path.dentry->d_inode))
1da177e4 2030 goto out2;
4ac91378 2031 if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
1da177e4 2032 goto out2;
4ac91378 2033 if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
1da177e4
LT
2034 goto out2;
2035 error = -EBUSY;
8c3ee42e
AV
2036 if (new_nd.path.mnt == root.mnt ||
2037 old_nd.path.mnt == root.mnt)
1da177e4
LT
2038 goto out2; /* loop, on the same file system */
2039 error = -EINVAL;
8c3ee42e 2040 if (root.mnt->mnt_root != root.dentry)
1da177e4 2041 goto out2; /* not a mountpoint */
8c3ee42e 2042 if (root.mnt->mnt_parent == root.mnt)
0bb6fcc1 2043 goto out2; /* not attached */
4ac91378 2044 if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
1da177e4 2045 goto out2; /* not a mountpoint */
4ac91378 2046 if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
0bb6fcc1 2047 goto out2; /* not attached */
4ac91378
JB
2048 /* make sure we can reach put_old from new_root */
2049 tmp = old_nd.path.mnt;
1da177e4 2050 spin_lock(&vfsmount_lock);
4ac91378 2051 if (tmp != new_nd.path.mnt) {
1da177e4
LT
2052 for (;;) {
2053 if (tmp->mnt_parent == tmp)
2054 goto out3; /* already mounted on put_old */
4ac91378 2055 if (tmp->mnt_parent == new_nd.path.mnt)
1da177e4
LT
2056 break;
2057 tmp = tmp->mnt_parent;
2058 }
4ac91378 2059 if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
1da177e4 2060 goto out3;
4ac91378 2061 } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
1da177e4 2062 goto out3;
1a390689 2063 detach_mnt(new_nd.path.mnt, &parent_path);
8c3ee42e 2064 detach_mnt(root.mnt, &root_parent);
4ac91378 2065 /* mount old root on put_old */
8c3ee42e 2066 attach_mnt(root.mnt, &old_nd.path);
4ac91378
JB
2067 /* mount new_root on / */
2068 attach_mnt(new_nd.path.mnt, &root_parent);
6b3286ed 2069 touch_mnt_namespace(current->nsproxy->mnt_ns);
1da177e4 2070 spin_unlock(&vfsmount_lock);
8c3ee42e
AV
2071 chroot_fs_refs(&root, &new_nd.path);
2072 security_sb_post_pivotroot(&root, &new_nd.path);
1da177e4 2073 error = 0;
1a390689
AV
2074 path_put(&root_parent);
2075 path_put(&parent_path);
1da177e4 2076out2:
4ac91378 2077 mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
390c6843 2078 up_write(&namespace_sem);
8c3ee42e 2079 path_put(&root);
1d957f9b 2080 path_put(&old_nd.path);
1da177e4 2081out1:
1d957f9b 2082 path_put(&new_nd.path);
1da177e4 2083out0:
1da177e4
LT
2084 return error;
2085out3:
2086 spin_unlock(&vfsmount_lock);
2087 goto out2;
2088}
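/*
 * Editorial sketch (not kernel code): a typical userspace sequence for the
 * syscall above, following the restrictions documented before it -- new_root
 * must be a mount point (hence the bind mount onto itself) and put_old must
 * be an existing directory underneath it.  The /newroot paths are made up,
 * and syscall(2) is used directly since glibc provides no wrapper here.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/syscall.h>

int example_pivot(void)
{
	/* make /newroot a mount point, as the Notes above recommend */
	if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL) < 0) {
		perror("bind mount");
		return -1;
	}
	/* old root ends up on /newroot/oldroot, /newroot becomes / */
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") < 0) {
		perror("pivot_root");
		return -1;
	}
	if (chdir("/") < 0)
		perror("chdir");
	/* the old root can later be dropped: umount2("/oldroot", MNT_DETACH) */
	return 0;
}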
2089
2090static void __init init_mount_tree(void)
2091{
2092 struct vfsmount *mnt;
6b3286ed 2093 struct mnt_namespace *ns;
ac748a09 2094 struct path root;
1da177e4
LT
2095
2096 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2097 if (IS_ERR(mnt))
2098 panic("Can't create rootfs");
6b3286ed
KK
2099 ns = kmalloc(sizeof(*ns), GFP_KERNEL);
2100 if (!ns)
1da177e4 2101 panic("Can't allocate initial namespace");
6b3286ed
KK
2102 atomic_set(&ns->count, 1);
2103 INIT_LIST_HEAD(&ns->list);
2104 init_waitqueue_head(&ns->poll);
2105 ns->event = 0;
2106 list_add(&mnt->mnt_list, &ns->list);
2107 ns->root = mnt;
2108 mnt->mnt_ns = ns;
2109
2110 init_task.nsproxy->mnt_ns = ns;
2111 get_mnt_ns(ns);
2112
ac748a09
JB
2113 root.mnt = ns->root;
2114 root.dentry = ns->root->mnt_root;
2115
2116 set_fs_pwd(current->fs, &root);
2117 set_fs_root(current->fs, &root);
1da177e4
LT
2118}
2119
74bf17cf 2120void __init mnt_init(void)
1da177e4 2121{
13f14b4d 2122 unsigned u;
15a67dd8 2123 int err;
1da177e4 2124
390c6843
RP
2125 init_rwsem(&namespace_sem);
2126
1da177e4 2127 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
20c2df83 2128 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1da177e4 2129
b58fed8b 2130 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
1da177e4
LT
2131
2132 if (!mount_hashtable)
2133 panic("Failed to allocate mount hash table\n");
2134
13f14b4d
ED
2135 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
2136
2137 for (u = 0; u < HASH_SIZE; u++)
2138 INIT_LIST_HEAD(&mount_hashtable[u]);
1da177e4 2139
15a67dd8
RD
2140 err = sysfs_init();
2141 if (err)
2142 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
2143 __FUNCTION__, err);
00d26666
GKH
2144 fs_kobj = kobject_create_and_add("fs", NULL);
2145 if (!fs_kobj)
2146 printk(KERN_WARNING "%s: kobj create error\n", __FUNCTION__);
1da177e4
LT
2147 init_rootfs();
2148 init_mount_tree();
2149}
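/*
 * Editorial note (an assumption about the table sizing above): HASH_SIZE
 * appears to be chosen so the chain heads fill the single page obtained from
 * __get_free_page().  With 4 KiB pages and 64-bit pointers
 * (sizeof(struct list_head) == 16) that is 4096 / 16 = 256 hash chains,
 * so the printk above would report 256 entries.
 */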
2150
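/*
 * Tear down a mount namespace once its last reference is gone.  Note the
 * unlock at the top: this is entered with vfsmount_lock already held by the
 * caller; it drops that lock, takes namespace_sem, unmounts the whole tree
 * under the relocked vfsmount_lock, and releases the mounts and the
 * namespace structure outside the locks.
 */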
6b3286ed 2151void __put_mnt_ns(struct mnt_namespace *ns)
1da177e4 2152{
6b3286ed 2153 struct vfsmount *root = ns->root;
70fbcdf4 2154 LIST_HEAD(umount_list);
6b3286ed 2155 ns->root = NULL;
1ce88cf4 2156 spin_unlock(&vfsmount_lock);
390c6843 2157 down_write(&namespace_sem);
1da177e4 2158 spin_lock(&vfsmount_lock);
a05964f3 2159 umount_tree(root, 0, &umount_list);
1da177e4 2160 spin_unlock(&vfsmount_lock);
390c6843 2161 up_write(&namespace_sem);
70fbcdf4 2162 release_mounts(&umount_list);
6b3286ed 2163 kfree(ns);
1da177e4 2164}