/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err) {
			kmem_cache_free(mnt_cache, mnt);
			return NULL;
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
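/*
 * Illustrative note (not part of the original file): __mnt_is_readonly()
 * only answers "is this mount read-only right now?".  A caller that merely
 * wants to fail early might do
 *
 *	if (__mnt_is_readonly(mnt))
 *		return -EROFS;
 *
 * but code that is actually about to write must bracket the operation with
 * mnt_want_write()/mnt_drop_write() below, which also holds off a concurrent
 * r/w -> r/o remount.
 */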
171
172struct mnt_writer {
173 /*
174 * If holding multiple instances of this lock, they
175 * must be ordered by cpu number.
176 */
177 spinlock_t lock;
178 struct lock_class_key lock_class; /* compiles out with !lockdep */
179 unsigned long count;
180 struct vfsmount *mnt;
181} ____cacheline_aligned_in_smp;
182static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
183
184static int __init init_mnt_writers(void)
185{
186 int cpu;
187 for_each_possible_cpu(cpu) {
188 struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
189 spin_lock_init(&writer->lock);
190 lockdep_set_class(&writer->lock, &writer->lock_class);
191 writer->count = 0;
192 }
193 return 0;
194}
195fs_initcall(init_mnt_writers);
196
197static void unlock_mnt_writers(void)
198{
199 int cpu;
200 struct mnt_writer *cpu_writer;
201
202 for_each_possible_cpu(cpu) {
203 cpu_writer = &per_cpu(mnt_writers, cpu);
204 spin_unlock(&cpu_writer->lock);
205 }
206}
207
208static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
209{
210 if (!cpu_writer->mnt)
211 return;
212 /*
213 * This is in case anyone ever leaves an invalid,
214 * old ->mnt and a count of 0.
215 */
216 if (!cpu_writer->count)
217 return;
218 atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
219 cpu_writer->count = 0;
220}
221 /*
222 * must hold cpu_writer->lock
223 */
224static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
225 struct vfsmount *mnt)
226{
227 if (cpu_writer->mnt == mnt)
228 return;
229 __clear_mnt_count(cpu_writer);
230 cpu_writer->mnt = mnt;
231}
232
8366025e
DH
233/*
234 * Most r/o checks on a fs are for operations that take
235 * discrete amounts of time, like a write() or unlink().
236 * We must keep track of when those operations start
237 * (for permission checks) and when they end, so that
238 * we can determine when writes are able to occur to
239 * a filesystem.
240 */
241/**
242 * mnt_want_write - get write access to a mount
243 * @mnt: the mount on which to take a write
244 *
245 * This tells the low-level filesystem that a write is
246 * about to be performed to it, and makes sure that
247 * writes are allowed before returning success. When
248 * the write operation is finished, mnt_drop_write()
249 * must be called. This is effectively a refcount.
250 */
251int mnt_want_write(struct vfsmount *mnt)
252{
3d733633
DH
253 int ret = 0;
254 struct mnt_writer *cpu_writer;
255
256 cpu_writer = &get_cpu_var(mnt_writers);
257 spin_lock(&cpu_writer->lock);
258 if (__mnt_is_readonly(mnt)) {
259 ret = -EROFS;
260 goto out;
261 }
262 use_cpu_writer_for_mount(cpu_writer, mnt);
263 cpu_writer->count++;
264out:
265 spin_unlock(&cpu_writer->lock);
266 put_cpu_var(mnt_writers);
267 return ret;
8366025e
DH
268}
269EXPORT_SYMBOL_GPL(mnt_want_write);
270
3d733633
DH
271static void lock_mnt_writers(void)
272{
273 int cpu;
274 struct mnt_writer *cpu_writer;
275
276 for_each_possible_cpu(cpu) {
277 cpu_writer = &per_cpu(mnt_writers, cpu);
278 spin_lock(&cpu_writer->lock);
279 __clear_mnt_count(cpu_writer);
280 cpu_writer->mnt = NULL;
281 }
282}
283
284/*
285 * These per-cpu write counts are not guaranteed to have
286 * matched increments and decrements on any given cpu.
287 * A file open()ed for write on one cpu and close()d on
288 * another cpu will imbalance this count. Make sure it
289 * does not get too far out of whack.
290 */
291static void handle_write_count_underflow(struct vfsmount *mnt)
292{
293 if (atomic_read(&mnt->__mnt_writers) >=
294 MNT_WRITER_UNDERFLOW_LIMIT)
295 return;
296 /*
297 * It isn't necessary to hold all of the locks
298 * at the same time, but doing it this way makes
299 * us share a lot more code.
300 */
301 lock_mnt_writers();
302 /*
303 * vfsmount_lock is for mnt_flags.
304 */
305 spin_lock(&vfsmount_lock);
306 /*
307 * If coalescing the per-cpu writer counts did not
308 * get us back to a positive writer count, we have
309 * a bug.
310 */
311 if ((atomic_read(&mnt->__mnt_writers) < 0) &&
312 !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
313 printk(KERN_DEBUG "leak detected on mount(%p) writers "
314 "count: %d\n",
315 mnt, atomic_read(&mnt->__mnt_writers));
316 WARN_ON(1);
317 /* use the flag to keep the dmesg spam down */
318 mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
319 }
320 spin_unlock(&vfsmount_lock);
321 unlock_mnt_writers();
322}
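/*
 * Worked example (illustrative, not from the original source): a file opened
 * for write on CPU0 bumps CPU0's per-cpu count to 1; if it is later closed on
 * CPU1, mnt_drop_write() finds CPU1's count at 0 and instead decrements
 * mnt->__mnt_writers to -1.  Repeat this often enough and __mnt_writers sinks
 * toward MNT_WRITER_UNDERFLOW_LIMIT, at which point the code above folds the
 * per-cpu counts back in and the total becomes non-negative again.
 */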
323
8366025e
DH
324/**
325 * mnt_drop_write - give up write access to a mount
326 * @mnt: the mount on which to give up write access
327 *
328 * Tells the low-level filesystem that we are done
329 * performing writes to it. Must be matched with
330 * mnt_want_write() call above.
331 */
332void mnt_drop_write(struct vfsmount *mnt)
333{
3d733633
DH
334 int must_check_underflow = 0;
335 struct mnt_writer *cpu_writer;
336
337 cpu_writer = &get_cpu_var(mnt_writers);
338 spin_lock(&cpu_writer->lock);
339
340 use_cpu_writer_for_mount(cpu_writer, mnt);
341 if (cpu_writer->count > 0) {
342 cpu_writer->count--;
343 } else {
344 must_check_underflow = 1;
345 atomic_dec(&mnt->__mnt_writers);
346 }
347
348 spin_unlock(&cpu_writer->lock);
349 /*
350 * Logically, we could call this each time,
351 * but the __mnt_writers cacheline tends to
352 * be cold, and makes this expensive.
353 */
354 if (must_check_underflow)
355 handle_write_count_underflow(mnt);
356 /*
357 * This could be done right after the spinlock
358 * is taken because the spinlock keeps us on
359 * the cpu, and disables preemption. However,
360 * putting it here bounds the amount that
361 * __mnt_writers can underflow. Without it,
362 * we could theoretically wrap __mnt_writers.
363 */
364 put_cpu_var(mnt_writers);
8366025e
DH
365}
366EXPORT_SYMBOL_GPL(mnt_drop_write);
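/*
 * Sketch of the intended calling pattern (illustrative, not taken from this
 * file).  A VFS helper that is about to modify a file would typically do:
 *
 *	err = mnt_want_write(nd->path.mnt);
 *	if (err)
 *		return err;
 *	err = frobnicate_file(nd);		(hypothetical helper)
 *	mnt_drop_write(nd->path.mnt);
 *	return err;
 *
 * Every successful mnt_want_write() must be paired with exactly one
 * mnt_drop_write(); otherwise the writer counts drift and
 * mnt_make_readonly() can never succeed.
 */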
367
2e4b7fcd 368static int mnt_make_readonly(struct vfsmount *mnt)
8366025e 369{
3d733633
DH
370 int ret = 0;
371
372 lock_mnt_writers();
373 /*
374 * With all the locks held, this value is stable
375 */
376 if (atomic_read(&mnt->__mnt_writers) > 0) {
377 ret = -EBUSY;
378 goto out;
379 }
380 /*
2e4b7fcd
DH
381 * nobody can do a successful mnt_want_write() with all
382 * of the counts in MNT_DENIED_WRITE and the locks held.
3d733633 383 */
2e4b7fcd
DH
384 spin_lock(&vfsmount_lock);
385 if (!ret)
386 mnt->mnt_flags |= MNT_READONLY;
387 spin_unlock(&vfsmount_lock);
3d733633
DH
388out:
389 unlock_mnt_writers();
390 return ret;
8366025e 391}
8366025e 392
2e4b7fcd
DH
393static void __mnt_unmake_readonly(struct vfsmount *mnt)
394{
395 spin_lock(&vfsmount_lock);
396 mnt->mnt_flags &= ~MNT_READONLY;
397 spin_unlock(&vfsmount_lock);
398}
399
454e2398
DH
400int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
401{
402 mnt->mnt_sb = sb;
403 mnt->mnt_root = dget(sb->s_root);
404 return 0;
405}
406
407EXPORT_SYMBOL(simple_set_mnt);
408
1da177e4
LT
409void free_vfsmnt(struct vfsmount *mnt)
410{
411 kfree(mnt->mnt_devname);
73cd49ec 412 mnt_free_id(mnt);
1da177e4
LT
413 kmem_cache_free(mnt_cache, mnt);
414}
415
416/*
a05964f3
RP
417 * find the first or last mount at @dentry on vfsmount @mnt depending on
418 * @dir. If @dir is set return the first mount else return the last mount.
1da177e4 419 */
a05964f3
RP
420struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
421 int dir)
1da177e4 422{
b58fed8b
RP
423 struct list_head *head = mount_hashtable + hash(mnt, dentry);
424 struct list_head *tmp = head;
1da177e4
LT
425 struct vfsmount *p, *found = NULL;
426
1da177e4 427 for (;;) {
a05964f3 428 tmp = dir ? tmp->next : tmp->prev;
1da177e4
LT
429 p = NULL;
430 if (tmp == head)
431 break;
432 p = list_entry(tmp, struct vfsmount, mnt_hash);
433 if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
a05964f3 434 found = p;
1da177e4
LT
435 break;
436 }
437 }
1da177e4
LT
438 return found;
439}
440
a05964f3
RP
441/*
442 * lookup_mnt increments the ref count before returning
443 * the vfsmount struct.
444 */
445struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
446{
447 struct vfsmount *child_mnt;
448 spin_lock(&vfsmount_lock);
449 if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
450 mntget(child_mnt);
451 spin_unlock(&vfsmount_lock);
452 return child_mnt;
453}
454
1da177e4
LT
455static inline int check_mnt(struct vfsmount *mnt)
456{
6b3286ed 457 return mnt->mnt_ns == current->nsproxy->mnt_ns;
1da177e4
LT
458}
459
6b3286ed 460static void touch_mnt_namespace(struct mnt_namespace *ns)
5addc5dd
AV
461{
462 if (ns) {
463 ns->event = ++event;
464 wake_up_interruptible(&ns->poll);
465 }
466}
467
6b3286ed 468static void __touch_mnt_namespace(struct mnt_namespace *ns)
5addc5dd
AV
469{
470 if (ns && ns->event != event) {
471 ns->event = event;
472 wake_up_interruptible(&ns->poll);
473 }
474}
475
1a390689 476static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
1da177e4 477{
1a390689
AV
478 old_path->dentry = mnt->mnt_mountpoint;
479 old_path->mnt = mnt->mnt_parent;
1da177e4
LT
480 mnt->mnt_parent = mnt;
481 mnt->mnt_mountpoint = mnt->mnt_root;
482 list_del_init(&mnt->mnt_child);
483 list_del_init(&mnt->mnt_hash);
1a390689 484 old_path->dentry->d_mounted--;
1da177e4
LT
485}
486
b90fa9ae
RP
487void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
488 struct vfsmount *child_mnt)
489{
490 child_mnt->mnt_parent = mntget(mnt);
491 child_mnt->mnt_mountpoint = dget(dentry);
492 dentry->d_mounted++;
493}
494
1a390689 495static void attach_mnt(struct vfsmount *mnt, struct path *path)
1da177e4 496{
1a390689 497 mnt_set_mountpoint(path->mnt, path->dentry, mnt);
b90fa9ae 498 list_add_tail(&mnt->mnt_hash, mount_hashtable +
1a390689
AV
499 hash(path->mnt, path->dentry));
500 list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
b90fa9ae
RP
501}
502
503/*
504 * the caller must hold vfsmount_lock
505 */
506static void commit_tree(struct vfsmount *mnt)
507{
508 struct vfsmount *parent = mnt->mnt_parent;
509 struct vfsmount *m;
510 LIST_HEAD(head);
6b3286ed 511 struct mnt_namespace *n = parent->mnt_ns;
b90fa9ae
RP
512
513 BUG_ON(parent == mnt);
514
515 list_add_tail(&head, &mnt->mnt_list);
516 list_for_each_entry(m, &head, mnt_list)
6b3286ed 517 m->mnt_ns = n;
b90fa9ae
RP
518 list_splice(&head, n->list.prev);
519
520 list_add_tail(&mnt->mnt_hash, mount_hashtable +
521 hash(parent, mnt->mnt_mountpoint));
522 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
6b3286ed 523 touch_mnt_namespace(n);
1da177e4
LT
524}
525
526static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
527{
528 struct list_head *next = p->mnt_mounts.next;
529 if (next == &p->mnt_mounts) {
530 while (1) {
531 if (p == root)
532 return NULL;
533 next = p->mnt_child.next;
534 if (next != &p->mnt_parent->mnt_mounts)
535 break;
536 p = p->mnt_parent;
537 }
538 }
539 return list_entry(next, struct vfsmount, mnt_child);
540}
541
9676f0c6
RP
542static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
543{
544 struct list_head *prev = p->mnt_mounts.prev;
545 while (prev != &p->mnt_mounts) {
546 p = list_entry(prev, struct vfsmount, mnt_child);
547 prev = p->mnt_mounts.prev;
548 }
549 return p;
550}
551
36341f64
RP
552static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
553 int flag)
1da177e4
LT
554{
555 struct super_block *sb = old->mnt_sb;
556 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
557
558 if (mnt) {
719f5d7f
MS
559 if (flag & (CL_SLAVE | CL_PRIVATE))
560 mnt->mnt_group_id = 0; /* not a peer of original */
561 else
562 mnt->mnt_group_id = old->mnt_group_id;
563
564 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
565 int err = mnt_alloc_group_id(mnt);
566 if (err)
567 goto out_free;
568 }
569
1da177e4
LT
570 mnt->mnt_flags = old->mnt_flags;
571 atomic_inc(&sb->s_active);
572 mnt->mnt_sb = sb;
573 mnt->mnt_root = dget(root);
574 mnt->mnt_mountpoint = mnt->mnt_root;
575 mnt->mnt_parent = mnt;
b90fa9ae 576
5afe0022
RP
577 if (flag & CL_SLAVE) {
578 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
579 mnt->mnt_master = old;
580 CLEAR_MNT_SHARED(mnt);
8aec0809 581 } else if (!(flag & CL_PRIVATE)) {
5afe0022
RP
582 if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
583 list_add(&mnt->mnt_share, &old->mnt_share);
584 if (IS_MNT_SLAVE(old))
585 list_add(&mnt->mnt_slave, &old->mnt_slave);
586 mnt->mnt_master = old->mnt_master;
587 }
b90fa9ae
RP
588 if (flag & CL_MAKE_SHARED)
589 set_mnt_shared(mnt);
1da177e4
LT
590
591 /* stick the duplicate mount on the same expiry list
592 * as the original if that was on one */
36341f64 593 if (flag & CL_EXPIRE) {
36341f64
RP
594 if (!list_empty(&old->mnt_expire))
595 list_add(&mnt->mnt_expire, &old->mnt_expire);
36341f64 596 }
1da177e4
LT
597 }
598 return mnt;
719f5d7f
MS
599
600 out_free:
601 free_vfsmnt(mnt);
602 return NULL;
1da177e4
LT
603}
604
7b7b1ace 605static inline void __mntput(struct vfsmount *mnt)
1da177e4 606{
3d733633 607 int cpu;
1da177e4 608 struct super_block *sb = mnt->mnt_sb;
3d733633
DH
609 /*
610 * We don't have to hold all of the locks at the
611 * same time here because we know that we're the
612 * last reference to mnt and that no new writers
613 * can come in.
614 */
615 for_each_possible_cpu(cpu) {
616 struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
617 if (cpu_writer->mnt != mnt)
618 continue;
619 spin_lock(&cpu_writer->lock);
620 atomic_add(cpu_writer->count, &mnt->__mnt_writers);
621 cpu_writer->count = 0;
622 /*
623 * Might as well do this so that no one
624 * ever sees the pointer and expects
625 * it to be valid.
626 */
627 cpu_writer->mnt = NULL;
628 spin_unlock(&cpu_writer->lock);
629 }
630 /*
631 * This probably indicates that somebody messed
632 * up a mnt_want/drop_write() pair. If this
633 * happens, the filesystem was probably unable
634 * to make r/w->r/o transitions.
635 */
636 WARN_ON(atomic_read(&mnt->__mnt_writers));
1da177e4
LT
637 dput(mnt->mnt_root);
638 free_vfsmnt(mnt);
639 deactivate_super(sb);
640}
641
7b7b1ace
AV
642void mntput_no_expire(struct vfsmount *mnt)
643{
644repeat:
645 if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
646 if (likely(!mnt->mnt_pinned)) {
647 spin_unlock(&vfsmount_lock);
648 __mntput(mnt);
649 return;
650 }
651 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
652 mnt->mnt_pinned = 0;
653 spin_unlock(&vfsmount_lock);
654 acct_auto_close_mnt(mnt);
655 security_sb_umount_close(mnt);
656 goto repeat;
657 }
658}
659
660EXPORT_SYMBOL(mntput_no_expire);
661
662void mnt_pin(struct vfsmount *mnt)
663{
664 spin_lock(&vfsmount_lock);
665 mnt->mnt_pinned++;
666 spin_unlock(&vfsmount_lock);
667}
668
669EXPORT_SYMBOL(mnt_pin);
670
671void mnt_unpin(struct vfsmount *mnt)
672{
673 spin_lock(&vfsmount_lock);
674 if (mnt->mnt_pinned) {
675 atomic_inc(&mnt->mnt_count);
676 mnt->mnt_pinned--;
677 }
678 spin_unlock(&vfsmount_lock);
679}
680
681EXPORT_SYMBOL(mnt_unpin);
1da177e4 682
b3b304a2
MS
683static inline void mangle(struct seq_file *m, const char *s)
684{
685 seq_escape(m, s, " \t\n\\");
686}
687
688/*
689 * Simple .show_options callback for filesystems which don't want to
690 * implement more complex mount option showing.
691 *
692 * See also save_mount_options().
693 */
694int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
695{
696 const char *options = mnt->mnt_sb->s_options;
697
698 if (options != NULL && options[0]) {
699 seq_putc(m, ',');
700 mangle(m, options);
701 }
702
703 return 0;
704}
705EXPORT_SYMBOL(generic_show_options);
706
/*
 * If the filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);
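/*
 * Minimal usage sketch (assumed, not taken from this file): a filesystem with
 * no special option handling saves the option string once at mount time and
 * lets the generic helper print it back.  "examplefs" is a made-up name:
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		save_mount_options(sb, data);
 *		...
 *	}
 *
 *	static const struct super_operations examplefs_sops = {
 *		.show_options	= generic_show_options,
 *	};
 */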
726
a1a2c409 727#ifdef CONFIG_PROC_FS
1da177e4
LT
728/* iterator */
729static void *m_start(struct seq_file *m, loff_t *pos)
730{
a1a2c409 731 struct proc_mounts *p = m->private;
1da177e4 732
390c6843 733 down_read(&namespace_sem);
a1a2c409 734 return seq_list_start(&p->ns->list, *pos);
1da177e4
LT
735}
736
737static void *m_next(struct seq_file *m, void *v, loff_t *pos)
738{
a1a2c409 739 struct proc_mounts *p = m->private;
b0765fb8 740
a1a2c409 741 return seq_list_next(v, &p->ns->list, pos);
1da177e4
LT
742}
743
744static void m_stop(struct seq_file *m, void *v)
745{
390c6843 746 up_read(&namespace_sem);
1da177e4
LT
747}
748
1da177e4
LT
749static int show_vfsmnt(struct seq_file *m, void *v)
750{
b0765fb8 751 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
1da177e4
LT
752 int err = 0;
753 static struct proc_fs_info {
754 int flag;
755 char *str;
756 } fs_info[] = {
757 { MS_SYNCHRONOUS, ",sync" },
758 { MS_DIRSYNC, ",dirsync" },
759 { MS_MANDLOCK, ",mand" },
1da177e4
LT
760 { 0, NULL }
761 };
762 static struct proc_fs_info mnt_info[] = {
763 { MNT_NOSUID, ",nosuid" },
764 { MNT_NODEV, ",nodev" },
765 { MNT_NOEXEC, ",noexec" },
fc33a7bb
CH
766 { MNT_NOATIME, ",noatime" },
767 { MNT_NODIRATIME, ",nodiratime" },
47ae32d6 768 { MNT_RELATIME, ",relatime" },
1da177e4
LT
769 { 0, NULL }
770 };
771 struct proc_fs_info *fs_infop;
c32c2f63 772 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
1da177e4
LT
773
774 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
775 seq_putc(m, ' ');
c32c2f63 776 seq_path(m, &mnt_path, " \t\n\\");
1da177e4
LT
777 seq_putc(m, ' ');
778 mangle(m, mnt->mnt_sb->s_type->name);
79c0b2df
MS
779 if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
780 seq_putc(m, '.');
781 mangle(m, mnt->mnt_sb->s_subtype);
782 }
2e4b7fcd 783 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
1da177e4
LT
784 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
785 if (mnt->mnt_sb->s_flags & fs_infop->flag)
786 seq_puts(m, fs_infop->str);
787 }
788 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
789 if (mnt->mnt_flags & fs_infop->flag)
790 seq_puts(m, fs_infop->str);
791 }
792 if (mnt->mnt_sb->s_op->show_options)
793 err = mnt->mnt_sb->s_op->show_options(m, mnt);
794 seq_puts(m, " 0 0\n");
795 return err;
796}
797
a1a2c409 798const struct seq_operations mounts_op = {
1da177e4
LT
799 .start = m_start,
800 .next = m_next,
801 .stop = m_stop,
802 .show = show_vfsmnt
803};
804
b4629fe2
CL
805static int show_vfsstat(struct seq_file *m, void *v)
806{
b0765fb8 807 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
c32c2f63 808 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
b4629fe2
CL
809 int err = 0;
810
811 /* device */
812 if (mnt->mnt_devname) {
813 seq_puts(m, "device ");
814 mangle(m, mnt->mnt_devname);
815 } else
816 seq_puts(m, "no device");
817
818 /* mount point */
819 seq_puts(m, " mounted on ");
c32c2f63 820 seq_path(m, &mnt_path, " \t\n\\");
b4629fe2
CL
821 seq_putc(m, ' ');
822
823 /* file system type */
824 seq_puts(m, "with fstype ");
825 mangle(m, mnt->mnt_sb->s_type->name);
826
827 /* optional statistics */
828 if (mnt->mnt_sb->s_op->show_stats) {
829 seq_putc(m, ' ');
830 err = mnt->mnt_sb->s_op->show_stats(m, mnt);
831 }
832
833 seq_putc(m, '\n');
834 return err;
835}
836
a1a2c409 837const struct seq_operations mountstats_op = {
b4629fe2
CL
838 .start = m_start,
839 .next = m_next,
840 .stop = m_stop,
841 .show = show_vfsstat,
842};
a1a2c409 843#endif /* CONFIG_PROC_FS */
b4629fe2 844
1da177e4
LT
845/**
846 * may_umount_tree - check if a mount tree is busy
847 * @mnt: root of mount tree
848 *
849 * This is called to check if a tree of mounts has any
850 * open files, pwds, chroots or sub mounts that are
851 * busy.
852 */
853int may_umount_tree(struct vfsmount *mnt)
854{
36341f64
RP
855 int actual_refs = 0;
856 int minimum_refs = 0;
857 struct vfsmount *p;
1da177e4
LT
858
859 spin_lock(&vfsmount_lock);
36341f64 860 for (p = mnt; p; p = next_mnt(p, mnt)) {
1da177e4
LT
861 actual_refs += atomic_read(&p->mnt_count);
862 minimum_refs += 2;
1da177e4
LT
863 }
864 spin_unlock(&vfsmount_lock);
865
866 if (actual_refs > minimum_refs)
e3474a8e 867 return 0;
1da177e4 868
e3474a8e 869 return 1;
1da177e4
LT
870}
871
872EXPORT_SYMBOL(may_umount_tree);
873
874/**
875 * may_umount - check if a mount point is busy
876 * @mnt: root of mount
877 *
878 * This is called to check if a mount point has any
879 * open files, pwds, chroots or sub mounts. If the
880 * mount has sub mounts this will return busy
881 * regardless of whether the sub mounts are busy.
882 *
883 * Doesn't take quota and stuff into account. IOW, in some cases it will
884 * give false negatives. The main reason why it's here is that we need
885 * a non-destructive way to look for easily umountable filesystems.
886 */
887int may_umount(struct vfsmount *mnt)
888{
e3474a8e 889 int ret = 1;
a05964f3
RP
890 spin_lock(&vfsmount_lock);
891 if (propagate_mount_busy(mnt, 2))
e3474a8e 892 ret = 0;
a05964f3
RP
893 spin_unlock(&vfsmount_lock);
894 return ret;
1da177e4
LT
895}
896
897EXPORT_SYMBOL(may_umount);
898
b90fa9ae 899void release_mounts(struct list_head *head)
70fbcdf4
RP
900{
901 struct vfsmount *mnt;
bf066c7d 902 while (!list_empty(head)) {
b5e61818 903 mnt = list_first_entry(head, struct vfsmount, mnt_hash);
70fbcdf4
RP
904 list_del_init(&mnt->mnt_hash);
905 if (mnt->mnt_parent != mnt) {
906 struct dentry *dentry;
907 struct vfsmount *m;
908 spin_lock(&vfsmount_lock);
909 dentry = mnt->mnt_mountpoint;
910 m = mnt->mnt_parent;
911 mnt->mnt_mountpoint = mnt->mnt_root;
912 mnt->mnt_parent = mnt;
7c4b93d8 913 m->mnt_ghosts--;
70fbcdf4
RP
914 spin_unlock(&vfsmount_lock);
915 dput(dentry);
916 mntput(m);
917 }
918 mntput(mnt);
919 }
920}
921
a05964f3 922void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
1da177e4
LT
923{
924 struct vfsmount *p;
1da177e4 925
1bfba4e8
AM
926 for (p = mnt; p; p = next_mnt(p, mnt))
927 list_move(&p->mnt_hash, kill);
1da177e4 928
a05964f3
RP
929 if (propagate)
930 propagate_umount(kill);
931
70fbcdf4
RP
932 list_for_each_entry(p, kill, mnt_hash) {
933 list_del_init(&p->mnt_expire);
934 list_del_init(&p->mnt_list);
6b3286ed
KK
935 __touch_mnt_namespace(p->mnt_ns);
936 p->mnt_ns = NULL;
70fbcdf4 937 list_del_init(&p->mnt_child);
7c4b93d8
AV
938 if (p->mnt_parent != p) {
939 p->mnt_parent->mnt_ghosts++;
f30ac319 940 p->mnt_mountpoint->d_mounted--;
7c4b93d8 941 }
a05964f3 942 change_mnt_propagation(p, MS_PRIVATE);
1da177e4
LT
943 }
944}
945
c35038be
AV
946static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
947
1da177e4
LT
948static int do_umount(struct vfsmount *mnt, int flags)
949{
b58fed8b 950 struct super_block *sb = mnt->mnt_sb;
1da177e4 951 int retval;
70fbcdf4 952 LIST_HEAD(umount_list);
1da177e4
LT
953
954 retval = security_sb_umount(mnt, flags);
955 if (retval)
956 return retval;
957
958 /*
959 * Allow userspace to request a mountpoint be expired rather than
960 * unmounting unconditionally. Unmount only happens if:
961 * (1) the mark is already set (the mark is cleared by mntput())
962 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
963 */
964 if (flags & MNT_EXPIRE) {
6ac08c39 965 if (mnt == current->fs->root.mnt ||
1da177e4
LT
966 flags & (MNT_FORCE | MNT_DETACH))
967 return -EINVAL;
968
969 if (atomic_read(&mnt->mnt_count) != 2)
970 return -EBUSY;
971
972 if (!xchg(&mnt->mnt_expiry_mark, 1))
973 return -EAGAIN;
974 }
975
	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
985
986 lock_kernel();
8b512d9a
TM
987 if (sb->s_op->umount_begin)
988 sb->s_op->umount_begin(mnt, flags);
1da177e4
LT
989 unlock_kernel();
990
991 /*
992 * No sense to grab the lock for this test, but test itself looks
993 * somewhat bogus. Suggestions for better replacement?
994 * Ho-hum... In principle, we might treat that as umount + switch
995 * to rootfs. GC would eventually take care of the old vfsmount.
996 * Actually it makes sense, especially if rootfs would contain a
997 * /reboot - static binary that would close all descriptors and
998 * call reboot(9). Then init(8) could umount root and exec /reboot.
999 */
6ac08c39 1000 if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1da177e4
LT
1001 /*
1002 * Special case for "unmounting" root ...
1003 * we just try to remount it readonly.
1004 */
1005 down_write(&sb->s_umount);
1006 if (!(sb->s_flags & MS_RDONLY)) {
1007 lock_kernel();
1008 DQUOT_OFF(sb);
1009 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
1010 unlock_kernel();
1011 }
1012 up_write(&sb->s_umount);
1013 return retval;
1014 }
1015
390c6843 1016 down_write(&namespace_sem);
1da177e4 1017 spin_lock(&vfsmount_lock);
5addc5dd 1018 event++;
1da177e4 1019
c35038be
AV
1020 if (!(flags & MNT_DETACH))
1021 shrink_submounts(mnt, &umount_list);
1022
1da177e4 1023 retval = -EBUSY;
a05964f3 1024 if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
1da177e4 1025 if (!list_empty(&mnt->mnt_list))
a05964f3 1026 umount_tree(mnt, 1, &umount_list);
1da177e4
LT
1027 retval = 0;
1028 }
1029 spin_unlock(&vfsmount_lock);
1030 if (retval)
1031 security_sb_umount_busy(mnt);
390c6843 1032 up_write(&namespace_sem);
70fbcdf4 1033 release_mounts(&umount_list);
1da177e4
LT
1034 return retval;
1035}
1036
1037/*
1038 * Now umount can handle mount points as well as block devices.
1039 * This is important for filesystems which use unnamed block devices.
1040 *
1041 * We now support a flag for forced unmount like the other 'big iron'
1042 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
1043 */
1044
1045asmlinkage long sys_umount(char __user * name, int flags)
1046{
1047 struct nameidata nd;
1048 int retval;
1049
1050 retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
1051 if (retval)
1052 goto out;
1053 retval = -EINVAL;
4ac91378 1054 if (nd.path.dentry != nd.path.mnt->mnt_root)
1da177e4 1055 goto dput_and_out;
4ac91378 1056 if (!check_mnt(nd.path.mnt))
1da177e4
LT
1057 goto dput_and_out;
1058
1059 retval = -EPERM;
1060 if (!capable(CAP_SYS_ADMIN))
1061 goto dput_and_out;
1062
4ac91378 1063 retval = do_umount(nd.path.mnt, flags);
1da177e4 1064dput_and_out:
429731b1 1065 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
4ac91378
JB
1066 dput(nd.path.dentry);
1067 mntput_no_expire(nd.path.mnt);
1da177e4
LT
1068out:
1069 return retval;
1070}
1071
1072#ifdef __ARCH_WANT_SYS_OLDUMOUNT
1073
1074/*
b58fed8b 1075 * The 2.0 compatible umount. No flags.
1da177e4 1076 */
1da177e4
LT
1077asmlinkage long sys_oldumount(char __user * name)
1078{
b58fed8b 1079 return sys_umount(name, 0);
1da177e4
LT
1080}
1081
1082#endif
1083
1084static int mount_is_safe(struct nameidata *nd)
1085{
1086 if (capable(CAP_SYS_ADMIN))
1087 return 0;
1088 return -EPERM;
1089#ifdef notyet
4ac91378 1090 if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
1da177e4 1091 return -EPERM;
4ac91378
JB
1092 if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
1093 if (current->uid != nd->path.dentry->d_inode->i_uid)
1da177e4
LT
1094 return -EPERM;
1095 }
e4543edd 1096 if (vfs_permission(nd, MAY_WRITE))
1da177e4
LT
1097 return -EPERM;
1098 return 0;
1099#endif
1100}
1101
b58fed8b 1102static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
1da177e4
LT
1103{
1104 while (1) {
1105 if (d == dentry)
1106 return 1;
1107 if (d == NULL || d == d->d_parent)
1108 return 0;
1109 d = d->d_parent;
1110 }
1111}
1112
b90fa9ae 1113struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
36341f64 1114 int flag)
1da177e4
LT
1115{
1116 struct vfsmount *res, *p, *q, *r, *s;
1a390689 1117 struct path path;
1da177e4 1118
9676f0c6
RP
1119 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
1120 return NULL;
1121
36341f64 1122 res = q = clone_mnt(mnt, dentry, flag);
1da177e4
LT
1123 if (!q)
1124 goto Enomem;
1125 q->mnt_mountpoint = mnt->mnt_mountpoint;
1126
1127 p = mnt;
fdadd65f 1128 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1da177e4
LT
1129 if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
1130 continue;
1131
1132 for (s = r; s; s = next_mnt(s, r)) {
9676f0c6
RP
1133 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
1134 s = skip_mnt_tree(s);
1135 continue;
1136 }
1da177e4
LT
1137 while (p != s->mnt_parent) {
1138 p = p->mnt_parent;
1139 q = q->mnt_parent;
1140 }
1141 p = s;
1a390689
AV
1142 path.mnt = q;
1143 path.dentry = p->mnt_mountpoint;
36341f64 1144 q = clone_mnt(p, p->mnt_root, flag);
1da177e4
LT
1145 if (!q)
1146 goto Enomem;
1147 spin_lock(&vfsmount_lock);
1148 list_add_tail(&q->mnt_list, &res->mnt_list);
1a390689 1149 attach_mnt(q, &path);
1da177e4
LT
1150 spin_unlock(&vfsmount_lock);
1151 }
1152 }
1153 return res;
b58fed8b 1154Enomem:
1da177e4 1155 if (res) {
70fbcdf4 1156 LIST_HEAD(umount_list);
1da177e4 1157 spin_lock(&vfsmount_lock);
a05964f3 1158 umount_tree(res, 0, &umount_list);
1da177e4 1159 spin_unlock(&vfsmount_lock);
70fbcdf4 1160 release_mounts(&umount_list);
1da177e4
LT
1161 }
1162 return NULL;
1163}
1164
8aec0809
AV
1165struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
1166{
1167 struct vfsmount *tree;
1a60a280 1168 down_write(&namespace_sem);
8aec0809 1169 tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
1a60a280 1170 up_write(&namespace_sem);
8aec0809
AV
1171 return tree;
1172}
1173
1174void drop_collected_mounts(struct vfsmount *mnt)
1175{
1176 LIST_HEAD(umount_list);
1a60a280 1177 down_write(&namespace_sem);
8aec0809
AV
1178 spin_lock(&vfsmount_lock);
1179 umount_tree(mnt, 0, &umount_list);
1180 spin_unlock(&vfsmount_lock);
1a60a280 1181 up_write(&namespace_sem);
8aec0809
AV
1182 release_mounts(&umount_list);
1183}
1184
719f5d7f
MS
1185static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1186{
1187 struct vfsmount *p;
1188
1189 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1190 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1191 mnt_release_group_id(p);
1192 }
1193}
1194
1195static int invent_group_ids(struct vfsmount *mnt, bool recurse)
1196{
1197 struct vfsmount *p;
1198
1199 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1200 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1201 int err = mnt_alloc_group_id(p);
1202 if (err) {
1203 cleanup_group_ids(mnt, p);
1204 return err;
1205 }
1206 }
1207 }
1208
1209 return 0;
1210}
1211
b90fa9ae
RP
1212/*
1213 * @source_mnt : mount tree to be attached
21444403
RP
1214 * @nd : place the mount tree @source_mnt is attached
1215 * @parent_nd : if non-null, detach the source_mnt from its parent and
1216 * store the parent mount and mountpoint dentry.
1217 * (done when source_mnt is moved)
b90fa9ae
RP
1218 *
1219 * NOTE: in the table below explains the semantics when a source mount
1220 * of a given type is attached to a destination mount of a given type.
9676f0c6
RP
1221 * ---------------------------------------------------------------------------
1222 * | BIND MOUNT OPERATION |
1223 * |**************************************************************************
1224 * | source-->| shared | private | slave | unbindable |
1225 * | dest | | | | |
1226 * | | | | | | |
1227 * | v | | | | |
1228 * |**************************************************************************
1229 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
1230 * | | | | | |
1231 * |non-shared| shared (+) | private | slave (*) | invalid |
1232 * ***************************************************************************
b90fa9ae
RP
1233 * A bind operation clones the source mount and mounts the clone on the
1234 * destination mount.
1235 *
1236 * (++) the cloned mount is propagated to all the mounts in the propagation
1237 * tree of the destination mount and the cloned mount is added to
1238 * the peer group of the source mount.
1239 * (+) the cloned mount is created under the destination mount and is marked
1240 * as shared. The cloned mount is added to the peer group of the source
1241 * mount.
5afe0022
RP
1242 * (+++) the mount is propagated to all the mounts in the propagation tree
1243 * of the destination mount and the cloned mount is made slave
1244 * of the same master as that of the source mount. The cloned mount
1245 * is marked as 'shared and slave'.
1246 * (*) the cloned mount is made a slave of the same master as that of the
1247 * source mount.
1248 *
9676f0c6
RP
1249 * ---------------------------------------------------------------------------
1250 * | MOVE MOUNT OPERATION |
1251 * |**************************************************************************
1252 * | source-->| shared | private | slave | unbindable |
1253 * | dest | | | | |
1254 * | | | | | | |
1255 * | v | | | | |
1256 * |**************************************************************************
1257 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
1258 * | | | | | |
1259 * |non-shared| shared (+*) | private | slave (*) | unbindable |
1260 * ***************************************************************************
5afe0022
RP
1261 *
1262 * (+) the mount is moved to the destination. And is then propagated to
1263 * all the mounts in the propagation tree of the destination mount.
21444403 1264 * (+*) the mount is moved to the destination.
5afe0022
RP
1265 * (+++) the mount is moved to the destination and is then propagated to
1266 * all the mounts belonging to the destination mount's propagation tree.
1267 * the mount is marked as 'shared and slave'.
1268 * (*) the mount continues to be a slave at the new location.
b90fa9ae
RP
1269 *
1270 * if the source mount is a tree, the operations explained above is
1271 * applied to each mount in the tree.
1272 * Must be called without spinlocks held, since this function can sleep
1273 * in allocations.
1274 */
1275static int attach_recursive_mnt(struct vfsmount *source_mnt,
1a390689 1276 struct path *path, struct path *parent_path)
b90fa9ae
RP
1277{
1278 LIST_HEAD(tree_list);
1a390689
AV
1279 struct vfsmount *dest_mnt = path->mnt;
1280 struct dentry *dest_dentry = path->dentry;
b90fa9ae 1281 struct vfsmount *child, *p;
719f5d7f 1282 int err;
b90fa9ae 1283
719f5d7f
MS
1284 if (IS_MNT_SHARED(dest_mnt)) {
1285 err = invent_group_ids(source_mnt, true);
1286 if (err)
1287 goto out;
1288 }
1289 err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
1290 if (err)
1291 goto out_cleanup_ids;
b90fa9ae
RP
1292
1293 if (IS_MNT_SHARED(dest_mnt)) {
1294 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1295 set_mnt_shared(p);
1296 }
1297
1298 spin_lock(&vfsmount_lock);
1a390689
AV
1299 if (parent_path) {
1300 detach_mnt(source_mnt, parent_path);
1301 attach_mnt(source_mnt, path);
6b3286ed 1302 touch_mnt_namespace(current->nsproxy->mnt_ns);
21444403
RP
1303 } else {
1304 mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
1305 commit_tree(source_mnt);
1306 }
b90fa9ae
RP
1307
1308 list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
1309 list_del_init(&child->mnt_hash);
1310 commit_tree(child);
1311 }
1312 spin_unlock(&vfsmount_lock);
1313 return 0;
719f5d7f
MS
1314
1315 out_cleanup_ids:
1316 if (IS_MNT_SHARED(dest_mnt))
1317 cleanup_group_ids(source_mnt, NULL);
1318 out:
1319 return err;
b90fa9ae
RP
1320}
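/*
 * Worked example for the tables above (illustrative only): after
 *
 *	mount --make-shared /mnt/dest
 *	mount --bind /mnt/src /mnt/dest/dir
 *
 * the destination is shared, so attach_recursive_mnt() first propagates
 * clones of the source tree to every member of /mnt/dest's peer group (the
 * "shared (++)" case in the bind table) and then commits the source mount
 * itself under /mnt/dest/dir.  Had the destination been private, only the
 * single clone would have been attached.
 */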
1321
8c3ee42e 1322static int graft_tree(struct vfsmount *mnt, struct path *path)
1da177e4
LT
1323{
1324 int err;
1325 if (mnt->mnt_sb->s_flags & MS_NOUSER)
1326 return -EINVAL;
1327
8c3ee42e 1328 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1da177e4
LT
1329 S_ISDIR(mnt->mnt_root->d_inode->i_mode))
1330 return -ENOTDIR;
1331
1332 err = -ENOENT;
8c3ee42e
AV
1333 mutex_lock(&path->dentry->d_inode->i_mutex);
1334 if (IS_DEADDIR(path->dentry->d_inode))
1da177e4
LT
1335 goto out_unlock;
1336
8c3ee42e 1337 err = security_sb_check_sb(mnt, path);
1da177e4
LT
1338 if (err)
1339 goto out_unlock;
1340
1341 err = -ENOENT;
8c3ee42e
AV
1342 if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
1343 err = attach_recursive_mnt(mnt, path, NULL);
1da177e4 1344out_unlock:
8c3ee42e 1345 mutex_unlock(&path->dentry->d_inode->i_mutex);
1da177e4 1346 if (!err)
8c3ee42e 1347 security_sb_post_addmount(mnt, path);
1da177e4
LT
1348 return err;
1349}
1350
07b20889
RP
1351/*
1352 * recursively change the type of the mountpoint.
2dafe1c4 1353 * noinline this do_mount helper to save do_mount stack space.
07b20889 1354 */
2dafe1c4 1355static noinline int do_change_type(struct nameidata *nd, int flag)
07b20889 1356{
4ac91378 1357 struct vfsmount *m, *mnt = nd->path.mnt;
07b20889
RP
1358 int recurse = flag & MS_REC;
1359 int type = flag & ~MS_REC;
719f5d7f 1360 int err = 0;
07b20889 1361
ee6f9582
MS
1362 if (!capable(CAP_SYS_ADMIN))
1363 return -EPERM;
1364
4ac91378 1365 if (nd->path.dentry != nd->path.mnt->mnt_root)
07b20889
RP
1366 return -EINVAL;
1367
1368 down_write(&namespace_sem);
719f5d7f
MS
1369 if (type == MS_SHARED) {
1370 err = invent_group_ids(mnt, recurse);
1371 if (err)
1372 goto out_unlock;
1373 }
1374
07b20889
RP
1375 spin_lock(&vfsmount_lock);
1376 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1377 change_mnt_propagation(m, type);
1378 spin_unlock(&vfsmount_lock);
719f5d7f
MS
1379
1380 out_unlock:
07b20889 1381 up_write(&namespace_sem);
719f5d7f 1382 return err;
07b20889
RP
1383}
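/*
 * For reference (assumed mapping, not part of the original file): this is the
 * handler behind mount(2) calls such as
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 *
 * which is roughly what "mount --make-rshared /mnt" issues; MS_REC selects
 * the recursive variant and the remaining bits pick the propagation type.
 */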
1384
1da177e4
LT
1385/*
1386 * do loopback mount.
2dafe1c4 1387 * noinline this do_mount helper to save do_mount stack space.
1da177e4 1388 */
2dafe1c4
ES
1389static noinline int do_loopback(struct nameidata *nd, char *old_name,
1390 int recurse)
1da177e4
LT
1391{
1392 struct nameidata old_nd;
1393 struct vfsmount *mnt = NULL;
1394 int err = mount_is_safe(nd);
1395 if (err)
1396 return err;
1397 if (!old_name || !*old_name)
1398 return -EINVAL;
1399 err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
1400 if (err)
1401 return err;
1402
390c6843 1403 down_write(&namespace_sem);
1da177e4 1404 err = -EINVAL;
4ac91378
JB
1405 if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
1406 goto out;
9676f0c6 1407
4ac91378 1408 if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
ccd48bc7 1409 goto out;
1da177e4 1410
ccd48bc7
AV
1411 err = -ENOMEM;
1412 if (recurse)
4ac91378 1413 mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
ccd48bc7 1414 else
4ac91378 1415 mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);
ccd48bc7
AV
1416
1417 if (!mnt)
1418 goto out;
1419
8c3ee42e 1420 err = graft_tree(mnt, &nd->path);
ccd48bc7 1421 if (err) {
70fbcdf4 1422 LIST_HEAD(umount_list);
1da177e4 1423 spin_lock(&vfsmount_lock);
a05964f3 1424 umount_tree(mnt, 0, &umount_list);
1da177e4 1425 spin_unlock(&vfsmount_lock);
70fbcdf4 1426 release_mounts(&umount_list);
5b83d2c5 1427 }
1da177e4 1428
ccd48bc7 1429out:
390c6843 1430 up_write(&namespace_sem);
1d957f9b 1431 path_put(&old_nd.path);
1da177e4
LT
1432 return err;
1433}
1434
2e4b7fcd
DH
1435static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1436{
1437 int error = 0;
1438 int readonly_request = 0;
1439
1440 if (ms_flags & MS_RDONLY)
1441 readonly_request = 1;
1442 if (readonly_request == __mnt_is_readonly(mnt))
1443 return 0;
1444
1445 if (readonly_request)
1446 error = mnt_make_readonly(mnt);
1447 else
1448 __mnt_unmake_readonly(mnt);
1449 return error;
1450}
1451
1da177e4
LT
1452/*
1453 * change filesystem flags. dir should be a physical root of filesystem.
1454 * If you've mounted a non-root directory somewhere and want to do remount
1455 * on it - tough luck.
2dafe1c4 1456 * noinline this do_mount helper to save do_mount stack space.
1da177e4 1457 */
2dafe1c4 1458static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
1da177e4
LT
1459 void *data)
1460{
1461 int err;
4ac91378 1462 struct super_block *sb = nd->path.mnt->mnt_sb;
1da177e4
LT
1463
1464 if (!capable(CAP_SYS_ADMIN))
1465 return -EPERM;
1466
4ac91378 1467 if (!check_mnt(nd->path.mnt))
1da177e4
LT
1468 return -EINVAL;
1469
4ac91378 1470 if (nd->path.dentry != nd->path.mnt->mnt_root)
1da177e4
LT
1471 return -EINVAL;
1472
1473 down_write(&sb->s_umount);
2e4b7fcd
DH
1474 if (flags & MS_BIND)
1475 err = change_mount_flags(nd->path.mnt, flags);
1476 else
1477 err = do_remount_sb(sb, flags, data, 0);
1da177e4 1478 if (!err)
4ac91378 1479 nd->path.mnt->mnt_flags = mnt_flags;
1da177e4
LT
1480 up_write(&sb->s_umount);
1481 if (!err)
4ac91378 1482 security_sb_post_remount(nd->path.mnt, flags, data);
1da177e4
LT
1483 return err;
1484}
1485
9676f0c6
RP
1486static inline int tree_contains_unbindable(struct vfsmount *mnt)
1487{
1488 struct vfsmount *p;
1489 for (p = mnt; p; p = next_mnt(p, mnt)) {
1490 if (IS_MNT_UNBINDABLE(p))
1491 return 1;
1492 }
1493 return 0;
1494}
1495
2dafe1c4
ES
1496/*
1497 * noinline this do_mount helper to save do_mount stack space.
1498 */
1499static noinline int do_move_mount(struct nameidata *nd, char *old_name)
1da177e4 1500{
1a390689
AV
1501 struct nameidata old_nd;
1502 struct path parent_path;
1da177e4
LT
1503 struct vfsmount *p;
1504 int err = 0;
1505 if (!capable(CAP_SYS_ADMIN))
1506 return -EPERM;
1507 if (!old_name || !*old_name)
1508 return -EINVAL;
1509 err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
1510 if (err)
1511 return err;
1512
390c6843 1513 down_write(&namespace_sem);
4ac91378
JB
1514 while (d_mountpoint(nd->path.dentry) &&
1515 follow_down(&nd->path.mnt, &nd->path.dentry))
1da177e4
LT
1516 ;
1517 err = -EINVAL;
4ac91378 1518 if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
1da177e4
LT
1519 goto out;
1520
1521 err = -ENOENT;
4ac91378
JB
1522 mutex_lock(&nd->path.dentry->d_inode->i_mutex);
1523 if (IS_DEADDIR(nd->path.dentry->d_inode))
1da177e4
LT
1524 goto out1;
1525
4ac91378 1526 if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
21444403 1527 goto out1;
1da177e4
LT
1528
1529 err = -EINVAL;
4ac91378 1530 if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
21444403 1531 goto out1;
1da177e4 1532
4ac91378 1533 if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
21444403 1534 goto out1;
1da177e4 1535
4ac91378
JB
1536 if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
1537 S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
21444403
RP
1538 goto out1;
1539 /*
1540 * Don't move a mount residing in a shared parent.
1541 */
4ac91378
JB
1542 if (old_nd.path.mnt->mnt_parent &&
1543 IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
21444403 1544 goto out1;
9676f0c6
RP
1545 /*
1546 * Don't move a mount tree containing unbindable mounts to a destination
1547 * mount which is shared.
1548 */
4ac91378
JB
1549 if (IS_MNT_SHARED(nd->path.mnt) &&
1550 tree_contains_unbindable(old_nd.path.mnt))
9676f0c6 1551 goto out1;
1da177e4 1552 err = -ELOOP;
4ac91378
JB
1553 for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
1554 if (p == old_nd.path.mnt)
21444403 1555 goto out1;
1da177e4 1556
1a390689 1557 err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
4ac91378 1558 if (err)
21444403 1559 goto out1;
1da177e4
LT
1560
1561 /* if the mount is moved, it should no longer be expire
1562 * automatically */
4ac91378 1563 list_del_init(&old_nd.path.mnt->mnt_expire);
1da177e4 1564out1:
4ac91378 1565 mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
1da177e4 1566out:
390c6843 1567 up_write(&namespace_sem);
1da177e4 1568 if (!err)
1a390689 1569 path_put(&parent_path);
1d957f9b 1570 path_put(&old_nd.path);
1da177e4
LT
1571 return err;
1572}
1573
1574/*
1575 * create a new mount for userspace and request it to be added into the
1576 * namespace's tree
2dafe1c4 1577 * noinline this do_mount helper to save do_mount stack space.
1da177e4 1578 */
2dafe1c4 1579static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
1da177e4
LT
1580 int mnt_flags, char *name, void *data)
1581{
1582 struct vfsmount *mnt;
1583
1584 if (!type || !memchr(type, 0, PAGE_SIZE))
1585 return -EINVAL;
1586
1587 /* we need capabilities... */
1588 if (!capable(CAP_SYS_ADMIN))
1589 return -EPERM;
1590
1591 mnt = do_kern_mount(type, flags, name, data);
1592 if (IS_ERR(mnt))
1593 return PTR_ERR(mnt);
1594
1595 return do_add_mount(mnt, nd, mnt_flags, NULL);
1596}
1597
1598/*
1599 * add a mount into a namespace's mount tree
1600 * - provide the option of adding the new mount to an expiration list
1601 */
1602int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
1603 int mnt_flags, struct list_head *fslist)
1604{
1605 int err;
1606
390c6843 1607 down_write(&namespace_sem);
1da177e4 1608 /* Something was mounted here while we slept */
4ac91378
JB
1609 while (d_mountpoint(nd->path.dentry) &&
1610 follow_down(&nd->path.mnt, &nd->path.dentry))
1da177e4
LT
1611 ;
1612 err = -EINVAL;
4ac91378 1613 if (!check_mnt(nd->path.mnt))
1da177e4
LT
1614 goto unlock;
1615
1616 /* Refuse the same filesystem on the same mount point */
1617 err = -EBUSY;
4ac91378
JB
1618 if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
1619 nd->path.mnt->mnt_root == nd->path.dentry)
1da177e4
LT
1620 goto unlock;
1621
1622 err = -EINVAL;
1623 if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
1624 goto unlock;
1625
1626 newmnt->mnt_flags = mnt_flags;
8c3ee42e 1627 if ((err = graft_tree(newmnt, &nd->path)))
5b83d2c5 1628 goto unlock;
1da177e4 1629
6758f953 1630 if (fslist) /* add to the specified expiration list */
55e700b9 1631 list_add_tail(&newmnt->mnt_expire, fslist);
6758f953 1632
390c6843 1633 up_write(&namespace_sem);
5b83d2c5 1634 return 0;
1da177e4
LT
1635
1636unlock:
390c6843 1637 up_write(&namespace_sem);
1da177e4
LT
1638 mntput(newmnt);
1639 return err;
1640}
1641
1642EXPORT_SYMBOL_GPL(do_add_mount);
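/*
 * Usage sketch (illustrative; the names are invented): an automounting
 * filesystem typically creates the submount itself and hands it to
 * do_add_mount() together with a private expiry list, then ages that list
 * from a periodic work item:
 *
 *	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &examplefs_automount_list);
 *	...
 *	mark_mounts_for_expiry(&examplefs_automount_list);	(from a timer)
 *
 * Mounts still referenced only by their parent on two successive passes of
 * mark_mounts_for_expiry() are unmounted automatically.
 */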
1643
1644/*
1645 * process a list of expirable mountpoints with the intent of discarding any
1646 * mountpoints that aren't in use and haven't been touched since last we came
1647 * here
1648 */
1649void mark_mounts_for_expiry(struct list_head *mounts)
1650{
1da177e4
LT
1651 struct vfsmount *mnt, *next;
1652 LIST_HEAD(graveyard);
bcc5c7d2 1653 LIST_HEAD(umounts);
1da177e4
LT
1654
1655 if (list_empty(mounts))
1656 return;
1657
bcc5c7d2 1658 down_write(&namespace_sem);
1da177e4
LT
1659 spin_lock(&vfsmount_lock);
1660
1661 /* extract from the expiration list every vfsmount that matches the
1662 * following criteria:
1663 * - only referenced by its parent vfsmount
1664 * - still marked for expiry (marked on the last call here; marks are
1665 * cleared by mntput())
1666 */
55e700b9 1667 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
1da177e4 1668 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
bcc5c7d2 1669 propagate_mount_busy(mnt, 1))
1da177e4 1670 continue;
55e700b9 1671 list_move(&mnt->mnt_expire, &graveyard);
1da177e4 1672 }
bcc5c7d2
AV
1673 while (!list_empty(&graveyard)) {
1674 mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
1675 touch_mnt_namespace(mnt->mnt_ns);
1676 umount_tree(mnt, 1, &umounts);
1677 }
5528f911 1678 spin_unlock(&vfsmount_lock);
bcc5c7d2
AV
1679 up_write(&namespace_sem);
1680
1681 release_mounts(&umounts);
5528f911
TM
1682}
1683
1684EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
1685
1686/*
1687 * Ripoff of 'select_parent()'
1688 *
1689 * search the list of submounts for a given mountpoint, and move any
1690 * shrinkable submounts to the 'graveyard' list.
1691 */
1692static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
1693{
1694 struct vfsmount *this_parent = parent;
1695 struct list_head *next;
1696 int found = 0;
1697
1698repeat:
1699 next = this_parent->mnt_mounts.next;
1700resume:
1701 while (next != &this_parent->mnt_mounts) {
1702 struct list_head *tmp = next;
1703 struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
1704
1705 next = tmp->next;
1706 if (!(mnt->mnt_flags & MNT_SHRINKABLE))
1da177e4 1707 continue;
5528f911
TM
1708 /*
1709 * Descend a level if the d_mounts list is non-empty.
1710 */
1711 if (!list_empty(&mnt->mnt_mounts)) {
1712 this_parent = mnt;
1713 goto repeat;
1714 }
1da177e4 1715
5528f911 1716 if (!propagate_mount_busy(mnt, 1)) {
5528f911
TM
1717 list_move_tail(&mnt->mnt_expire, graveyard);
1718 found++;
1719 }
1da177e4 1720 }
5528f911
TM
1721 /*
1722 * All done at this level ... ascend and resume the search
1723 */
1724 if (this_parent != parent) {
1725 next = this_parent->mnt_child.next;
1726 this_parent = this_parent->mnt_parent;
1727 goto resume;
1728 }
1729 return found;
1730}
1731
1732/*
1733 * process a list of expirable mountpoints with the intent of discarding any
1734 * submounts of a specific parent mountpoint
1735 */
c35038be 1736static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
5528f911
TM
1737{
1738 LIST_HEAD(graveyard);
c35038be 1739 struct vfsmount *m;
5528f911 1740
5528f911 1741 /* extract submounts of 'mountpoint' from the expiration list */
c35038be 1742 while (select_submounts(mnt, &graveyard)) {
bcc5c7d2 1743 while (!list_empty(&graveyard)) {
c35038be 1744 m = list_first_entry(&graveyard, struct vfsmount,
bcc5c7d2
AV
1745 mnt_expire);
1746 touch_mnt_namespace(mnt->mnt_ns);
c35038be 1747 umount_tree(mnt, 1, umounts);
bcc5c7d2
AV
1748 }
1749 }
1da177e4
LT
1750}
1751
1da177e4
LT
1752/*
1753 * Some copy_from_user() implementations do not return the exact number of
1754 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
1755 * Note that this function differs from copy_from_user() in that it will oops
1756 * on bad values of `to', rather than returning a short copy.
1757 */
b58fed8b
RP
1758static long exact_copy_from_user(void *to, const void __user * from,
1759 unsigned long n)
1da177e4
LT
1760{
1761 char *t = to;
1762 const char __user *f = from;
1763 char c;
1764
1765 if (!access_ok(VERIFY_READ, from, n))
1766 return n;
1767
1768 while (n) {
1769 if (__get_user(c, f)) {
1770 memset(t, 0, n);
1771 break;
1772 }
1773 *t++ = c;
1774 f++;
1775 n--;
1776 }
1777 return n;
1778}
1779
b58fed8b 1780int copy_mount_options(const void __user * data, unsigned long *where)
1da177e4
LT
1781{
1782 int i;
1783 unsigned long page;
1784 unsigned long size;
b58fed8b 1785
1da177e4
LT
1786 *where = 0;
1787 if (!data)
1788 return 0;
1789
1790 if (!(page = __get_free_page(GFP_KERNEL)))
1791 return -ENOMEM;
1792
1793 /* We only care that *some* data at the address the user
1794 * gave us is valid. Just in case, we'll zero
1795 * the remainder of the page.
1796 */
1797 /* copy_from_user cannot cross TASK_SIZE ! */
1798 size = TASK_SIZE - (unsigned long)data;
1799 if (size > PAGE_SIZE)
1800 size = PAGE_SIZE;
1801
1802 i = size - exact_copy_from_user((void *)page, data, size);
1803 if (!i) {
b58fed8b 1804 free_page(page);
1da177e4
LT
1805 return -EFAULT;
1806 }
1807 if (i != PAGE_SIZE)
1808 memset((char *)page + i, 0, PAGE_SIZE - i);
1809 *where = page;
1810 return 0;
1811}
1812
1813/*
1814 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
1815 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
1816 *
1817 * data is a (void *) that can point to any structure up to
1818 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
1819 * information (or be NULL).
1820 *
1821 * Pre-0.97 versions of mount() didn't have a flags word.
1822 * When the flags word was introduced its top half was required
1823 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
1824 * Therefore, if this magic number is present, it carries no information
1825 * and must be discarded.
1826 */
b58fed8b 1827long do_mount(char *dev_name, char *dir_name, char *type_page,
1da177e4
LT
1828 unsigned long flags, void *data_page)
1829{
1830 struct nameidata nd;
1831 int retval = 0;
1832 int mnt_flags = 0;
1833
1834 /* Discard magic */
1835 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
1836 flags &= ~MS_MGC_MSK;
1837
1838 /* Basic sanity checks */
1839
1840 if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
1841 return -EINVAL;
1842 if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
1843 return -EINVAL;
1844
1845 if (data_page)
1846 ((char *)data_page)[PAGE_SIZE - 1] = 0;
1847
1848 /* Separate the per-mountpoint flags */
1849 if (flags & MS_NOSUID)
1850 mnt_flags |= MNT_NOSUID;
1851 if (flags & MS_NODEV)
1852 mnt_flags |= MNT_NODEV;
1853 if (flags & MS_NOEXEC)
1854 mnt_flags |= MNT_NOEXEC;
fc33a7bb
CH
1855 if (flags & MS_NOATIME)
1856 mnt_flags |= MNT_NOATIME;
1857 if (flags & MS_NODIRATIME)
1858 mnt_flags |= MNT_NODIRATIME;
47ae32d6
VH
1859 if (flags & MS_RELATIME)
1860 mnt_flags |= MNT_RELATIME;
2e4b7fcd
DH
1861 if (flags & MS_RDONLY)
1862 mnt_flags |= MNT_READONLY;
fc33a7bb
CH
1863
1864 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
8bf9725c 1865 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);
1da177e4
LT
1866
1867 /* ... and get the mountpoint */
1868 retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
1869 if (retval)
1870 return retval;
1871
b5266eb4
AV
1872 retval = security_sb_mount(dev_name, &nd.path,
1873 type_page, flags, data_page);
1da177e4
LT
1874 if (retval)
1875 goto dput_out;
1876
1877 if (flags & MS_REMOUNT)
1878 retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
1879 data_page);
1880 else if (flags & MS_BIND)
eee391a6 1881 retval = do_loopback(&nd, dev_name, flags & MS_REC);
9676f0c6 1882 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
07b20889 1883 retval = do_change_type(&nd, flags);
1da177e4
LT
1884 else if (flags & MS_MOVE)
1885 retval = do_move_mount(&nd, dev_name);
1886 else
1887 retval = do_new_mount(&nd, type_page, flags, mnt_flags,
1888 dev_name, data_page);
1889dput_out:
1d957f9b 1890 path_put(&nd.path);
1da177e4
LT
1891 return retval;
1892}
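
do_mount() separates the per-mountpoint MNT_* flags (nosuid, noatime, read-only, ...) from the operation selectors (MS_REMOUNT, MS_BIND, MS_MOVE, ...) before dispatching. Below is a hedged userspace sketch of the two branches most often used together: a recursive bind mount followed by a bind remount that flips only that mountpoint read-only. It requires root, /srv/data and /mnt/data are placeholder paths, and the read-only remount relies on the r/o bind mount support that introduces MNT_READONLY above.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Taken by the do_loopback() branch of do_mount(). */
	if (mount("/srv/data", "/mnt/data", NULL, MS_BIND | MS_REC, NULL) < 0) {
		perror("bind mount");
		return 1;
	}

	/* Taken by the do_remount() branch; MS_RDONLY becomes the
	 * per-mountpoint MNT_READONLY flag seen above. */
	if (mount(NULL, "/mnt/data", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) {
		perror("remount read-only");
		return 1;
	}
	return 0;
}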
1893
741a2951
JD
1894/*
1895 * Allocate a new namespace structure and populate it with contents
1896 * copied from the namespace of the passed in task structure.
1897 */
e3222c4e 1898static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
6b3286ed 1899 struct fs_struct *fs)
1da177e4 1900{
6b3286ed 1901 struct mnt_namespace *new_ns;
1da177e4 1902 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
1da177e4
LT
1903 struct vfsmount *p, *q;
1904
6b3286ed 1905 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
1da177e4 1906 if (!new_ns)
467e9f4b 1907 return ERR_PTR(-ENOMEM);
1da177e4
LT
1908
1909 atomic_set(&new_ns->count, 1);
1da177e4 1910 INIT_LIST_HEAD(&new_ns->list);
5addc5dd
AV
1911 init_waitqueue_head(&new_ns->poll);
1912 new_ns->event = 0;
1da177e4 1913
390c6843 1914 down_write(&namespace_sem);
1da177e4 1915 /* First pass: copy the tree topology */
6b3286ed 1916 new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
9676f0c6 1917 CL_COPY_ALL | CL_EXPIRE);
1da177e4 1918 if (!new_ns->root) {
390c6843 1919 up_write(&namespace_sem);
1da177e4 1920 kfree(new_ns);
467e9f4b 1921 		return ERR_PTR(-ENOMEM);
1da177e4
LT
1922 }
1923 spin_lock(&vfsmount_lock);
1924 list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
1925 spin_unlock(&vfsmount_lock);
1926
1927 /*
1928 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
1929 * as belonging to new namespace. We have already acquired a private
1930 * fs_struct, so tsk->fs->lock is not needed.
1931 */
6b3286ed 1932 p = mnt_ns->root;
1da177e4
LT
1933 q = new_ns->root;
1934 while (p) {
6b3286ed 1935 q->mnt_ns = new_ns;
1da177e4 1936 if (fs) {
6ac08c39 1937 if (p == fs->root.mnt) {
1da177e4 1938 rootmnt = p;
6ac08c39 1939 fs->root.mnt = mntget(q);
1da177e4 1940 }
6ac08c39 1941 if (p == fs->pwd.mnt) {
1da177e4 1942 pwdmnt = p;
6ac08c39 1943 fs->pwd.mnt = mntget(q);
1da177e4 1944 }
6ac08c39 1945 if (p == fs->altroot.mnt) {
1da177e4 1946 altrootmnt = p;
6ac08c39 1947 fs->altroot.mnt = mntget(q);
1da177e4
LT
1948 }
1949 }
6b3286ed 1950 p = next_mnt(p, mnt_ns->root);
1da177e4
LT
1951 q = next_mnt(q, new_ns->root);
1952 }
390c6843 1953 up_write(&namespace_sem);
1da177e4 1954
1da177e4
LT
1955 if (rootmnt)
1956 mntput(rootmnt);
1957 if (pwdmnt)
1958 mntput(pwdmnt);
1959 if (altrootmnt)
1960 mntput(altrootmnt);
1961
741a2951
JD
1962 return new_ns;
1963}
1964
213dd266 1965struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
e3222c4e 1966 struct fs_struct *new_fs)
741a2951 1967{
6b3286ed 1968 struct mnt_namespace *new_ns;
741a2951 1969
e3222c4e 1970 BUG_ON(!ns);
6b3286ed 1971 get_mnt_ns(ns);
741a2951
JD
1972
1973 if (!(flags & CLONE_NEWNS))
e3222c4e 1974 return ns;
741a2951 1975
e3222c4e 1976 new_ns = dup_mnt_ns(ns, new_fs);
741a2951 1977
6b3286ed 1978 put_mnt_ns(ns);
e3222c4e 1979 return new_ns;
1da177e4
LT
1980}
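
copy_mnt_ns() is reached from clone() and unshare(): without CLONE_NEWNS the caller simply takes a reference on the existing namespace, while with it dup_mnt_ns() builds a private copy of the whole mount tree. A minimal privileged example follows; it assumes the default (private) mount propagation of this kernel.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* CLONE_NEWNS makes the kernel run dup_mnt_ns() for this task. */
	if (unshare(CLONE_NEWNS) < 0) {
		perror("unshare(CLONE_NEWNS)");
		return 1;
	}

	/* With private propagation, this mount stays local to the new namespace. */
	if (mount("none", "/tmp", "tmpfs", 0, NULL) < 0)
		perror("mount");
	return 0;
}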
1981
1982asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
1983 char __user * type, unsigned long flags,
1984 void __user * data)
1985{
1986 int retval;
1987 unsigned long data_page;
1988 unsigned long type_page;
1989 unsigned long dev_page;
1990 char *dir_page;
1991
b58fed8b 1992 retval = copy_mount_options(type, &type_page);
1da177e4
LT
1993 if (retval < 0)
1994 return retval;
1995
1996 dir_page = getname(dir_name);
1997 retval = PTR_ERR(dir_page);
1998 if (IS_ERR(dir_page))
1999 goto out1;
2000
b58fed8b 2001 retval = copy_mount_options(dev_name, &dev_page);
1da177e4
LT
2002 if (retval < 0)
2003 goto out2;
2004
b58fed8b 2005 retval = copy_mount_options(data, &data_page);
1da177e4
LT
2006 if (retval < 0)
2007 goto out3;
2008
2009 lock_kernel();
b58fed8b
RP
2010 retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
2011 flags, (void *)data_page);
1da177e4
LT
2012 unlock_kernel();
2013 free_page(data_page);
2014
2015out3:
2016 free_page(dev_page);
2017out2:
2018 putname(dir_page);
2019out1:
2020 free_page(type_page);
2021 return retval;
2022}
2023
2024/*
 2025 * Replace fs->root with the given path and put the old value.
 2026 * It can block.
2027 */
ac748a09 2028void set_fs_root(struct fs_struct *fs, struct path *path)
1da177e4 2029{
6ac08c39
JB
2030 struct path old_root;
2031
1da177e4
LT
2032 write_lock(&fs->lock);
2033 old_root = fs->root;
ac748a09
JB
2034 fs->root = *path;
2035 path_get(path);
1da177e4 2036 write_unlock(&fs->lock);
6ac08c39
JB
2037 if (old_root.dentry)
2038 path_put(&old_root);
1da177e4
LT
2039}
2040
2041/*
 2042 * Replace fs->pwd with the given path and put the old value.
 2043 * It can block.
2044 */
ac748a09 2045void set_fs_pwd(struct fs_struct *fs, struct path *path)
1da177e4 2046{
6ac08c39 2047 struct path old_pwd;
1da177e4
LT
2048
2049 write_lock(&fs->lock);
2050 old_pwd = fs->pwd;
ac748a09
JB
2051 fs->pwd = *path;
2052 path_get(path);
1da177e4
LT
2053 write_unlock(&fs->lock);
2054
6ac08c39
JB
2055 if (old_pwd.dentry)
2056 path_put(&old_pwd);
1da177e4
LT
2057}
2058
1a390689 2059static void chroot_fs_refs(struct path *old_root, struct path *new_root)
1da177e4
LT
2060{
2061 struct task_struct *g, *p;
2062 struct fs_struct *fs;
2063
2064 read_lock(&tasklist_lock);
2065 do_each_thread(g, p) {
2066 task_lock(p);
2067 fs = p->fs;
2068 if (fs) {
2069 atomic_inc(&fs->count);
2070 task_unlock(p);
1a390689
AV
2071 if (fs->root.dentry == old_root->dentry
2072 && fs->root.mnt == old_root->mnt)
2073 set_fs_root(fs, new_root);
2074 if (fs->pwd.dentry == old_root->dentry
2075 && fs->pwd.mnt == old_root->mnt)
2076 set_fs_pwd(fs, new_root);
1da177e4
LT
2077 put_fs_struct(fs);
2078 } else
2079 task_unlock(p);
2080 } while_each_thread(g, p);
2081 read_unlock(&tasklist_lock);
2082}
2083
2084/*
2085 * pivot_root Semantics:
2086 * Moves the root file system of the current process to the directory put_old,
 2087 * makes new_root the new root file system of the current process, and sets
2088 * root/cwd of all processes which had them on the current root to new_root.
2089 *
2090 * Restrictions:
2091 * The new_root and put_old must be directories, and must not be on the
2092 * same file system as the current process root. The put_old must be
2093 * underneath new_root, i.e. adding a non-zero number of /.. to the string
2094 * pointed to by put_old must yield the same directory as new_root. No other
2095 * file system may be mounted on put_old. After all, new_root is a mountpoint.
2096 *
4a0d11fa
NB
2097 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2098 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2099 * in this situation.
2100 *
1da177e4
LT
2101 * Notes:
2102 * - we don't move root/cwd if they are not at the root (reason: if something
2103 * cared enough to change them, it's probably wrong to force them elsewhere)
2104 * - it's okay to pick a root that isn't the root of a file system, e.g.
2105 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2106 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2107 * first.
2108 */
b58fed8b
RP
2109asmlinkage long sys_pivot_root(const char __user * new_root,
2110 const char __user * put_old)
1da177e4
LT
2111{
2112 struct vfsmount *tmp;
8c3ee42e
AV
2113 struct nameidata new_nd, old_nd;
2114 struct path parent_path, root_parent, root;
1da177e4
LT
2115 int error;
2116
2117 if (!capable(CAP_SYS_ADMIN))
2118 return -EPERM;
2119
b58fed8b
RP
2120 error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
2121 &new_nd);
1da177e4
LT
2122 if (error)
2123 goto out0;
2124 error = -EINVAL;
4ac91378 2125 if (!check_mnt(new_nd.path.mnt))
1da177e4
LT
2126 goto out1;
2127
b58fed8b 2128 error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
1da177e4
LT
2129 if (error)
2130 goto out1;
2131
b5266eb4 2132 error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
1da177e4 2133 if (error) {
1d957f9b 2134 path_put(&old_nd.path);
1da177e4
LT
2135 goto out1;
2136 }
2137
2138 read_lock(&current->fs->lock);
8c3ee42e 2139 root = current->fs->root;
6ac08c39 2140 path_get(&current->fs->root);
1da177e4 2141 read_unlock(&current->fs->lock);
390c6843 2142 down_write(&namespace_sem);
4ac91378 2143 mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
1da177e4 2144 error = -EINVAL;
4ac91378
JB
2145 if (IS_MNT_SHARED(old_nd.path.mnt) ||
2146 IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
8c3ee42e 2147 IS_MNT_SHARED(root.mnt->mnt_parent))
21444403 2148 goto out2;
8c3ee42e 2149 if (!check_mnt(root.mnt))
1da177e4
LT
2150 goto out2;
2151 error = -ENOENT;
4ac91378 2152 if (IS_DEADDIR(new_nd.path.dentry->d_inode))
1da177e4 2153 goto out2;
4ac91378 2154 if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
1da177e4 2155 goto out2;
4ac91378 2156 if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
1da177e4
LT
2157 goto out2;
2158 error = -EBUSY;
8c3ee42e
AV
2159 if (new_nd.path.mnt == root.mnt ||
2160 old_nd.path.mnt == root.mnt)
1da177e4
LT
2161 goto out2; /* loop, on the same file system */
2162 error = -EINVAL;
8c3ee42e 2163 if (root.mnt->mnt_root != root.dentry)
1da177e4 2164 goto out2; /* not a mountpoint */
8c3ee42e 2165 if (root.mnt->mnt_parent == root.mnt)
0bb6fcc1 2166 goto out2; /* not attached */
4ac91378 2167 if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
1da177e4 2168 goto out2; /* not a mountpoint */
4ac91378 2169 if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
0bb6fcc1 2170 goto out2; /* not attached */
4ac91378
JB
2171 /* make sure we can reach put_old from new_root */
2172 tmp = old_nd.path.mnt;
1da177e4 2173 spin_lock(&vfsmount_lock);
4ac91378 2174 if (tmp != new_nd.path.mnt) {
1da177e4
LT
2175 for (;;) {
2176 if (tmp->mnt_parent == tmp)
2177 goto out3; /* already mounted on put_old */
4ac91378 2178 if (tmp->mnt_parent == new_nd.path.mnt)
1da177e4
LT
2179 break;
2180 tmp = tmp->mnt_parent;
2181 }
4ac91378 2182 if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
1da177e4 2183 goto out3;
4ac91378 2184 } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
1da177e4 2185 goto out3;
1a390689 2186 detach_mnt(new_nd.path.mnt, &parent_path);
8c3ee42e 2187 detach_mnt(root.mnt, &root_parent);
4ac91378 2188 /* mount old root on put_old */
8c3ee42e 2189 attach_mnt(root.mnt, &old_nd.path);
4ac91378
JB
2190 /* mount new_root on / */
2191 attach_mnt(new_nd.path.mnt, &root_parent);
6b3286ed 2192 touch_mnt_namespace(current->nsproxy->mnt_ns);
1da177e4 2193 spin_unlock(&vfsmount_lock);
8c3ee42e
AV
2194 chroot_fs_refs(&root, &new_nd.path);
2195 security_sb_post_pivotroot(&root, &new_nd.path);
1da177e4 2196 error = 0;
1a390689
AV
2197 path_put(&root_parent);
2198 path_put(&parent_path);
1da177e4 2199out2:
4ac91378 2200 mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
390c6843 2201 up_write(&namespace_sem);
8c3ee42e 2202 path_put(&root);
1d957f9b 2203 path_put(&old_nd.path);
1da177e4 2204out1:
1d957f9b 2205 path_put(&new_nd.path);
1da177e4 2206out0:
1da177e4
LT
2207 return error;
2208out3:
2209 spin_unlock(&vfsmount_lock);
2210 goto out2;
2211}
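
glibc provides no pivot_root() wrapper, so userspace goes through syscall(2). Below is a hedged sketch of the classic sequence the restrictions above are written for; /newroot must already be a mountpoint and /newroot/put_old an existing directory, and both paths are placeholders.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mount.h>

int main(void)
{
	if (chdir("/newroot") < 0) {
		perror("chdir");
		return 1;
	}
	/* new_root is the cwd, put_old is given relative to it. */
	if (syscall(SYS_pivot_root, ".", "put_old") < 0) {
		perror("pivot_root");
		return 1;
	}
	if (chroot(".") < 0 || chdir("/") < 0) {
		perror("chroot/chdir");
		return 1;
	}
	/* The old root is now reachable at /put_old and can be detached. */
	if (umount2("/put_old", MNT_DETACH) < 0)
		perror("umount2");
	return 0;
}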
2212
2213static void __init init_mount_tree(void)
2214{
2215 struct vfsmount *mnt;
6b3286ed 2216 struct mnt_namespace *ns;
ac748a09 2217 struct path root;
1da177e4
LT
2218
2219 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2220 if (IS_ERR(mnt))
2221 panic("Can't create rootfs");
6b3286ed
KK
2222 ns = kmalloc(sizeof(*ns), GFP_KERNEL);
2223 if (!ns)
1da177e4 2224 panic("Can't allocate initial namespace");
6b3286ed
KK
2225 atomic_set(&ns->count, 1);
2226 INIT_LIST_HEAD(&ns->list);
2227 init_waitqueue_head(&ns->poll);
2228 ns->event = 0;
2229 list_add(&mnt->mnt_list, &ns->list);
2230 ns->root = mnt;
2231 mnt->mnt_ns = ns;
2232
2233 init_task.nsproxy->mnt_ns = ns;
2234 get_mnt_ns(ns);
2235
ac748a09
JB
2236 root.mnt = ns->root;
2237 root.dentry = ns->root->mnt_root;
2238
2239 set_fs_pwd(current->fs, &root);
2240 set_fs_root(current->fs, &root);
1da177e4
LT
2241}
2242
74bf17cf 2243void __init mnt_init(void)
1da177e4 2244{
13f14b4d 2245 unsigned u;
15a67dd8 2246 int err;
1da177e4 2247
390c6843
RP
2248 init_rwsem(&namespace_sem);
2249
1da177e4 2250 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
20c2df83 2251 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1da177e4 2252
b58fed8b 2253 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
1da177e4
LT
2254
2255 if (!mount_hashtable)
2256 panic("Failed to allocate mount hash table\n");
2257
13f14b4d
ED
2258 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
2259
2260 for (u = 0; u < HASH_SIZE; u++)
2261 INIT_LIST_HEAD(&mount_hashtable[u]);
1da177e4 2262
15a67dd8
RD
2263 err = sysfs_init();
2264 if (err)
2265 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
2266 __FUNCTION__, err);
00d26666
GKH
2267 fs_kobj = kobject_create_and_add("fs", NULL);
2268 if (!fs_kobj)
2269 printk(KERN_WARNING "%s: kobj create error\n", __FUNCTION__);
1da177e4
LT
2270 init_rootfs();
2271 init_mount_tree();
2272}
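
mount_hashtable deliberately occupies a single page: HASH_SHIFT is ilog2(PAGE_SIZE / sizeof(struct list_head)), so with the common 4 KiB page and a 16-byte list_head that works out to 256 buckets. A quick back-of-the-envelope check follows; both sizes are assumptions and vary by architecture.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned long bucket_size = 16;		/* struct list_head: two 64-bit pointers */
	unsigned long hash_size = page_size / bucket_size;

	/* Matches the "Mount-cache hash table entries" line printed by mnt_init(). */
	printf("HASH_SIZE = %lu buckets in one page\n", hash_size);
	return 0;
}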
2273
6b3286ed 2274void __put_mnt_ns(struct mnt_namespace *ns)
1da177e4 2275{
6b3286ed 2276 struct vfsmount *root = ns->root;
70fbcdf4 2277 LIST_HEAD(umount_list);
6b3286ed 2278 ns->root = NULL;
1ce88cf4 2279 spin_unlock(&vfsmount_lock);
390c6843 2280 down_write(&namespace_sem);
1da177e4 2281 spin_lock(&vfsmount_lock);
a05964f3 2282 umount_tree(root, 0, &umount_list);
1da177e4 2283 spin_unlock(&vfsmount_lock);
390c6843 2284 up_write(&namespace_sem);
70fbcdf4 2285 release_mounts(&umount_list);
6b3286ed 2286 kfree(ns);
1da177e4 2287}