bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/namespace.c
1da177e4
LT
1/*
2 * linux/fs/namespace.c
3 *
4 * (C) Copyright Al Viro 2000, 2001
5 * Released under GPL v2.
6 *
7 * Based on code from fs/super.c, copyright Linus Torvalds and others.
8 * Heavily rewritten.
9 */
10
1da177e4
LT
11#include <linux/syscalls.h>
12#include <linux/slab.h>
13#include <linux/sched.h>
14#include <linux/smp_lock.h>
15#include <linux/init.h>
15a67dd8 16#include <linux/kernel.h>
1da177e4 17#include <linux/acct.h>
16f7e0fe 18#include <linux/capability.h>
3d733633 19#include <linux/cpumask.h>
1da177e4 20#include <linux/module.h>
f20a9ead 21#include <linux/sysfs.h>
1da177e4 22#include <linux/seq_file.h>
6b3286ed 23#include <linux/mnt_namespace.h>
1da177e4
LT
24#include <linux/namei.h>
25#include <linux/security.h>
26#include <linux/mount.h>
07f3f05c 27#include <linux/ramfs.h>
13f14b4d 28#include <linux/log2.h>
73cd49ec 29#include <linux/idr.h>
5ad4e53b 30#include <linux/fs_struct.h>
1da177e4
LT
31#include <asm/uaccess.h>
32#include <asm/unistd.h>
07b20889 33#include "pnode.h"
948730b0 34#include "internal.h"
1da177e4 35
13f14b4d
ED
36#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
37#define HASH_SIZE (1UL << HASH_SHIFT)
38
1da177e4 39/* spinlock for vfsmount-related operations, in place of dcache_lock */
5addc5dd
AV
40__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
41
42static int event;
73cd49ec 43static DEFINE_IDA(mnt_id_ida);
719f5d7f 44static DEFINE_IDA(mnt_group_ida);
1da177e4 45
fa3536cc 46static struct list_head *mount_hashtable __read_mostly;
e18b890b 47static struct kmem_cache *mnt_cache __read_mostly;
390c6843 48static struct rw_semaphore namespace_sem;
1da177e4 49
f87fd4c2 50/* /sys/fs */
00d26666
GKH
51struct kobject *fs_kobj;
52EXPORT_SYMBOL_GPL(fs_kobj);
f87fd4c2 53
1da177e4
LT
54static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
55{
b58fed8b
RP
56 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
57 tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
13f14b4d
ED
58 tmp = tmp + (tmp >> HASH_SHIFT);
59 return tmp & (HASH_SIZE - 1);
1da177e4
LT
60}
61
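/*
 * Worked example (assuming a 64-bit build with 4 KB pages, where
 * sizeof(struct list_head) == 16): PAGE_SIZE / sizeof(struct list_head)
 * is 256, so HASH_SHIFT == 8 and HASH_SIZE == 256, i.e. a table of
 * list_heads that fills exactly one page.  hash() above then drops the
 * cache-line-offset bits of both pointers, folds the upper bits down once
 * via the ">> HASH_SHIFT" add, and masks the sum to the table size.
 */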
3d733633
DH
62#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
63
73cd49ec
MS
64/* allocation is serialized by namespace_sem */
65static int mnt_alloc_id(struct vfsmount *mnt)
66{
67 int res;
68
69retry:
70 ida_pre_get(&mnt_id_ida, GFP_KERNEL);
71 spin_lock(&vfsmount_lock);
72 res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
73 spin_unlock(&vfsmount_lock);
74 if (res == -EAGAIN)
75 goto retry;
76
77 return res;
78}
79
80static void mnt_free_id(struct vfsmount *mnt)
81{
82 spin_lock(&vfsmount_lock);
83 ida_remove(&mnt_id_ida, mnt->mnt_id);
84 spin_unlock(&vfsmount_lock);
85}
86
719f5d7f
MS
87/*
88 * Allocate a new peer group ID
89 *
90 * mnt_group_ida is protected by namespace_sem
91 */
92static int mnt_alloc_group_id(struct vfsmount *mnt)
93{
94 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
95 return -ENOMEM;
96
97 return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
98}
99
100/*
101 * Release a peer group ID
102 */
103void mnt_release_group_id(struct vfsmount *mnt)
104{
105 ida_remove(&mnt_group_ida, mnt->mnt_group_id);
106 mnt->mnt_group_id = 0;
107}
108
1da177e4
LT
109struct vfsmount *alloc_vfsmnt(const char *name)
110{
c3762229 111 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
1da177e4 112 if (mnt) {
73cd49ec
MS
113 int err;
114
115 err = mnt_alloc_id(mnt);
88b38782
LZ
116 if (err)
117 goto out_free_cache;
118
119 if (name) {
120 mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
121 if (!mnt->mnt_devname)
122 goto out_free_id;
73cd49ec
MS
123 }
124
b58fed8b 125 atomic_set(&mnt->mnt_count, 1);
1da177e4
LT
126 INIT_LIST_HEAD(&mnt->mnt_hash);
127 INIT_LIST_HEAD(&mnt->mnt_child);
128 INIT_LIST_HEAD(&mnt->mnt_mounts);
129 INIT_LIST_HEAD(&mnt->mnt_list);
55e700b9 130 INIT_LIST_HEAD(&mnt->mnt_expire);
03e06e68 131 INIT_LIST_HEAD(&mnt->mnt_share);
a58b0eb8
RP
132 INIT_LIST_HEAD(&mnt->mnt_slave_list);
133 INIT_LIST_HEAD(&mnt->mnt_slave);
3d733633 134 atomic_set(&mnt->__mnt_writers, 0);
1da177e4
LT
135 }
136 return mnt;
88b38782
LZ
137
138out_free_id:
139 mnt_free_id(mnt);
140out_free_cache:
141 kmem_cache_free(mnt_cache, mnt);
142 return NULL;
1da177e4
LT
143}
144
3d733633
DH
145/*
146 * Most r/o checks on a fs are for operations that take
147 * discrete amounts of time, like a write() or unlink().
148 * We must keep track of when those operations start
149 * (for permission checks) and when they end, so that
150 * we can determine when writes are able to occur to
151 * a filesystem.
152 */
153/*
154 * __mnt_is_readonly: check whether a mount is read-only
155 * @mnt: the mount to check for its write status
156 *
 157 * This shouldn't be used directly outside of the VFS.
158 * It does not guarantee that the filesystem will stay
 159 * r/w, just that it is r/w right *now*. This cannot and
160 * should not be used in place of IS_RDONLY(inode).
161 * mnt_want/drop_write() will _keep_ the filesystem
162 * r/w.
163 */
164int __mnt_is_readonly(struct vfsmount *mnt)
165{
2e4b7fcd
DH
166 if (mnt->mnt_flags & MNT_READONLY)
167 return 1;
168 if (mnt->mnt_sb->s_flags & MS_RDONLY)
169 return 1;
170 return 0;
3d733633
DH
171}
172EXPORT_SYMBOL_GPL(__mnt_is_readonly);
173
174struct mnt_writer {
175 /*
176 * If holding multiple instances of this lock, they
177 * must be ordered by cpu number.
178 */
179 spinlock_t lock;
180 struct lock_class_key lock_class; /* compiles out with !lockdep */
181 unsigned long count;
182 struct vfsmount *mnt;
183} ____cacheline_aligned_in_smp;
184static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
185
186static int __init init_mnt_writers(void)
187{
188 int cpu;
189 for_each_possible_cpu(cpu) {
190 struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
191 spin_lock_init(&writer->lock);
192 lockdep_set_class(&writer->lock, &writer->lock_class);
193 writer->count = 0;
194 }
195 return 0;
196}
197fs_initcall(init_mnt_writers);
198
199static void unlock_mnt_writers(void)
200{
201 int cpu;
202 struct mnt_writer *cpu_writer;
203
204 for_each_possible_cpu(cpu) {
205 cpu_writer = &per_cpu(mnt_writers, cpu);
206 spin_unlock(&cpu_writer->lock);
207 }
208}
209
210static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
211{
212 if (!cpu_writer->mnt)
213 return;
214 /*
215 * This is in case anyone ever leaves an invalid,
216 * old ->mnt and a count of 0.
217 */
218 if (!cpu_writer->count)
219 return;
220 atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
221 cpu_writer->count = 0;
222}
223 /*
224 * must hold cpu_writer->lock
225 */
226static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
227 struct vfsmount *mnt)
228{
229 if (cpu_writer->mnt == mnt)
230 return;
231 __clear_mnt_count(cpu_writer);
232 cpu_writer->mnt = mnt;
233}
234
8366025e
DH
235/*
236 * Most r/o checks on a fs are for operations that take
237 * discrete amounts of time, like a write() or unlink().
238 * We must keep track of when those operations start
239 * (for permission checks) and when they end, so that
240 * we can determine when writes are able to occur to
241 * a filesystem.
242 */
243/**
244 * mnt_want_write - get write access to a mount
245 * @mnt: the mount on which to take a write
246 *
247 * This tells the low-level filesystem that a write is
248 * about to be performed to it, and makes sure that
249 * writes are allowed before returning success. When
250 * the write operation is finished, mnt_drop_write()
251 * must be called. This is effectively a refcount.
252 */
253int mnt_want_write(struct vfsmount *mnt)
254{
3d733633
DH
255 int ret = 0;
256 struct mnt_writer *cpu_writer;
257
258 cpu_writer = &get_cpu_var(mnt_writers);
259 spin_lock(&cpu_writer->lock);
260 if (__mnt_is_readonly(mnt)) {
261 ret = -EROFS;
262 goto out;
263 }
264 use_cpu_writer_for_mount(cpu_writer, mnt);
265 cpu_writer->count++;
266out:
267 spin_unlock(&cpu_writer->lock);
268 put_cpu_var(mnt_writers);
269 return ret;
8366025e
DH
270}
271EXPORT_SYMBOL_GPL(mnt_want_write);
272
3d733633
DH
273static void lock_mnt_writers(void)
274{
275 int cpu;
276 struct mnt_writer *cpu_writer;
277
278 for_each_possible_cpu(cpu) {
279 cpu_writer = &per_cpu(mnt_writers, cpu);
280 spin_lock(&cpu_writer->lock);
281 __clear_mnt_count(cpu_writer);
282 cpu_writer->mnt = NULL;
283 }
284}
285
286/*
287 * These per-cpu write counts are not guaranteed to have
288 * matched increments and decrements on any given cpu.
289 * A file open()ed for write on one cpu and close()d on
290 * another cpu will imbalance this count. Make sure it
291 * does not get too far out of whack.
292 */
293static void handle_write_count_underflow(struct vfsmount *mnt)
294{
295 if (atomic_read(&mnt->__mnt_writers) >=
296 MNT_WRITER_UNDERFLOW_LIMIT)
297 return;
298 /*
299 * It isn't necessary to hold all of the locks
300 * at the same time, but doing it this way makes
301 * us share a lot more code.
302 */
303 lock_mnt_writers();
304 /*
305 * vfsmount_lock is for mnt_flags.
306 */
307 spin_lock(&vfsmount_lock);
308 /*
309 * If coalescing the per-cpu writer counts did not
310 * get us back to a positive writer count, we have
311 * a bug.
312 */
313 if ((atomic_read(&mnt->__mnt_writers) < 0) &&
314 !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
5c752ad9 315 WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
3d733633
DH
316 "count: %d\n",
317 mnt, atomic_read(&mnt->__mnt_writers));
3d733633
DH
318 /* use the flag to keep the dmesg spam down */
319 mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
320 }
321 spin_unlock(&vfsmount_lock);
322 unlock_mnt_writers();
323}
324
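/*
 * Illustrative scenario (a sketch of how the imbalance described above
 * arises, not tied to any particular filesystem): a task calls
 * mnt_want_write() on CPU0, bumping CPU0's cpu_writer->count for this
 * mount, then migrates and calls mnt_drop_write() on CPU1.  CPU1's
 * cpu_writer is tracking another mount or has a count of 0, so the else
 * branch in mnt_drop_write() decrements __mnt_writers directly and the
 * global count can drift negative.  The +1 still parked on CPU0 is folded
 * back in by __clear_mnt_count() the next time that CPU's writer is locked
 * or reused, which is why handle_write_count_underflow() only coalesces
 * and re-checks once the drift passes MNT_WRITER_UNDERFLOW_LIMIT.
 */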
8366025e
DH
325/**
326 * mnt_drop_write - give up write access to a mount
327 * @mnt: the mount on which to give up write access
328 *
329 * Tells the low-level filesystem that we are done
330 * performing writes to it. Must be matched with
331 * mnt_want_write() call above.
332 */
333void mnt_drop_write(struct vfsmount *mnt)
334{
3d733633
DH
335 int must_check_underflow = 0;
336 struct mnt_writer *cpu_writer;
337
338 cpu_writer = &get_cpu_var(mnt_writers);
339 spin_lock(&cpu_writer->lock);
340
341 use_cpu_writer_for_mount(cpu_writer, mnt);
342 if (cpu_writer->count > 0) {
343 cpu_writer->count--;
344 } else {
345 must_check_underflow = 1;
346 atomic_dec(&mnt->__mnt_writers);
347 }
348
349 spin_unlock(&cpu_writer->lock);
350 /*
351 * Logically, we could call this each time,
352 * but the __mnt_writers cacheline tends to
353 * be cold, and makes this expensive.
354 */
355 if (must_check_underflow)
356 handle_write_count_underflow(mnt);
357 /*
358 * This could be done right after the spinlock
359 * is taken because the spinlock keeps us on
360 * the cpu, and disables preemption. However,
361 * putting it here bounds the amount that
362 * __mnt_writers can underflow. Without it,
363 * we could theoretically wrap __mnt_writers.
364 */
365 put_cpu_var(mnt_writers);
8366025e
DH
366}
367EXPORT_SYMBOL_GPL(mnt_drop_write);
368
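/*
 * Minimal usage sketch (hypothetical helper, not an in-tree caller): every
 * write-side operation brackets its work with the pair above, so the
 * per-mount writer accounting stays balanced and a r/o remount can be
 * refused while writes are in flight.
 */
static int __maybe_unused example_write_op(struct vfsmount *mnt)
{
	int err;

	err = mnt_want_write(mnt);	/* -EROFS if the mount is read-only */
	if (err)
		return err;
	/* ... modify the filesystem here ... */
	mnt_drop_write(mnt);		/* must balance mnt_want_write() */
	return 0;
}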
2e4b7fcd 369static int mnt_make_readonly(struct vfsmount *mnt)
8366025e 370{
3d733633
DH
371 int ret = 0;
372
373 lock_mnt_writers();
374 /*
375 * With all the locks held, this value is stable
376 */
377 if (atomic_read(&mnt->__mnt_writers) > 0) {
378 ret = -EBUSY;
379 goto out;
380 }
381 /*
2e4b7fcd
DH
382 * nobody can do a successful mnt_want_write() with all
383 * of the counts in MNT_DENIED_WRITE and the locks held.
3d733633 384 */
2e4b7fcd
DH
385 spin_lock(&vfsmount_lock);
386 if (!ret)
387 mnt->mnt_flags |= MNT_READONLY;
388 spin_unlock(&vfsmount_lock);
3d733633
DH
389out:
390 unlock_mnt_writers();
391 return ret;
8366025e 392}
8366025e 393
2e4b7fcd
DH
394static void __mnt_unmake_readonly(struct vfsmount *mnt)
395{
396 spin_lock(&vfsmount_lock);
397 mnt->mnt_flags &= ~MNT_READONLY;
398 spin_unlock(&vfsmount_lock);
399}
400
a3ec947c 401void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
454e2398
DH
402{
403 mnt->mnt_sb = sb;
404 mnt->mnt_root = dget(sb->s_root);
454e2398
DH
405}
406
407EXPORT_SYMBOL(simple_set_mnt);
408
1da177e4
LT
409void free_vfsmnt(struct vfsmount *mnt)
410{
411 kfree(mnt->mnt_devname);
73cd49ec 412 mnt_free_id(mnt);
1da177e4
LT
413 kmem_cache_free(mnt_cache, mnt);
414}
415
416/*
a05964f3
RP
417 * find the first or last mount at @dentry on vfsmount @mnt depending on
 418 * @dir. If @dir is set, return the first mount; else return the last mount.
1da177e4 419 */
a05964f3
RP
420struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
421 int dir)
1da177e4 422{
b58fed8b
RP
423 struct list_head *head = mount_hashtable + hash(mnt, dentry);
424 struct list_head *tmp = head;
1da177e4
LT
425 struct vfsmount *p, *found = NULL;
426
1da177e4 427 for (;;) {
a05964f3 428 tmp = dir ? tmp->next : tmp->prev;
1da177e4
LT
429 p = NULL;
430 if (tmp == head)
431 break;
432 p = list_entry(tmp, struct vfsmount, mnt_hash);
433 if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
a05964f3 434 found = p;
1da177e4
LT
435 break;
436 }
437 }
1da177e4
LT
438 return found;
439}
440
a05964f3
RP
441/*
442 * lookup_mnt increments the ref count before returning
443 * the vfsmount struct.
444 */
1c755af4 445struct vfsmount *lookup_mnt(struct path *path)
a05964f3
RP
446{
447 struct vfsmount *child_mnt;
448 spin_lock(&vfsmount_lock);
1c755af4 449 if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
a05964f3
RP
450 mntget(child_mnt);
451 spin_unlock(&vfsmount_lock);
452 return child_mnt;
453}
454
1da177e4
LT
455static inline int check_mnt(struct vfsmount *mnt)
456{
6b3286ed 457 return mnt->mnt_ns == current->nsproxy->mnt_ns;
1da177e4
LT
458}
459
6b3286ed 460static void touch_mnt_namespace(struct mnt_namespace *ns)
5addc5dd
AV
461{
462 if (ns) {
463 ns->event = ++event;
464 wake_up_interruptible(&ns->poll);
465 }
466}
467
6b3286ed 468static void __touch_mnt_namespace(struct mnt_namespace *ns)
5addc5dd
AV
469{
470 if (ns && ns->event != event) {
471 ns->event = event;
472 wake_up_interruptible(&ns->poll);
473 }
474}
475
1a390689 476static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
1da177e4 477{
1a390689
AV
478 old_path->dentry = mnt->mnt_mountpoint;
479 old_path->mnt = mnt->mnt_parent;
1da177e4
LT
480 mnt->mnt_parent = mnt;
481 mnt->mnt_mountpoint = mnt->mnt_root;
482 list_del_init(&mnt->mnt_child);
483 list_del_init(&mnt->mnt_hash);
1a390689 484 old_path->dentry->d_mounted--;
1da177e4
LT
485}
486
b90fa9ae
RP
487void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
488 struct vfsmount *child_mnt)
489{
490 child_mnt->mnt_parent = mntget(mnt);
491 child_mnt->mnt_mountpoint = dget(dentry);
492 dentry->d_mounted++;
493}
494
1a390689 495static void attach_mnt(struct vfsmount *mnt, struct path *path)
1da177e4 496{
1a390689 497 mnt_set_mountpoint(path->mnt, path->dentry, mnt);
b90fa9ae 498 list_add_tail(&mnt->mnt_hash, mount_hashtable +
1a390689
AV
499 hash(path->mnt, path->dentry));
500 list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
b90fa9ae
RP
501}
502
503/*
504 * the caller must hold vfsmount_lock
505 */
506static void commit_tree(struct vfsmount *mnt)
507{
508 struct vfsmount *parent = mnt->mnt_parent;
509 struct vfsmount *m;
510 LIST_HEAD(head);
6b3286ed 511 struct mnt_namespace *n = parent->mnt_ns;
b90fa9ae
RP
512
513 BUG_ON(parent == mnt);
514
515 list_add_tail(&head, &mnt->mnt_list);
516 list_for_each_entry(m, &head, mnt_list)
6b3286ed 517 m->mnt_ns = n;
b90fa9ae
RP
518 list_splice(&head, n->list.prev);
519
520 list_add_tail(&mnt->mnt_hash, mount_hashtable +
521 hash(parent, mnt->mnt_mountpoint));
522 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
6b3286ed 523 touch_mnt_namespace(n);
1da177e4
LT
524}
525
526static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
527{
528 struct list_head *next = p->mnt_mounts.next;
529 if (next == &p->mnt_mounts) {
530 while (1) {
531 if (p == root)
532 return NULL;
533 next = p->mnt_child.next;
534 if (next != &p->mnt_parent->mnt_mounts)
535 break;
536 p = p->mnt_parent;
537 }
538 }
539 return list_entry(next, struct vfsmount, mnt_child);
540}
541
9676f0c6
RP
542static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
543{
544 struct list_head *prev = p->mnt_mounts.prev;
545 while (prev != &p->mnt_mounts) {
546 p = list_entry(prev, struct vfsmount, mnt_child);
547 prev = p->mnt_mounts.prev;
548 }
549 return p;
550}
551
36341f64
RP
552static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
553 int flag)
1da177e4
LT
554{
555 struct super_block *sb = old->mnt_sb;
556 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
557
558 if (mnt) {
719f5d7f
MS
559 if (flag & (CL_SLAVE | CL_PRIVATE))
560 mnt->mnt_group_id = 0; /* not a peer of original */
561 else
562 mnt->mnt_group_id = old->mnt_group_id;
563
564 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
565 int err = mnt_alloc_group_id(mnt);
566 if (err)
567 goto out_free;
568 }
569
1da177e4
LT
570 mnt->mnt_flags = old->mnt_flags;
571 atomic_inc(&sb->s_active);
572 mnt->mnt_sb = sb;
573 mnt->mnt_root = dget(root);
574 mnt->mnt_mountpoint = mnt->mnt_root;
575 mnt->mnt_parent = mnt;
b90fa9ae 576
5afe0022
RP
577 if (flag & CL_SLAVE) {
578 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
579 mnt->mnt_master = old;
580 CLEAR_MNT_SHARED(mnt);
8aec0809 581 } else if (!(flag & CL_PRIVATE)) {
5afe0022
RP
582 if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
583 list_add(&mnt->mnt_share, &old->mnt_share);
584 if (IS_MNT_SLAVE(old))
585 list_add(&mnt->mnt_slave, &old->mnt_slave);
586 mnt->mnt_master = old->mnt_master;
587 }
b90fa9ae
RP
588 if (flag & CL_MAKE_SHARED)
589 set_mnt_shared(mnt);
1da177e4
LT
590
591 /* stick the duplicate mount on the same expiry list
592 * as the original if that was on one */
36341f64 593 if (flag & CL_EXPIRE) {
36341f64
RP
594 if (!list_empty(&old->mnt_expire))
595 list_add(&mnt->mnt_expire, &old->mnt_expire);
36341f64 596 }
1da177e4
LT
597 }
598 return mnt;
719f5d7f
MS
599
600 out_free:
601 free_vfsmnt(mnt);
602 return NULL;
1da177e4
LT
603}
604
7b7b1ace 605static inline void __mntput(struct vfsmount *mnt)
1da177e4 606{
3d733633 607 int cpu;
1da177e4 608 struct super_block *sb = mnt->mnt_sb;
3d733633
DH
609 /*
610 * We don't have to hold all of the locks at the
611 * same time here because we know that we're the
612 * last reference to mnt and that no new writers
613 * can come in.
614 */
615 for_each_possible_cpu(cpu) {
616 struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
3d733633 617 spin_lock(&cpu_writer->lock);
1a88b536
AV
618 if (cpu_writer->mnt != mnt) {
619 spin_unlock(&cpu_writer->lock);
620 continue;
621 }
3d733633
DH
622 atomic_add(cpu_writer->count, &mnt->__mnt_writers);
623 cpu_writer->count = 0;
624 /*
625 * Might as well do this so that no one
626 * ever sees the pointer and expects
627 * it to be valid.
628 */
629 cpu_writer->mnt = NULL;
630 spin_unlock(&cpu_writer->lock);
631 }
632 /*
633 * This probably indicates that somebody messed
634 * up a mnt_want/drop_write() pair. If this
635 * happens, the filesystem was probably unable
636 * to make r/w->r/o transitions.
637 */
638 WARN_ON(atomic_read(&mnt->__mnt_writers));
1da177e4
LT
639 dput(mnt->mnt_root);
640 free_vfsmnt(mnt);
641 deactivate_super(sb);
642}
643
7b7b1ace
AV
644void mntput_no_expire(struct vfsmount *mnt)
645{
646repeat:
647 if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
648 if (likely(!mnt->mnt_pinned)) {
649 spin_unlock(&vfsmount_lock);
650 __mntput(mnt);
651 return;
652 }
653 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
654 mnt->mnt_pinned = 0;
655 spin_unlock(&vfsmount_lock);
656 acct_auto_close_mnt(mnt);
657 security_sb_umount_close(mnt);
658 goto repeat;
659 }
660}
661
662EXPORT_SYMBOL(mntput_no_expire);
663
664void mnt_pin(struct vfsmount *mnt)
665{
666 spin_lock(&vfsmount_lock);
667 mnt->mnt_pinned++;
668 spin_unlock(&vfsmount_lock);
669}
670
671EXPORT_SYMBOL(mnt_pin);
672
673void mnt_unpin(struct vfsmount *mnt)
674{
675 spin_lock(&vfsmount_lock);
676 if (mnt->mnt_pinned) {
677 atomic_inc(&mnt->mnt_count);
678 mnt->mnt_pinned--;
679 }
680 spin_unlock(&vfsmount_lock);
681}
682
683EXPORT_SYMBOL(mnt_unpin);
1da177e4 684
b3b304a2
MS
685static inline void mangle(struct seq_file *m, const char *s)
686{
687 seq_escape(m, s, " \t\n\\");
688}
689
690/*
691 * Simple .show_options callback for filesystems which don't want to
692 * implement more complex mount option showing.
693 *
694 * See also save_mount_options().
695 */
696int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
697{
2a32cebd
AV
698 const char *options;
699
700 rcu_read_lock();
701 options = rcu_dereference(mnt->mnt_sb->s_options);
b3b304a2
MS
702
703 if (options != NULL && options[0]) {
704 seq_putc(m, ',');
705 mangle(m, options);
706 }
2a32cebd 707 rcu_read_unlock();
b3b304a2
MS
708
709 return 0;
710}
711EXPORT_SYMBOL(generic_show_options);
712
713/*
 714 * If the filesystem uses generic_show_options(), this function should be
715 * called from the fill_super() callback.
716 *
717 * The .remount_fs callback usually needs to be handled in a special
 718 * way, to make sure that previous options are not overwritten if the
719 * remount fails.
720 *
 721 * Also note that if the filesystem's .remount_fs function doesn't
722 * reset all options to their default value, but changes only newly
723 * given options, then the displayed options will not reflect reality
724 * any more.
725 */
726void save_mount_options(struct super_block *sb, char *options)
727{
2a32cebd
AV
728 BUG_ON(sb->s_options);
729 rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
b3b304a2
MS
730}
731EXPORT_SYMBOL(save_mount_options);
732
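/*
 * Sketch of the intended pairing (hypothetical filesystem, illustrative
 * names only): fill_super() records the raw option string once via
 * save_mount_options(), and the filesystem's super_operations point
 * .show_options at generic_show_options(), so the saved string is what
 * shows up in /proc/mounts.
 */
static int __maybe_unused examplefs_fill_super(struct super_block *sb,
					       void *data, int silent)
{
	save_mount_options(sb, data);
	/* ... parse @data, set up sb->s_root, etc. ... */
	return 0;
}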
2a32cebd
AV
733void replace_mount_options(struct super_block *sb, char *options)
734{
735 char *old = sb->s_options;
736 rcu_assign_pointer(sb->s_options, options);
737 if (old) {
738 synchronize_rcu();
739 kfree(old);
740 }
741}
742EXPORT_SYMBOL(replace_mount_options);
743
a1a2c409 744#ifdef CONFIG_PROC_FS
1da177e4
LT
745/* iterator */
746static void *m_start(struct seq_file *m, loff_t *pos)
747{
a1a2c409 748 struct proc_mounts *p = m->private;
1da177e4 749
390c6843 750 down_read(&namespace_sem);
a1a2c409 751 return seq_list_start(&p->ns->list, *pos);
1da177e4
LT
752}
753
754static void *m_next(struct seq_file *m, void *v, loff_t *pos)
755{
a1a2c409 756 struct proc_mounts *p = m->private;
b0765fb8 757
a1a2c409 758 return seq_list_next(v, &p->ns->list, pos);
1da177e4
LT
759}
760
761static void m_stop(struct seq_file *m, void *v)
762{
390c6843 763 up_read(&namespace_sem);
1da177e4
LT
764}
765
2d4d4864
RP
766struct proc_fs_info {
767 int flag;
768 const char *str;
769};
770
2069f457 771static int show_sb_opts(struct seq_file *m, struct super_block *sb)
1da177e4 772{
2d4d4864 773 static const struct proc_fs_info fs_info[] = {
1da177e4
LT
774 { MS_SYNCHRONOUS, ",sync" },
775 { MS_DIRSYNC, ",dirsync" },
776 { MS_MANDLOCK, ",mand" },
1da177e4
LT
777 { 0, NULL }
778 };
2d4d4864
RP
779 const struct proc_fs_info *fs_infop;
780
781 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
782 if (sb->s_flags & fs_infop->flag)
783 seq_puts(m, fs_infop->str);
784 }
2069f457
EP
785
786 return security_sb_show_options(m, sb);
2d4d4864
RP
787}
788
789static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
790{
791 static const struct proc_fs_info mnt_info[] = {
1da177e4
LT
792 { MNT_NOSUID, ",nosuid" },
793 { MNT_NODEV, ",nodev" },
794 { MNT_NOEXEC, ",noexec" },
fc33a7bb
CH
795 { MNT_NOATIME, ",noatime" },
796 { MNT_NODIRATIME, ",nodiratime" },
47ae32d6 797 { MNT_RELATIME, ",relatime" },
d0adde57 798 { MNT_STRICTATIME, ",strictatime" },
1da177e4
LT
799 { 0, NULL }
800 };
2d4d4864
RP
801 const struct proc_fs_info *fs_infop;
802
803 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
804 if (mnt->mnt_flags & fs_infop->flag)
805 seq_puts(m, fs_infop->str);
806 }
807}
808
809static void show_type(struct seq_file *m, struct super_block *sb)
810{
811 mangle(m, sb->s_type->name);
812 if (sb->s_subtype && sb->s_subtype[0]) {
813 seq_putc(m, '.');
814 mangle(m, sb->s_subtype);
815 }
816}
817
818static int show_vfsmnt(struct seq_file *m, void *v)
819{
820 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
821 int err = 0;
c32c2f63 822 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
1da177e4
LT
823
824 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
825 seq_putc(m, ' ');
c32c2f63 826 seq_path(m, &mnt_path, " \t\n\\");
1da177e4 827 seq_putc(m, ' ');
2d4d4864 828 show_type(m, mnt->mnt_sb);
2e4b7fcd 829 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
2069f457
EP
830 err = show_sb_opts(m, mnt->mnt_sb);
831 if (err)
832 goto out;
2d4d4864 833 show_mnt_opts(m, mnt);
1da177e4
LT
834 if (mnt->mnt_sb->s_op->show_options)
835 err = mnt->mnt_sb->s_op->show_options(m, mnt);
836 seq_puts(m, " 0 0\n");
2069f457 837out:
1da177e4
LT
838 return err;
839}
840
a1a2c409 841const struct seq_operations mounts_op = {
1da177e4
LT
842 .start = m_start,
843 .next = m_next,
844 .stop = m_stop,
845 .show = show_vfsmnt
846};
847
2d4d4864
RP
848static int show_mountinfo(struct seq_file *m, void *v)
849{
850 struct proc_mounts *p = m->private;
851 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
852 struct super_block *sb = mnt->mnt_sb;
853 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
854 struct path root = p->root;
855 int err = 0;
856
857 seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
858 MAJOR(sb->s_dev), MINOR(sb->s_dev));
859 seq_dentry(m, mnt->mnt_root, " \t\n\\");
860 seq_putc(m, ' ');
861 seq_path_root(m, &mnt_path, &root, " \t\n\\");
862 if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
863 /*
864 * Mountpoint is outside root, discard that one. Ugly,
 865 * but less so than trying to do that in the iterator in a
866 * race-free way (due to renames).
867 */
868 return SEQ_SKIP;
869 }
870 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
871 show_mnt_opts(m, mnt);
872
873 /* Tagged fields ("foo:X" or "bar") */
874 if (IS_MNT_SHARED(mnt))
875 seq_printf(m, " shared:%i", mnt->mnt_group_id);
97e7e0f7
MS
876 if (IS_MNT_SLAVE(mnt)) {
877 int master = mnt->mnt_master->mnt_group_id;
878 int dom = get_dominating_id(mnt, &p->root);
879 seq_printf(m, " master:%i", master);
880 if (dom && dom != master)
881 seq_printf(m, " propagate_from:%i", dom);
882 }
2d4d4864
RP
883 if (IS_MNT_UNBINDABLE(mnt))
884 seq_puts(m, " unbindable");
885
886 /* Filesystem specific data */
887 seq_puts(m, " - ");
888 show_type(m, sb);
889 seq_putc(m, ' ');
890 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
891 seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
2069f457
EP
892 err = show_sb_opts(m, sb);
893 if (err)
894 goto out;
2d4d4864
RP
895 if (sb->s_op->show_options)
896 err = sb->s_op->show_options(m, mnt);
897 seq_putc(m, '\n');
2069f457 898out:
2d4d4864
RP
899 return err;
900}
901
902const struct seq_operations mountinfo_op = {
903 .start = m_start,
904 .next = m_next,
905 .stop = m_stop,
906 .show = show_mountinfo,
907};
908
b4629fe2
CL
909static int show_vfsstat(struct seq_file *m, void *v)
910{
b0765fb8 911 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
c32c2f63 912 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
b4629fe2
CL
913 int err = 0;
914
915 /* device */
916 if (mnt->mnt_devname) {
917 seq_puts(m, "device ");
918 mangle(m, mnt->mnt_devname);
919 } else
920 seq_puts(m, "no device");
921
922 /* mount point */
923 seq_puts(m, " mounted on ");
c32c2f63 924 seq_path(m, &mnt_path, " \t\n\\");
b4629fe2
CL
925 seq_putc(m, ' ');
926
927 /* file system type */
928 seq_puts(m, "with fstype ");
2d4d4864 929 show_type(m, mnt->mnt_sb);
b4629fe2
CL
930
931 /* optional statistics */
932 if (mnt->mnt_sb->s_op->show_stats) {
933 seq_putc(m, ' ');
934 err = mnt->mnt_sb->s_op->show_stats(m, mnt);
935 }
936
937 seq_putc(m, '\n');
938 return err;
939}
940
a1a2c409 941const struct seq_operations mountstats_op = {
b4629fe2
CL
942 .start = m_start,
943 .next = m_next,
944 .stop = m_stop,
945 .show = show_vfsstat,
946};
a1a2c409 947#endif /* CONFIG_PROC_FS */
b4629fe2 948
1da177e4
LT
949/**
950 * may_umount_tree - check if a mount tree is busy
951 * @mnt: root of mount tree
952 *
953 * This is called to check if a tree of mounts has any
954 * open files, pwds, chroots or sub mounts that are
955 * busy.
956 */
957int may_umount_tree(struct vfsmount *mnt)
958{
36341f64
RP
959 int actual_refs = 0;
960 int minimum_refs = 0;
961 struct vfsmount *p;
1da177e4
LT
962
963 spin_lock(&vfsmount_lock);
36341f64 964 for (p = mnt; p; p = next_mnt(p, mnt)) {
1da177e4
LT
965 actual_refs += atomic_read(&p->mnt_count);
966 minimum_refs += 2;
1da177e4
LT
967 }
968 spin_unlock(&vfsmount_lock);
969
970 if (actual_refs > minimum_refs)
e3474a8e 971 return 0;
1da177e4 972
e3474a8e 973 return 1;
1da177e4
LT
974}
975
976EXPORT_SYMBOL(may_umount_tree);
977
978/**
979 * may_umount - check if a mount point is busy
980 * @mnt: root of mount
981 *
982 * This is called to check if a mount point has any
983 * open files, pwds, chroots or sub mounts. If the
984 * mount has sub mounts this will return busy
985 * regardless of whether the sub mounts are busy.
986 *
987 * Doesn't take quota and stuff into account. IOW, in some cases it will
988 * give false negatives. The main reason why it's here is that we need
989 * a non-destructive way to look for easily umountable filesystems.
990 */
991int may_umount(struct vfsmount *mnt)
992{
e3474a8e 993 int ret = 1;
a05964f3
RP
994 spin_lock(&vfsmount_lock);
995 if (propagate_mount_busy(mnt, 2))
e3474a8e 996 ret = 0;
a05964f3
RP
997 spin_unlock(&vfsmount_lock);
998 return ret;
1da177e4
LT
999}
1000
1001EXPORT_SYMBOL(may_umount);
1002
b90fa9ae 1003void release_mounts(struct list_head *head)
70fbcdf4
RP
1004{
1005 struct vfsmount *mnt;
bf066c7d 1006 while (!list_empty(head)) {
b5e61818 1007 mnt = list_first_entry(head, struct vfsmount, mnt_hash);
70fbcdf4
RP
1008 list_del_init(&mnt->mnt_hash);
1009 if (mnt->mnt_parent != mnt) {
1010 struct dentry *dentry;
1011 struct vfsmount *m;
1012 spin_lock(&vfsmount_lock);
1013 dentry = mnt->mnt_mountpoint;
1014 m = mnt->mnt_parent;
1015 mnt->mnt_mountpoint = mnt->mnt_root;
1016 mnt->mnt_parent = mnt;
7c4b93d8 1017 m->mnt_ghosts--;
70fbcdf4
RP
1018 spin_unlock(&vfsmount_lock);
1019 dput(dentry);
1020 mntput(m);
1021 }
1022 mntput(mnt);
1023 }
1024}
1025
a05964f3 1026void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
1da177e4
LT
1027{
1028 struct vfsmount *p;
1da177e4 1029
1bfba4e8
AM
1030 for (p = mnt; p; p = next_mnt(p, mnt))
1031 list_move(&p->mnt_hash, kill);
1da177e4 1032
a05964f3
RP
1033 if (propagate)
1034 propagate_umount(kill);
1035
70fbcdf4
RP
1036 list_for_each_entry(p, kill, mnt_hash) {
1037 list_del_init(&p->mnt_expire);
1038 list_del_init(&p->mnt_list);
6b3286ed
KK
1039 __touch_mnt_namespace(p->mnt_ns);
1040 p->mnt_ns = NULL;
70fbcdf4 1041 list_del_init(&p->mnt_child);
7c4b93d8
AV
1042 if (p->mnt_parent != p) {
1043 p->mnt_parent->mnt_ghosts++;
f30ac319 1044 p->mnt_mountpoint->d_mounted--;
7c4b93d8 1045 }
a05964f3 1046 change_mnt_propagation(p, MS_PRIVATE);
1da177e4
LT
1047 }
1048}
1049
c35038be
AV
1050static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
1051
1da177e4
LT
1052static int do_umount(struct vfsmount *mnt, int flags)
1053{
b58fed8b 1054 struct super_block *sb = mnt->mnt_sb;
1da177e4 1055 int retval;
70fbcdf4 1056 LIST_HEAD(umount_list);
1da177e4
LT
1057
1058 retval = security_sb_umount(mnt, flags);
1059 if (retval)
1060 return retval;
1061
1062 /*
1063 * Allow userspace to request a mountpoint be expired rather than
1064 * unmounting unconditionally. Unmount only happens if:
1065 * (1) the mark is already set (the mark is cleared by mntput())
1066 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1067 */
1068 if (flags & MNT_EXPIRE) {
6ac08c39 1069 if (mnt == current->fs->root.mnt ||
1da177e4
LT
1070 flags & (MNT_FORCE | MNT_DETACH))
1071 return -EINVAL;
1072
1073 if (atomic_read(&mnt->mnt_count) != 2)
1074 return -EBUSY;
1075
1076 if (!xchg(&mnt->mnt_expiry_mark, 1))
1077 return -EAGAIN;
1078 }
1079
1080 /*
1081 * If we may have to abort operations to get out of this
1082 * mount, and they will themselves hold resources we must
1083 * allow the fs to do things. In the Unix tradition of
 1084 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
 1085 * might fail to complete on the first run through as other tasks
 1086 * must return, and the like. That's for the mount program to worry
1087 * about for the moment.
1088 */
1089
42faad99 1090 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
42faad99 1091 sb->s_op->umount_begin(sb);
42faad99 1092 }
1da177e4
LT
1093
1094 /*
1095 * No sense to grab the lock for this test, but test itself looks
1096 * somewhat bogus. Suggestions for better replacement?
1097 * Ho-hum... In principle, we might treat that as umount + switch
1098 * to rootfs. GC would eventually take care of the old vfsmount.
1099 * Actually it makes sense, especially if rootfs would contain a
1100 * /reboot - static binary that would close all descriptors and
1101 * call reboot(9). Then init(8) could umount root and exec /reboot.
1102 */
6ac08c39 1103 if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1da177e4
LT
1104 /*
1105 * Special case for "unmounting" root ...
1106 * we just try to remount it readonly.
1107 */
1108 down_write(&sb->s_umount);
1109 if (!(sb->s_flags & MS_RDONLY)) {
1110 lock_kernel();
1da177e4
LT
1111 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
1112 unlock_kernel();
1113 }
1114 up_write(&sb->s_umount);
1115 return retval;
1116 }
1117
390c6843 1118 down_write(&namespace_sem);
1da177e4 1119 spin_lock(&vfsmount_lock);
5addc5dd 1120 event++;
1da177e4 1121
c35038be
AV
1122 if (!(flags & MNT_DETACH))
1123 shrink_submounts(mnt, &umount_list);
1124
1da177e4 1125 retval = -EBUSY;
a05964f3 1126 if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
1da177e4 1127 if (!list_empty(&mnt->mnt_list))
a05964f3 1128 umount_tree(mnt, 1, &umount_list);
1da177e4
LT
1129 retval = 0;
1130 }
1131 spin_unlock(&vfsmount_lock);
1132 if (retval)
1133 security_sb_umount_busy(mnt);
390c6843 1134 up_write(&namespace_sem);
70fbcdf4 1135 release_mounts(&umount_list);
1da177e4
LT
1136 return retval;
1137}
1138
1139/*
1140 * Now umount can handle mount points as well as block devices.
1141 * This is important for filesystems which use unnamed block devices.
1142 *
1143 * We now support a flag for forced unmount like the other 'big iron'
1144 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
1145 */
1146
bdc480e3 1147SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1da177e4 1148{
2d8f3038 1149 struct path path;
1da177e4
LT
1150 int retval;
1151
2d8f3038 1152 retval = user_path(name, &path);
1da177e4
LT
1153 if (retval)
1154 goto out;
1155 retval = -EINVAL;
2d8f3038 1156 if (path.dentry != path.mnt->mnt_root)
1da177e4 1157 goto dput_and_out;
2d8f3038 1158 if (!check_mnt(path.mnt))
1da177e4
LT
1159 goto dput_and_out;
1160
1161 retval = -EPERM;
1162 if (!capable(CAP_SYS_ADMIN))
1163 goto dput_and_out;
1164
2d8f3038 1165 retval = do_umount(path.mnt, flags);
1da177e4 1166dput_and_out:
429731b1 1167 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
2d8f3038
AV
1168 dput(path.dentry);
1169 mntput_no_expire(path.mnt);
1da177e4
LT
1170out:
1171 return retval;
1172}
1173
1174#ifdef __ARCH_WANT_SYS_OLDUMOUNT
1175
1176/*
b58fed8b 1177 * The 2.0 compatible umount. No flags.
1da177e4 1178 */
bdc480e3 1179SYSCALL_DEFINE1(oldumount, char __user *, name)
1da177e4 1180{
b58fed8b 1181 return sys_umount(name, 0);
1da177e4
LT
1182}
1183
1184#endif
1185
2d92ab3c 1186static int mount_is_safe(struct path *path)
1da177e4
LT
1187{
1188 if (capable(CAP_SYS_ADMIN))
1189 return 0;
1190 return -EPERM;
1191#ifdef notyet
2d92ab3c 1192 if (S_ISLNK(path->dentry->d_inode->i_mode))
1da177e4 1193 return -EPERM;
2d92ab3c 1194 if (path->dentry->d_inode->i_mode & S_ISVTX) {
da9592ed 1195 if (current_uid() != path->dentry->d_inode->i_uid)
1da177e4
LT
1196 return -EPERM;
1197 }
2d92ab3c 1198 if (inode_permission(path->dentry->d_inode, MAY_WRITE))
1da177e4
LT
1199 return -EPERM;
1200 return 0;
1201#endif
1202}
1203
b90fa9ae 1204struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
36341f64 1205 int flag)
1da177e4
LT
1206{
1207 struct vfsmount *res, *p, *q, *r, *s;
1a390689 1208 struct path path;
1da177e4 1209
9676f0c6
RP
1210 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
1211 return NULL;
1212
36341f64 1213 res = q = clone_mnt(mnt, dentry, flag);
1da177e4
LT
1214 if (!q)
1215 goto Enomem;
1216 q->mnt_mountpoint = mnt->mnt_mountpoint;
1217
1218 p = mnt;
fdadd65f 1219 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
7ec02ef1 1220 if (!is_subdir(r->mnt_mountpoint, dentry))
1da177e4
LT
1221 continue;
1222
1223 for (s = r; s; s = next_mnt(s, r)) {
9676f0c6
RP
1224 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
1225 s = skip_mnt_tree(s);
1226 continue;
1227 }
1da177e4
LT
1228 while (p != s->mnt_parent) {
1229 p = p->mnt_parent;
1230 q = q->mnt_parent;
1231 }
1232 p = s;
1a390689
AV
1233 path.mnt = q;
1234 path.dentry = p->mnt_mountpoint;
36341f64 1235 q = clone_mnt(p, p->mnt_root, flag);
1da177e4
LT
1236 if (!q)
1237 goto Enomem;
1238 spin_lock(&vfsmount_lock);
1239 list_add_tail(&q->mnt_list, &res->mnt_list);
1a390689 1240 attach_mnt(q, &path);
1da177e4
LT
1241 spin_unlock(&vfsmount_lock);
1242 }
1243 }
1244 return res;
b58fed8b 1245Enomem:
1da177e4 1246 if (res) {
70fbcdf4 1247 LIST_HEAD(umount_list);
1da177e4 1248 spin_lock(&vfsmount_lock);
a05964f3 1249 umount_tree(res, 0, &umount_list);
1da177e4 1250 spin_unlock(&vfsmount_lock);
70fbcdf4 1251 release_mounts(&umount_list);
1da177e4
LT
1252 }
1253 return NULL;
1254}
1255
589ff870 1256struct vfsmount *collect_mounts(struct path *path)
8aec0809
AV
1257{
1258 struct vfsmount *tree;
1a60a280 1259 down_write(&namespace_sem);
589ff870 1260 tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
1a60a280 1261 up_write(&namespace_sem);
8aec0809
AV
1262 return tree;
1263}
1264
1265void drop_collected_mounts(struct vfsmount *mnt)
1266{
1267 LIST_HEAD(umount_list);
1a60a280 1268 down_write(&namespace_sem);
8aec0809
AV
1269 spin_lock(&vfsmount_lock);
1270 umount_tree(mnt, 0, &umount_list);
1271 spin_unlock(&vfsmount_lock);
1a60a280 1272 up_write(&namespace_sem);
8aec0809
AV
1273 release_mounts(&umount_list);
1274}
1275
719f5d7f
MS
1276static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1277{
1278 struct vfsmount *p;
1279
1280 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1281 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1282 mnt_release_group_id(p);
1283 }
1284}
1285
1286static int invent_group_ids(struct vfsmount *mnt, bool recurse)
1287{
1288 struct vfsmount *p;
1289
1290 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1291 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1292 int err = mnt_alloc_group_id(p);
1293 if (err) {
1294 cleanup_group_ids(mnt, p);
1295 return err;
1296 }
1297 }
1298 }
1299
1300 return 0;
1301}
1302
b90fa9ae
RP
1303/*
1304 * @source_mnt : mount tree to be attached
21444403
RP
 1305 * @path : the place at which the mount tree @source_mnt is attached
 1306 * @parent_path : if non-null, detach the source_mnt from its parent and
 1307 * store the parent mount and mountpoint dentry there.
1308 * (done when source_mnt is moved)
b90fa9ae
RP
1309 *
 1310 * NOTE: the table below explains the semantics when a source mount
1311 * of a given type is attached to a destination mount of a given type.
9676f0c6
RP
1312 * ---------------------------------------------------------------------------
1313 * | BIND MOUNT OPERATION |
1314 * |**************************************************************************
1315 * | source-->| shared | private | slave | unbindable |
1316 * | dest | | | | |
1317 * | | | | | | |
1318 * | v | | | | |
1319 * |**************************************************************************
1320 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
1321 * | | | | | |
1322 * |non-shared| shared (+) | private | slave (*) | invalid |
1323 * ***************************************************************************
b90fa9ae
RP
1324 * A bind operation clones the source mount and mounts the clone on the
1325 * destination mount.
1326 *
1327 * (++) the cloned mount is propagated to all the mounts in the propagation
1328 * tree of the destination mount and the cloned mount is added to
1329 * the peer group of the source mount.
1330 * (+) the cloned mount is created under the destination mount and is marked
1331 * as shared. The cloned mount is added to the peer group of the source
1332 * mount.
5afe0022
RP
1333 * (+++) the mount is propagated to all the mounts in the propagation tree
1334 * of the destination mount and the cloned mount is made slave
1335 * of the same master as that of the source mount. The cloned mount
1336 * is marked as 'shared and slave'.
1337 * (*) the cloned mount is made a slave of the same master as that of the
1338 * source mount.
1339 *
9676f0c6
RP
1340 * ---------------------------------------------------------------------------
1341 * | MOVE MOUNT OPERATION |
1342 * |**************************************************************************
1343 * | source-->| shared | private | slave | unbindable |
1344 * | dest | | | | |
1345 * | | | | | | |
1346 * | v | | | | |
1347 * |**************************************************************************
1348 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
1349 * | | | | | |
1350 * |non-shared| shared (+*) | private | slave (*) | unbindable |
1351 * ***************************************************************************
5afe0022
RP
1352 *
1353 * (+) the mount is moved to the destination. And is then propagated to
1354 * all the mounts in the propagation tree of the destination mount.
21444403 1355 * (+*) the mount is moved to the destination.
5afe0022
RP
1356 * (+++) the mount is moved to the destination and is then propagated to
1357 * all the mounts belonging to the destination mount's propagation tree.
1358 * the mount is marked as 'shared and slave'.
1359 * (*) the mount continues to be a slave at the new location.
b90fa9ae
RP
1360 *
 1361 * if the source mount is a tree, the operations explained above are
1362 * applied to each mount in the tree.
1363 * Must be called without spinlocks held, since this function can sleep
1364 * in allocations.
1365 */
1366static int attach_recursive_mnt(struct vfsmount *source_mnt,
1a390689 1367 struct path *path, struct path *parent_path)
b90fa9ae
RP
1368{
1369 LIST_HEAD(tree_list);
1a390689
AV
1370 struct vfsmount *dest_mnt = path->mnt;
1371 struct dentry *dest_dentry = path->dentry;
b90fa9ae 1372 struct vfsmount *child, *p;
719f5d7f 1373 int err;
b90fa9ae 1374
719f5d7f
MS
1375 if (IS_MNT_SHARED(dest_mnt)) {
1376 err = invent_group_ids(source_mnt, true);
1377 if (err)
1378 goto out;
1379 }
1380 err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
1381 if (err)
1382 goto out_cleanup_ids;
b90fa9ae
RP
1383
1384 if (IS_MNT_SHARED(dest_mnt)) {
1385 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1386 set_mnt_shared(p);
1387 }
1388
1389 spin_lock(&vfsmount_lock);
1a390689
AV
1390 if (parent_path) {
1391 detach_mnt(source_mnt, parent_path);
1392 attach_mnt(source_mnt, path);
e5d67f07 1393 touch_mnt_namespace(parent_path->mnt->mnt_ns);
21444403
RP
1394 } else {
1395 mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
1396 commit_tree(source_mnt);
1397 }
b90fa9ae
RP
1398
1399 list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
1400 list_del_init(&child->mnt_hash);
1401 commit_tree(child);
1402 }
1403 spin_unlock(&vfsmount_lock);
1404 return 0;
719f5d7f
MS
1405
1406 out_cleanup_ids:
1407 if (IS_MNT_SHARED(dest_mnt))
1408 cleanup_group_ids(source_mnt, NULL);
1409 out:
1410 return err;
b90fa9ae
RP
1411}
1412
8c3ee42e 1413static int graft_tree(struct vfsmount *mnt, struct path *path)
1da177e4
LT
1414{
1415 int err;
1416 if (mnt->mnt_sb->s_flags & MS_NOUSER)
1417 return -EINVAL;
1418
8c3ee42e 1419 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1da177e4
LT
1420 S_ISDIR(mnt->mnt_root->d_inode->i_mode))
1421 return -ENOTDIR;
1422
1423 err = -ENOENT;
8c3ee42e
AV
1424 mutex_lock(&path->dentry->d_inode->i_mutex);
1425 if (IS_DEADDIR(path->dentry->d_inode))
1da177e4
LT
1426 goto out_unlock;
1427
8c3ee42e 1428 err = security_sb_check_sb(mnt, path);
1da177e4
LT
1429 if (err)
1430 goto out_unlock;
1431
1432 err = -ENOENT;
8c3ee42e
AV
1433 if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
1434 err = attach_recursive_mnt(mnt, path, NULL);
1da177e4 1435out_unlock:
8c3ee42e 1436 mutex_unlock(&path->dentry->d_inode->i_mutex);
1da177e4 1437 if (!err)
8c3ee42e 1438 security_sb_post_addmount(mnt, path);
1da177e4
LT
1439 return err;
1440}
1441
07b20889
RP
1442/*
1443 * recursively change the type of the mountpoint.
1444 */
0a0d8a46 1445static int do_change_type(struct path *path, int flag)
07b20889 1446{
2d92ab3c 1447 struct vfsmount *m, *mnt = path->mnt;
07b20889
RP
1448 int recurse = flag & MS_REC;
1449 int type = flag & ~MS_REC;
719f5d7f 1450 int err = 0;
07b20889 1451
ee6f9582
MS
1452 if (!capable(CAP_SYS_ADMIN))
1453 return -EPERM;
1454
2d92ab3c 1455 if (path->dentry != path->mnt->mnt_root)
07b20889
RP
1456 return -EINVAL;
1457
1458 down_write(&namespace_sem);
719f5d7f
MS
1459 if (type == MS_SHARED) {
1460 err = invent_group_ids(mnt, recurse);
1461 if (err)
1462 goto out_unlock;
1463 }
1464
07b20889
RP
1465 spin_lock(&vfsmount_lock);
1466 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1467 change_mnt_propagation(m, type);
1468 spin_unlock(&vfsmount_lock);
719f5d7f
MS
1469
1470 out_unlock:
07b20889 1471 up_write(&namespace_sem);
719f5d7f 1472 return err;
07b20889
RP
1473}
1474
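/*
 * Orientation sketch (userspace view; the exact dispatch lives in
 * do_mount()): a propagation change such as
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 *
 * ends up in do_change_type() above, while a bind mount such as
 *
 *	mount("/old", "/mnt", NULL, MS_BIND, NULL);
 *
 * is handled by do_loopback() below.
 */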
1da177e4
LT
1475/*
1476 * do loopback mount.
1477 */
0a0d8a46 1478static int do_loopback(struct path *path, char *old_name,
2dafe1c4 1479 int recurse)
1da177e4 1480{
2d92ab3c 1481 struct path old_path;
1da177e4 1482 struct vfsmount *mnt = NULL;
2d92ab3c 1483 int err = mount_is_safe(path);
1da177e4
LT
1484 if (err)
1485 return err;
1486 if (!old_name || !*old_name)
1487 return -EINVAL;
2d92ab3c 1488 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
1da177e4
LT
1489 if (err)
1490 return err;
1491
390c6843 1492 down_write(&namespace_sem);
1da177e4 1493 err = -EINVAL;
2d92ab3c 1494 if (IS_MNT_UNBINDABLE(old_path.mnt))
4ac91378 1495 goto out;
9676f0c6 1496
2d92ab3c 1497 if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
ccd48bc7 1498 goto out;
1da177e4 1499
ccd48bc7
AV
1500 err = -ENOMEM;
1501 if (recurse)
2d92ab3c 1502 mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
ccd48bc7 1503 else
2d92ab3c 1504 mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);
ccd48bc7
AV
1505
1506 if (!mnt)
1507 goto out;
1508
2d92ab3c 1509 err = graft_tree(mnt, path);
ccd48bc7 1510 if (err) {
70fbcdf4 1511 LIST_HEAD(umount_list);
1da177e4 1512 spin_lock(&vfsmount_lock);
a05964f3 1513 umount_tree(mnt, 0, &umount_list);
1da177e4 1514 spin_unlock(&vfsmount_lock);
70fbcdf4 1515 release_mounts(&umount_list);
5b83d2c5 1516 }
1da177e4 1517
ccd48bc7 1518out:
390c6843 1519 up_write(&namespace_sem);
2d92ab3c 1520 path_put(&old_path);
1da177e4
LT
1521 return err;
1522}
1523
2e4b7fcd
DH
1524static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1525{
1526 int error = 0;
1527 int readonly_request = 0;
1528
1529 if (ms_flags & MS_RDONLY)
1530 readonly_request = 1;
1531 if (readonly_request == __mnt_is_readonly(mnt))
1532 return 0;
1533
1534 if (readonly_request)
1535 error = mnt_make_readonly(mnt);
1536 else
1537 __mnt_unmake_readonly(mnt);
1538 return error;
1539}
1540
1da177e4
LT
1541/*
1542 * change filesystem flags. dir should be a physical root of filesystem.
1543 * If you've mounted a non-root directory somewhere and want to do remount
1544 * on it - tough luck.
1545 */
0a0d8a46 1546static int do_remount(struct path *path, int flags, int mnt_flags,
1da177e4
LT
1547 void *data)
1548{
1549 int err;
2d92ab3c 1550 struct super_block *sb = path->mnt->mnt_sb;
1da177e4
LT
1551
1552 if (!capable(CAP_SYS_ADMIN))
1553 return -EPERM;
1554
2d92ab3c 1555 if (!check_mnt(path->mnt))
1da177e4
LT
1556 return -EINVAL;
1557
2d92ab3c 1558 if (path->dentry != path->mnt->mnt_root)
1da177e4
LT
1559 return -EINVAL;
1560
1561 down_write(&sb->s_umount);
2e4b7fcd 1562 if (flags & MS_BIND)
2d92ab3c 1563 err = change_mount_flags(path->mnt, flags);
2e4b7fcd
DH
1564 else
1565 err = do_remount_sb(sb, flags, data, 0);
1da177e4 1566 if (!err)
2d92ab3c 1567 path->mnt->mnt_flags = mnt_flags;
1da177e4 1568 up_write(&sb->s_umount);
0e55a7cc 1569 if (!err) {
2d92ab3c 1570 security_sb_post_remount(path->mnt, flags, data);
0e55a7cc
DW
1571
1572 spin_lock(&vfsmount_lock);
1573 touch_mnt_namespace(path->mnt->mnt_ns);
1574 spin_unlock(&vfsmount_lock);
1575 }
1da177e4
LT
1576 return err;
1577}
1578
9676f0c6
RP
1579static inline int tree_contains_unbindable(struct vfsmount *mnt)
1580{
1581 struct vfsmount *p;
1582 for (p = mnt; p; p = next_mnt(p, mnt)) {
1583 if (IS_MNT_UNBINDABLE(p))
1584 return 1;
1585 }
1586 return 0;
1587}
1588
0a0d8a46 1589static int do_move_mount(struct path *path, char *old_name)
1da177e4 1590{
2d92ab3c 1591 struct path old_path, parent_path;
1da177e4
LT
1592 struct vfsmount *p;
1593 int err = 0;
1594 if (!capable(CAP_SYS_ADMIN))
1595 return -EPERM;
1596 if (!old_name || !*old_name)
1597 return -EINVAL;
2d92ab3c 1598 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
1da177e4
LT
1599 if (err)
1600 return err;
1601
390c6843 1602 down_write(&namespace_sem);
2d92ab3c 1603 while (d_mountpoint(path->dentry) &&
9393bd07 1604 follow_down(path))
1da177e4
LT
1605 ;
1606 err = -EINVAL;
2d92ab3c 1607 if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
1da177e4
LT
1608 goto out;
1609
1610 err = -ENOENT;
2d92ab3c
AV
1611 mutex_lock(&path->dentry->d_inode->i_mutex);
1612 if (IS_DEADDIR(path->dentry->d_inode))
1da177e4
LT
1613 goto out1;
1614
2d92ab3c 1615 if (!IS_ROOT(path->dentry) && d_unhashed(path->dentry))
21444403 1616 goto out1;
1da177e4
LT
1617
1618 err = -EINVAL;
2d92ab3c 1619 if (old_path.dentry != old_path.mnt->mnt_root)
21444403 1620 goto out1;
1da177e4 1621
2d92ab3c 1622 if (old_path.mnt == old_path.mnt->mnt_parent)
21444403 1623 goto out1;
1da177e4 1624
2d92ab3c
AV
1625 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1626 S_ISDIR(old_path.dentry->d_inode->i_mode))
21444403
RP
1627 goto out1;
1628 /*
1629 * Don't move a mount residing in a shared parent.
1630 */
2d92ab3c
AV
1631 if (old_path.mnt->mnt_parent &&
1632 IS_MNT_SHARED(old_path.mnt->mnt_parent))
21444403 1633 goto out1;
9676f0c6
RP
1634 /*
1635 * Don't move a mount tree containing unbindable mounts to a destination
1636 * mount which is shared.
1637 */
2d92ab3c
AV
1638 if (IS_MNT_SHARED(path->mnt) &&
1639 tree_contains_unbindable(old_path.mnt))
9676f0c6 1640 goto out1;
1da177e4 1641 err = -ELOOP;
2d92ab3c
AV
1642 for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
1643 if (p == old_path.mnt)
21444403 1644 goto out1;
1da177e4 1645
2d92ab3c 1646 err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
4ac91378 1647 if (err)
21444403 1648 goto out1;
1da177e4
LT
1649
 1650 /* if the mount is moved, it should no longer expire
1651 * automatically */
2d92ab3c 1652 list_del_init(&old_path.mnt->mnt_expire);
1da177e4 1653out1:
2d92ab3c 1654 mutex_unlock(&path->dentry->d_inode->i_mutex);
1da177e4 1655out:
390c6843 1656 up_write(&namespace_sem);
1da177e4 1657 if (!err)
1a390689 1658 path_put(&parent_path);
2d92ab3c 1659 path_put(&old_path);
1da177e4
LT
1660 return err;
1661}
1662
1663/*
1664 * create a new mount for userspace and request it to be added into the
1665 * namespace's tree
1666 */
0a0d8a46 1667static int do_new_mount(struct path *path, char *type, int flags,
1da177e4
LT
1668 int mnt_flags, char *name, void *data)
1669{
1670 struct vfsmount *mnt;
1671
1672 if (!type || !memchr(type, 0, PAGE_SIZE))
1673 return -EINVAL;
1674
1675 /* we need capabilities... */
1676 if (!capable(CAP_SYS_ADMIN))
1677 return -EPERM;
1678
1679 mnt = do_kern_mount(type, flags, name, data);
1680 if (IS_ERR(mnt))
1681 return PTR_ERR(mnt);
1682
2d92ab3c 1683 return do_add_mount(mnt, path, mnt_flags, NULL);
1da177e4
LT
1684}
1685
1686/*
1687 * add a mount into a namespace's mount tree
1688 * - provide the option of adding the new mount to an expiration list
1689 */
8d66bf54 1690int do_add_mount(struct vfsmount *newmnt, struct path *path,
1da177e4
LT
1691 int mnt_flags, struct list_head *fslist)
1692{
1693 int err;
1694
390c6843 1695 down_write(&namespace_sem);
1da177e4 1696 /* Something was mounted here while we slept */
8d66bf54 1697 while (d_mountpoint(path->dentry) &&
9393bd07 1698 follow_down(path))
1da177e4
LT
1699 ;
1700 err = -EINVAL;
dd5cae6e 1701 if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
1da177e4
LT
1702 goto unlock;
1703
1704 /* Refuse the same filesystem on the same mount point */
1705 err = -EBUSY;
8d66bf54
AV
1706 if (path->mnt->mnt_sb == newmnt->mnt_sb &&
1707 path->mnt->mnt_root == path->dentry)
1da177e4
LT
1708 goto unlock;
1709
1710 err = -EINVAL;
1711 if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
1712 goto unlock;
1713
1714 newmnt->mnt_flags = mnt_flags;
8d66bf54 1715 if ((err = graft_tree(newmnt, path)))
5b83d2c5 1716 goto unlock;
1da177e4 1717
6758f953 1718 if (fslist) /* add to the specified expiration list */
55e700b9 1719 list_add_tail(&newmnt->mnt_expire, fslist);
6758f953 1720
390c6843 1721 up_write(&namespace_sem);
5b83d2c5 1722 return 0;
1da177e4
LT
1723
1724unlock:
390c6843 1725 up_write(&namespace_sem);
1da177e4
LT
1726 mntput(newmnt);
1727 return err;
1728}
1729
1730EXPORT_SYMBOL_GPL(do_add_mount);
1731
1732/*
1733 * process a list of expirable mountpoints with the intent of discarding any
1734 * mountpoints that aren't in use and haven't been touched since last we came
1735 * here
1736 */
1737void mark_mounts_for_expiry(struct list_head *mounts)
1738{
1da177e4
LT
1739 struct vfsmount *mnt, *next;
1740 LIST_HEAD(graveyard);
bcc5c7d2 1741 LIST_HEAD(umounts);
1da177e4
LT
1742
1743 if (list_empty(mounts))
1744 return;
1745
bcc5c7d2 1746 down_write(&namespace_sem);
1da177e4
LT
1747 spin_lock(&vfsmount_lock);
1748
1749 /* extract from the expiration list every vfsmount that matches the
1750 * following criteria:
1751 * - only referenced by its parent vfsmount
1752 * - still marked for expiry (marked on the last call here; marks are
1753 * cleared by mntput())
1754 */
55e700b9 1755 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
1da177e4 1756 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
bcc5c7d2 1757 propagate_mount_busy(mnt, 1))
1da177e4 1758 continue;
55e700b9 1759 list_move(&mnt->mnt_expire, &graveyard);
1da177e4 1760 }
bcc5c7d2
AV
1761 while (!list_empty(&graveyard)) {
1762 mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
1763 touch_mnt_namespace(mnt->mnt_ns);
1764 umount_tree(mnt, 1, &umounts);
1765 }
5528f911 1766 spin_unlock(&vfsmount_lock);
bcc5c7d2
AV
1767 up_write(&namespace_sem);
1768
1769 release_mounts(&umounts);
5528f911
TM
1770}
1771
1772EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
1773
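/*
 * Minimal sketch of how an automounting filesystem is expected to use the
 * expiry interface (hypothetical names, no specific in-tree user implied):
 * submounts are grafted in through do_add_mount() with a private expiry
 * list, and a periodic worker then calls mark_mounts_for_expiry() on that
 * list so submounts left unused across two passes are unmounted.
 */
static LIST_HEAD(example_auto_expire_list);

static int __maybe_unused example_add_submount(struct vfsmount *newmnt,
					       struct path *where)
{
	return do_add_mount(newmnt, where, MNT_SHRINKABLE,
			    &example_auto_expire_list);
}

static void __maybe_unused example_expire_submounts(void)
{
	mark_mounts_for_expiry(&example_auto_expire_list);
}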
1774/*
1775 * Ripoff of 'select_parent()'
1776 *
1777 * search the list of submounts for a given mountpoint, and move any
1778 * shrinkable submounts to the 'graveyard' list.
1779 */
1780static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
1781{
1782 struct vfsmount *this_parent = parent;
1783 struct list_head *next;
1784 int found = 0;
1785
1786repeat:
1787 next = this_parent->mnt_mounts.next;
1788resume:
1789 while (next != &this_parent->mnt_mounts) {
1790 struct list_head *tmp = next;
1791 struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
1792
1793 next = tmp->next;
1794 if (!(mnt->mnt_flags & MNT_SHRINKABLE))
1da177e4 1795 continue;
5528f911
TM
1796 /*
 1797 * Descend a level if the mnt_mounts list is non-empty.
1798 */
1799 if (!list_empty(&mnt->mnt_mounts)) {
1800 this_parent = mnt;
1801 goto repeat;
1802 }
1da177e4 1803
5528f911 1804 if (!propagate_mount_busy(mnt, 1)) {
5528f911
TM
1805 list_move_tail(&mnt->mnt_expire, graveyard);
1806 found++;
1807 }
1da177e4 1808 }
5528f911
TM
1809 /*
1810 * All done at this level ... ascend and resume the search
1811 */
1812 if (this_parent != parent) {
1813 next = this_parent->mnt_child.next;
1814 this_parent = this_parent->mnt_parent;
1815 goto resume;
1816 }
1817 return found;
1818}
1819
1820/*
1821 * process a list of expirable mountpoints with the intent of discarding any
1822 * submounts of a specific parent mountpoint
1823 */
c35038be 1824static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
5528f911
TM
1825{
1826 LIST_HEAD(graveyard);
c35038be 1827 struct vfsmount *m;
5528f911 1828
5528f911 1829 /* extract submounts of 'mountpoint' from the expiration list */
c35038be 1830 while (select_submounts(mnt, &graveyard)) {
bcc5c7d2 1831 while (!list_empty(&graveyard)) {
c35038be 1832 m = list_first_entry(&graveyard, struct vfsmount,
bcc5c7d2 1833 mnt_expire);
afef80b3
EB
1834 touch_mnt_namespace(m->mnt_ns);
1835 umount_tree(m, 1, umounts);
bcc5c7d2
AV
1836 }
1837 }
1da177e4
LT
1838}
1839
1da177e4
LT
1840/*
1841 * Some copy_from_user() implementations do not return the exact number of
1842 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
1843 * Note that this function differs from copy_from_user() in that it will oops
1844 * on bad values of `to', rather than returning a short copy.
1845 */
b58fed8b
RP
1846static long exact_copy_from_user(void *to, const void __user * from,
1847 unsigned long n)
1da177e4
LT
1848{
1849 char *t = to;
1850 const char __user *f = from;
1851 char c;
1852
1853 if (!access_ok(VERIFY_READ, from, n))
1854 return n;
1855
1856 while (n) {
1857 if (__get_user(c, f)) {
1858 memset(t, 0, n);
1859 break;
1860 }
1861 *t++ = c;
1862 f++;
1863 n--;
1864 }
1865 return n;
1866}
1867
b58fed8b 1868int copy_mount_options(const void __user * data, unsigned long *where)
1da177e4
LT
1869{
1870 int i;
1871 unsigned long page;
1872 unsigned long size;
b58fed8b 1873
1da177e4
LT
1874 *where = 0;
1875 if (!data)
1876 return 0;
1877
1878 if (!(page = __get_free_page(GFP_KERNEL)))
1879 return -ENOMEM;
1880
1881 /* We only care that *some* data at the address the user
1882 * gave us is valid. Just in case, we'll zero
1883 * the remainder of the page.
1884 */
1885 /* copy_from_user cannot cross TASK_SIZE ! */
1886 size = TASK_SIZE - (unsigned long)data;
1887 if (size > PAGE_SIZE)
1888 size = PAGE_SIZE;
1889
1890 i = size - exact_copy_from_user((void *)page, data, size);
1891 if (!i) {
b58fed8b 1892 free_page(page);
1da177e4
LT
1893 return -EFAULT;
1894 }
1895 if (i != PAGE_SIZE)
1896 memset((char *)page + i, 0, PAGE_SIZE - i);
1897 *where = page;
1898 return 0;
1899}
1900
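/*
 * Worked example of the exact-count contract above (illustrative numbers,
 * PAGE_SIZE taken as 4096): if the option string sits 100 bytes below an
 * unmapped page, exact_copy_from_user() copies 100 bytes and returns
 * size - 100, so i becomes 100, the remaining 3996 bytes of the page are
 * zeroed, and the copy still succeeds; only i == 0 - nothing readable at
 * all - is reported as -EFAULT.
 */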
1901/*
1902 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
1903 * be given to the mount() call (i.e. read-only, no-dev, no-suid etc).
1904 *
1905 * data is a (void *) that can point to any structure up to
1906 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
1907 * information (or be NULL).
1908 *
1909 * Pre-0.97 versions of mount() didn't have a flags word.
1910 * When the flags word was introduced its top half was required
1911 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
1912 * Therefore, if this magic number is present, it carries no information
1913 * and must be discarded.
1914 */
b58fed8b 1915long do_mount(char *dev_name, char *dir_name, char *type_page,
1da177e4
LT
1916 unsigned long flags, void *data_page)
1917{
2d92ab3c 1918 struct path path;
1da177e4
LT
1919 int retval = 0;
1920 int mnt_flags = 0;
1921
1922 /* Discard magic */
1923 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
1924 flags &= ~MS_MGC_MSK;
1925
1926 /* Basic sanity checks */
1927
1928 if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
1929 return -EINVAL;
1930 if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
1931 return -EINVAL;
1932
1933 if (data_page)
1934 ((char *)data_page)[PAGE_SIZE - 1] = 0;
1935
613cbe3d
AK
1936	/* Default to relatime unless overridden */
1937 if (!(flags & MS_NOATIME))
1938 mnt_flags |= MNT_RELATIME;
0a1c01c9 1939
1da177e4
LT
1940 /* Separate the per-mountpoint flags */
1941 if (flags & MS_NOSUID)
1942 mnt_flags |= MNT_NOSUID;
1943 if (flags & MS_NODEV)
1944 mnt_flags |= MNT_NODEV;
1945 if (flags & MS_NOEXEC)
1946 mnt_flags |= MNT_NOEXEC;
fc33a7bb
CH
1947 if (flags & MS_NOATIME)
1948 mnt_flags |= MNT_NOATIME;
1949 if (flags & MS_NODIRATIME)
1950 mnt_flags |= MNT_NODIRATIME;
d0adde57
MG
1951 if (flags & MS_STRICTATIME)
1952 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2e4b7fcd
DH
1953 if (flags & MS_RDONLY)
1954 mnt_flags |= MNT_READONLY;
fc33a7bb
CH
1955
1956 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
d0adde57
MG
1957 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
1958 MS_STRICTATIME);
1da177e4
LT
1959
1960 /* ... and get the mountpoint */
2d92ab3c 1961 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1da177e4
LT
1962 if (retval)
1963 return retval;
1964
2d92ab3c 1965 retval = security_sb_mount(dev_name, &path,
b5266eb4 1966 type_page, flags, data_page);
1da177e4
LT
1967 if (retval)
1968 goto dput_out;
1969
1970 if (flags & MS_REMOUNT)
2d92ab3c 1971 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
1da177e4
LT
1972 data_page);
1973 else if (flags & MS_BIND)
2d92ab3c 1974 retval = do_loopback(&path, dev_name, flags & MS_REC);
9676f0c6 1975 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2d92ab3c 1976 retval = do_change_type(&path, flags);
1da177e4 1977 else if (flags & MS_MOVE)
2d92ab3c 1978 retval = do_move_mount(&path, dev_name);
1da177e4 1979 else
2d92ab3c 1980 retval = do_new_mount(&path, type_page, flags, mnt_flags,
1da177e4
LT
1981 dev_name, data_page);
1982dput_out:
2d92ab3c 1983 path_put(&path);
1da177e4
LT
1984 return retval;
1985}
1986
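/*
 * Userspace view of the interface described above (illustrative, not part
 * of this file): the MS_* bits in 'flags' become per-mountpoint MNT_*
 * flags in do_mount(), and 'data' is the fs-dependent option string.
 * Paths and options are made up for the example.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* read-only, nosuid, nodev tmpfs with fs-specific options */
	if (mount("tmpfs", "/mnt", "tmpfs",
		  MS_RDONLY | MS_NOSUID | MS_NODEV,
		  "size=16m,mode=0755") == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}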
741a2951
JD
1987/*
1988 * Allocate a new namespace structure and populate it with contents
1989 * copied from the namespace of the passed in task structure.
1990 */
e3222c4e 1991static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
6b3286ed 1992 struct fs_struct *fs)
1da177e4 1993{
6b3286ed 1994 struct mnt_namespace *new_ns;
7f2da1e7 1995 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
1da177e4
LT
1996 struct vfsmount *p, *q;
1997
6b3286ed 1998 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
1da177e4 1999 if (!new_ns)
467e9f4b 2000 return ERR_PTR(-ENOMEM);
1da177e4
LT
2001
2002 atomic_set(&new_ns->count, 1);
1da177e4 2003 INIT_LIST_HEAD(&new_ns->list);
5addc5dd
AV
2004 init_waitqueue_head(&new_ns->poll);
2005 new_ns->event = 0;
1da177e4 2006
390c6843 2007 down_write(&namespace_sem);
1da177e4 2008 /* First pass: copy the tree topology */
6b3286ed 2009 new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
9676f0c6 2010 CL_COPY_ALL | CL_EXPIRE);
1da177e4 2011 if (!new_ns->root) {
390c6843 2012 up_write(&namespace_sem);
1da177e4 2013 kfree(new_ns);
5cc4a034 2014 return ERR_PTR(-ENOMEM);
1da177e4
LT
2015 }
2016 spin_lock(&vfsmount_lock);
2017 list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
2018 spin_unlock(&vfsmount_lock);
2019
2020 /*
2021 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2022 * as belonging to new namespace. We have already acquired a private
2023 * fs_struct, so tsk->fs->lock is not needed.
2024 */
6b3286ed 2025 p = mnt_ns->root;
1da177e4
LT
2026 q = new_ns->root;
2027 while (p) {
6b3286ed 2028 q->mnt_ns = new_ns;
1da177e4 2029 if (fs) {
6ac08c39 2030 if (p == fs->root.mnt) {
1da177e4 2031 rootmnt = p;
6ac08c39 2032 fs->root.mnt = mntget(q);
1da177e4 2033 }
6ac08c39 2034 if (p == fs->pwd.mnt) {
1da177e4 2035 pwdmnt = p;
6ac08c39 2036 fs->pwd.mnt = mntget(q);
1da177e4 2037 }
1da177e4 2038 }
6b3286ed 2039 p = next_mnt(p, mnt_ns->root);
1da177e4
LT
2040 q = next_mnt(q, new_ns->root);
2041 }
390c6843 2042 up_write(&namespace_sem);
1da177e4 2043
1da177e4
LT
2044 if (rootmnt)
2045 mntput(rootmnt);
2046 if (pwdmnt)
2047 mntput(pwdmnt);
1da177e4 2048
741a2951
JD
2049 return new_ns;
2050}
2051
213dd266 2052struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
e3222c4e 2053 struct fs_struct *new_fs)
741a2951 2054{
6b3286ed 2055 struct mnt_namespace *new_ns;
741a2951 2056
e3222c4e 2057 BUG_ON(!ns);
6b3286ed 2058 get_mnt_ns(ns);
741a2951
JD
2059
2060 if (!(flags & CLONE_NEWNS))
e3222c4e 2061 return ns;
741a2951 2062
e3222c4e 2063 new_ns = dup_mnt_ns(ns, new_fs);
741a2951 2064
6b3286ed 2065 put_mnt_ns(ns);
e3222c4e 2066 return new_ns;
1da177e4
LT
2067}
2068
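/*
 * Illustrative userspace trigger for the copy above (not part of this
 * file): unshare(CLONE_NEWNS) gives the caller a private mount namespace,
 * duplicated from the current one by dup_mnt_ns().  Paths are made up.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (unshare(CLONE_NEWNS) == -1) {
		perror("unshare");
		return 1;
	}
	/* mounts from here on live only in the new namespace (subject to
	 * the propagation type inherited from the parent tree) */
	if (mount("tmpfs", "/tmp", "tmpfs", 0, NULL) == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}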
bdc480e3
HC
2069SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2070 char __user *, type, unsigned long, flags, void __user *, data)
1da177e4
LT
2071{
2072 int retval;
2073 unsigned long data_page;
2074 unsigned long type_page;
2075 unsigned long dev_page;
2076 char *dir_page;
2077
b58fed8b 2078 retval = copy_mount_options(type, &type_page);
1da177e4
LT
2079 if (retval < 0)
2080 return retval;
2081
2082 dir_page = getname(dir_name);
2083 retval = PTR_ERR(dir_page);
2084 if (IS_ERR(dir_page))
2085 goto out1;
2086
b58fed8b 2087 retval = copy_mount_options(dev_name, &dev_page);
1da177e4
LT
2088 if (retval < 0)
2089 goto out2;
2090
b58fed8b 2091 retval = copy_mount_options(data, &data_page);
1da177e4
LT
2092 if (retval < 0)
2093 goto out3;
2094
2095 lock_kernel();
b58fed8b
RP
2096 retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
2097 flags, (void *)data_page);
1da177e4
LT
2098 unlock_kernel();
2099 free_page(data_page);
2100
2101out3:
2102 free_page(dev_page);
2103out2:
2104 putname(dir_page);
2105out1:
2106 free_page(type_page);
2107 return retval;
2108}
2109
1da177e4
LT
2110/*
2111 * pivot_root Semantics:
2112 * Moves the root file system of the current process to the directory put_old,
2113 * makes new_root the new root file system of the current process, and sets
2114 * root/cwd of all processes which had them on the current root to new_root.
2115 *
2116 * Restrictions:
2117 * The new_root and put_old must be directories, and must not be on the
2118 * same file system as the current process root. The put_old must be
2119 * underneath new_root, i.e. adding a non-zero number of /.. to the string
2120 * pointed to by put_old must yield the same directory as new_root. No other
2121 * file system may be mounted on put_old. After all, new_root is a mountpoint.
2122 *
4a0d11fa
NB
2123 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2124 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2125 * in this situation.
2126 *
1da177e4
LT
2127 * Notes:
2128 * - we don't move root/cwd if they are not at the root (reason: if something
2129 * cared enough to change them, it's probably wrong to force them elsewhere)
2130 * - it's okay to pick a root that isn't the root of a file system, e.g.
2131 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2132 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2133 * first.
2134 */
3480b257
HC
2135SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2136 const char __user *, put_old)
1da177e4
LT
2137{
2138 struct vfsmount *tmp;
2d8f3038 2139 struct path new, old, parent_path, root_parent, root;
1da177e4
LT
2140 int error;
2141
2142 if (!capable(CAP_SYS_ADMIN))
2143 return -EPERM;
2144
2d8f3038 2145 error = user_path_dir(new_root, &new);
1da177e4
LT
2146 if (error)
2147 goto out0;
2148 error = -EINVAL;
2d8f3038 2149 if (!check_mnt(new.mnt))
1da177e4
LT
2150 goto out1;
2151
2d8f3038 2152 error = user_path_dir(put_old, &old);
1da177e4
LT
2153 if (error)
2154 goto out1;
2155
2d8f3038 2156 error = security_sb_pivotroot(&old, &new);
1da177e4 2157 if (error) {
2d8f3038 2158 path_put(&old);
1da177e4
LT
2159 goto out1;
2160 }
2161
2162 read_lock(&current->fs->lock);
8c3ee42e 2163 root = current->fs->root;
6ac08c39 2164 path_get(&current->fs->root);
1da177e4 2165 read_unlock(&current->fs->lock);
390c6843 2166 down_write(&namespace_sem);
2d8f3038 2167 mutex_lock(&old.dentry->d_inode->i_mutex);
1da177e4 2168 error = -EINVAL;
2d8f3038
AV
2169 if (IS_MNT_SHARED(old.mnt) ||
2170 IS_MNT_SHARED(new.mnt->mnt_parent) ||
8c3ee42e 2171 IS_MNT_SHARED(root.mnt->mnt_parent))
21444403 2172 goto out2;
8c3ee42e 2173 if (!check_mnt(root.mnt))
1da177e4
LT
2174 goto out2;
2175 error = -ENOENT;
2d8f3038 2176 if (IS_DEADDIR(new.dentry->d_inode))
1da177e4 2177 goto out2;
2d8f3038 2178 if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry))
1da177e4 2179 goto out2;
2d8f3038 2180 if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry))
1da177e4
LT
2181 goto out2;
2182 error = -EBUSY;
2d8f3038
AV
2183 if (new.mnt == root.mnt ||
2184 old.mnt == root.mnt)
1da177e4
LT
2185 goto out2; /* loop, on the same file system */
2186 error = -EINVAL;
8c3ee42e 2187 if (root.mnt->mnt_root != root.dentry)
1da177e4 2188 goto out2; /* not a mountpoint */
8c3ee42e 2189 if (root.mnt->mnt_parent == root.mnt)
0bb6fcc1 2190 goto out2; /* not attached */
2d8f3038 2191 if (new.mnt->mnt_root != new.dentry)
1da177e4 2192 goto out2; /* not a mountpoint */
2d8f3038 2193 if (new.mnt->mnt_parent == new.mnt)
0bb6fcc1 2194 goto out2; /* not attached */
4ac91378 2195 /* make sure we can reach put_old from new_root */
2d8f3038 2196 tmp = old.mnt;
1da177e4 2197 spin_lock(&vfsmount_lock);
2d8f3038 2198 if (tmp != new.mnt) {
1da177e4
LT
2199 for (;;) {
2200 if (tmp->mnt_parent == tmp)
2201 goto out3; /* already mounted on put_old */
2d8f3038 2202 if (tmp->mnt_parent == new.mnt)
1da177e4
LT
2203 break;
2204 tmp = tmp->mnt_parent;
2205 }
2d8f3038 2206 if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
1da177e4 2207 goto out3;
2d8f3038 2208 } else if (!is_subdir(old.dentry, new.dentry))
1da177e4 2209 goto out3;
2d8f3038 2210 detach_mnt(new.mnt, &parent_path);
8c3ee42e 2211 detach_mnt(root.mnt, &root_parent);
4ac91378 2212 /* mount old root on put_old */
2d8f3038 2213 attach_mnt(root.mnt, &old);
4ac91378 2214 /* mount new_root on / */
2d8f3038 2215 attach_mnt(new.mnt, &root_parent);
6b3286ed 2216 touch_mnt_namespace(current->nsproxy->mnt_ns);
1da177e4 2217 spin_unlock(&vfsmount_lock);
2d8f3038
AV
2218 chroot_fs_refs(&root, &new);
2219 security_sb_post_pivotroot(&root, &new);
1da177e4 2220 error = 0;
1a390689
AV
2221 path_put(&root_parent);
2222 path_put(&parent_path);
1da177e4 2223out2:
2d8f3038 2224 mutex_unlock(&old.dentry->d_inode->i_mutex);
390c6843 2225 up_write(&namespace_sem);
8c3ee42e 2226 path_put(&root);
2d8f3038 2227 path_put(&old);
1da177e4 2228out1:
2d8f3038 2229 path_put(&new);
1da177e4 2230out0:
1da177e4
LT
2231 return error;
2232out3:
2233 spin_unlock(&vfsmount_lock);
2234 goto out2;
2235}
2236
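/*
 * Illustrative userspace sequence for the semantics above (not part of
 * this file), as early userspace might do it when switching roots.  The
 * "/newroot" and "/newroot/oldroot" paths are made up and assumed to
 * exist; per the notes above, new_root must already be a mount point,
 * hence the bind mount.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/syscall.h>

int main(void)
{
	if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL) == -1) {
		perror("bind mount");
		return 1;
	}
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") == -1) {
		perror("pivot_root");
		return 1;
	}
	/* the old root is now reachable at /oldroot inside the new root */
	if (chdir("/") == -1 || umount2("/oldroot", MNT_DETACH) == -1) {
		perror("cleanup");
		return 1;
	}
	return 0;
}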
2237static void __init init_mount_tree(void)
2238{
2239 struct vfsmount *mnt;
6b3286ed 2240 struct mnt_namespace *ns;
ac748a09 2241 struct path root;
1da177e4
LT
2242
2243 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2244 if (IS_ERR(mnt))
2245 panic("Can't create rootfs");
6b3286ed
KK
2246 ns = kmalloc(sizeof(*ns), GFP_KERNEL);
2247 if (!ns)
1da177e4 2248 panic("Can't allocate initial namespace");
6b3286ed
KK
2249 atomic_set(&ns->count, 1);
2250 INIT_LIST_HEAD(&ns->list);
2251 init_waitqueue_head(&ns->poll);
2252 ns->event = 0;
2253 list_add(&mnt->mnt_list, &ns->list);
2254 ns->root = mnt;
2255 mnt->mnt_ns = ns;
2256
2257 init_task.nsproxy->mnt_ns = ns;
2258 get_mnt_ns(ns);
2259
ac748a09
JB
2260 root.mnt = ns->root;
2261 root.dentry = ns->root->mnt_root;
2262
2263 set_fs_pwd(current->fs, &root);
2264 set_fs_root(current->fs, &root);
1da177e4
LT
2265}
2266
74bf17cf 2267void __init mnt_init(void)
1da177e4 2268{
13f14b4d 2269 unsigned u;
15a67dd8 2270 int err;
1da177e4 2271
390c6843
RP
2272 init_rwsem(&namespace_sem);
2273
1da177e4 2274 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
20c2df83 2275 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1da177e4 2276
b58fed8b 2277 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
1da177e4
LT
2278
2279 if (!mount_hashtable)
2280 panic("Failed to allocate mount hash table\n");
2281
13f14b4d
ED
2282 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
2283
2284 for (u = 0; u < HASH_SIZE; u++)
2285 INIT_LIST_HEAD(&mount_hashtable[u]);
1da177e4 2286
15a67dd8
RD
2287 err = sysfs_init();
2288 if (err)
2289 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
8e24eea7 2290 __func__, err);
00d26666
GKH
2291 fs_kobj = kobject_create_and_add("fs", NULL);
2292 if (!fs_kobj)
8e24eea7 2293 printk(KERN_WARNING "%s: kobj create error\n", __func__);
1da177e4
LT
2294 init_rootfs();
2295 init_mount_tree();
2296}
2297
6b3286ed 2298void __put_mnt_ns(struct mnt_namespace *ns)
1da177e4 2299{
6b3286ed 2300 struct vfsmount *root = ns->root;
70fbcdf4 2301 LIST_HEAD(umount_list);
6b3286ed 2302 ns->root = NULL;
1ce88cf4 2303 spin_unlock(&vfsmount_lock);
390c6843 2304 down_write(&namespace_sem);
1da177e4 2305 spin_lock(&vfsmount_lock);
a05964f3 2306 umount_tree(root, 0, &umount_list);
1da177e4 2307 spin_unlock(&vfsmount_lock);
390c6843 2308 up_write(&namespace_sem);
70fbcdf4 2309 release_mounts(&umount_list);
6b3286ed 2310 kfree(ns);
1da177e4 2311}