Linux/fs/namespace.c

  1 /*
  2  *  linux/fs/namespace.c
  3  *
  4  * (C) Copyright Al Viro 2000, 2001
  5  *      Released under GPL v2.
  6  *
  7  * Based on code from fs/super.c, copyright Linus Torvalds and others.
  8  * Heavily rewritten.
  9  */
 10 
 11 #include <linux/syscalls.h>
 12 #include <linux/slab.h>
 13 #include <linux/sched.h>
 14 #include <linux/spinlock.h>
 15 #include <linux/percpu.h>
 16 #include <linux/init.h>
 17 #include <linux/kernel.h>
 18 #include <linux/acct.h>
 19 #include <linux/capability.h>
 20 #include <linux/cpumask.h>
 21 #include <linux/module.h>
 22 #include <linux/sysfs.h>
 23 #include <linux/seq_file.h>
 24 #include <linux/mnt_namespace.h>
 25 #include <linux/namei.h>
 26 #include <linux/nsproxy.h>
 27 #include <linux/security.h>
 28 #include <linux/mount.h>
 29 #include <linux/ramfs.h>
 30 #include <linux/log2.h>
 31 #include <linux/idr.h>
 32 #include <linux/fs_struct.h>
 33 #include <linux/fsnotify.h>
 34 #include <asm/uaccess.h>
 35 #include <asm/unistd.h>
 36 #include "pnode.h"
 37 #include "internal.h"
 38 
 39 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
 40 #define HASH_SIZE (1UL << HASH_SHIFT)
 41 
 42 static int event;
 43 static DEFINE_IDA(mnt_id_ida);
 44 static DEFINE_IDA(mnt_group_ida);
 45 static DEFINE_SPINLOCK(mnt_id_lock);
 46 static int mnt_id_start = 0;
 47 static int mnt_group_start = 1;
 48 
 49 static struct list_head *mount_hashtable __read_mostly;
 50 static struct kmem_cache *mnt_cache __read_mostly;
 51 static struct rw_semaphore namespace_sem;
 52 
 53 /* /sys/fs */
 54 struct kobject *fs_kobj;
 55 EXPORT_SYMBOL_GPL(fs_kobj);
 56 
 57 /*
 58  * vfsmount lock may be taken for read to prevent changes to the
 59  * vfsmount hash, i.e. during mountpoint lookups or walking back
 60  * up the tree.
 61  *
 62  * It should be taken for write in all cases where the vfsmount
 63  * tree or hash is modified or when a vfsmount structure is modified.
 64  */
 65 DEFINE_BRLOCK(vfsmount_lock);
 66 
 67 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 68 {
 69         unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
 70         tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
 71         tmp = tmp + (tmp >> HASH_SHIFT);
 72         return tmp & (HASH_SIZE - 1);
 73 }
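
The bucket index mixes the mount and dentry pointers: dividing by L1_CACHE_BYTES discards low-order bits that are identical for cache-aligned allocations, and folding the high bits back in (tmp >> HASH_SHIFT) spreads entries across the table. A minimal userspace sketch of the same idea, with the constants assumed for illustration rather than taken from a real kernel config:

#include <stdio.h>

#define L1_CACHE_BYTES 64        /* assumed cache line size */
#define HASH_SHIFT 8             /* assumed log2(PAGE_SIZE / sizeof(struct list_head)) */
#define HASH_SIZE (1UL << HASH_SHIFT)

static unsigned long bucket(const void *mnt, const void *dentry)
{
        unsigned long tmp = (unsigned long)mnt / L1_CACHE_BYTES;
        tmp += (unsigned long)dentry / L1_CACHE_BYTES;
        tmp = tmp + (tmp >> HASH_SHIFT); /* fold high bits into the index */
        return tmp & (HASH_SIZE - 1);
}

int main(void)
{
        int a, b;                        /* stand-ins for kernel objects */
        printf("bucket = %lu\n", bucket(&a, &b));
        return 0;
}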
 74 
 75 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
 76 
 77 /*
 78  * allocation is serialized by namespace_sem, but we need the spinlock to
 79  * serialize with freeing.
 80  */
 81 static int mnt_alloc_id(struct vfsmount *mnt)
 82 {
 83         int res;
 84 
 85 retry:
 86         ida_pre_get(&mnt_id_ida, GFP_KERNEL);
 87         spin_lock(&mnt_id_lock);
 88         res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
 89         if (!res)
 90                 mnt_id_start = mnt->mnt_id + 1;
 91         spin_unlock(&mnt_id_lock);
 92         if (res == -EAGAIN)
 93                 goto retry;
 94 
 95         return res;
 96 }
 97 
 98 static void mnt_free_id(struct vfsmount *mnt)
 99 {
100         int id = mnt->mnt_id;
101         spin_lock(&mnt_id_lock);
102         ida_remove(&mnt_id_ida, id);
103         if (mnt_id_start > id)
104                 mnt_id_start = id;
105         spin_unlock(&mnt_id_lock);
106 }
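
mnt_id_start is only a hint: allocation continues upward from the last ID handed out, and freeing pulls the hint back down so the lowest free ID is reused. A toy userspace allocator showing the same discipline, with a plain bitmap standing in for the kernel's IDA and the table size assumed:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 1024
static bool used[MAX_IDS];
static int id_start;                        /* mirrors mnt_id_start */

static int alloc_id(void)
{
        for (int id = id_start; id < MAX_IDS; id++) {
                if (!used[id]) {
                        used[id] = true;
                        id_start = id + 1;  /* next search starts above us */
                        return id;
                }
        }
        return -1;                          /* the IDA would report an error here */
}

static void free_id(int id)
{
        used[id] = false;
        if (id_start > id)                  /* lowest free ID wins, as above */
                id_start = id;
}

int main(void)
{
        int a = alloc_id(), b = alloc_id();
        free_id(a);
        printf("%d %d %d\n", a, b, alloc_id()); /* prints "0 1 0" */
        return 0;
}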
107 
108 /*
109  * Allocate a new peer group ID
110  *
111  * mnt_group_ida is protected by namespace_sem
112  */
113 static int mnt_alloc_group_id(struct vfsmount *mnt)
114 {
115         int res;
116 
117         if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
118                 return -ENOMEM;
119 
120         res = ida_get_new_above(&mnt_group_ida,
121                                 mnt_group_start,
122                                 &mnt->mnt_group_id);
123         if (!res)
124                 mnt_group_start = mnt->mnt_group_id + 1;
125 
126         return res;
127 }
128 
129 /*
130  * Release a peer group ID
131  */
132 void mnt_release_group_id(struct vfsmount *mnt)
133 {
134         int id = mnt->mnt_group_id;
135         ida_remove(&mnt_group_ida, id);
136         if (mnt_group_start > id)
137                 mnt_group_start = id;
138         mnt->mnt_group_id = 0;
139 }
140 
141 /*
142  * vfsmount lock must be held for read
143  */
144 static inline void mnt_add_count(struct vfsmount *mnt, int n)
145 {
146 #ifdef CONFIG_SMP
147         this_cpu_add(mnt->mnt_pcp->mnt_count, n);
148 #else
149         preempt_disable();
150         mnt->mnt_count += n;
151         preempt_enable();
152 #endif
153 }
154 
155 static inline void mnt_set_count(struct vfsmount *mnt, int n)
156 {
157 #ifdef CONFIG_SMP
158         this_cpu_write(mnt->mnt_pcp->mnt_count, n);
159 #else
160         mnt->mnt_count = n;
161 #endif
162 }
163 
164 /*
165  * vfsmount lock must be held for read
166  */
167 static inline void mnt_inc_count(struct vfsmount *mnt)
168 {
169         mnt_add_count(mnt, 1);
170 }
171 
172 /*
173  * vfsmount lock must be held for read
174  */
175 static inline void mnt_dec_count(struct vfsmount *mnt)
176 {
177         mnt_add_count(mnt, -1);
178 }
179 
180 /*
181  * vfsmount lock must be held for write
182  */
183 unsigned int mnt_get_count(struct vfsmount *mnt)
184 {
185 #ifdef CONFIG_SMP
186         unsigned int count = 0;
187         int cpu;
188 
189         for_each_possible_cpu(cpu) {
190                 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
191         }
192 
193         return count;
194 #else
195         return mnt->mnt_count;
196 #endif
197 }
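
On SMP the count is sharded: mnt_add_count() touches only the local CPU's counter, so the common get/put path never bounces a shared cache line, and only the rare mnt_get_count() walks every shard. A userspace sketch of the pattern using C11 atomics, with one shard per "CPU" slot and the shard count assumed:

#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4                               /* assumed number of shards */
static atomic_long shard[NCPUS];

static void add_count(int cpu, long n)        /* cheap: one local shard */
{
        atomic_fetch_add_explicit(&shard[cpu], n, memory_order_relaxed);
}

static long get_count(void)                   /* expensive: sum all shards */
{
        long sum = 0;
        for (int cpu = 0; cpu < NCPUS; cpu++)
                sum += atomic_load_explicit(&shard[cpu], memory_order_relaxed);
        return sum;
}

int main(void)
{
        add_count(0, 1);                      /* "get" on CPU 0 */
        add_count(3, 1);                      /* "get" on CPU 3 */
        add_count(1, -1);                     /* "put" on CPU 1 */
        printf("count = %ld\n", get_count()); /* 1: a single shard may go negative */
        return 0;
}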
198 
199 static struct vfsmount *alloc_vfsmnt(const char *name)
200 {
201         struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
202         if (mnt) {
203                 int err;
204 
205                 err = mnt_alloc_id(mnt);
206                 if (err)
207                         goto out_free_cache;
208 
209                 if (name) {
210                         mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
211                         if (!mnt->mnt_devname)
212                                 goto out_free_id;
213                 }
214 
215 #ifdef CONFIG_SMP
216                 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
217                 if (!mnt->mnt_pcp)
218                         goto out_free_devname;
219 
220                 this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
221 #else
222                 mnt->mnt_count = 1;
223                 mnt->mnt_writers = 0;
224 #endif
225 
226                 INIT_LIST_HEAD(&mnt->mnt_hash);
227                 INIT_LIST_HEAD(&mnt->mnt_child);
228                 INIT_LIST_HEAD(&mnt->mnt_mounts);
229                 INIT_LIST_HEAD(&mnt->mnt_list);
230                 INIT_LIST_HEAD(&mnt->mnt_expire);
231                 INIT_LIST_HEAD(&mnt->mnt_share);
232                 INIT_LIST_HEAD(&mnt->mnt_slave_list);
233                 INIT_LIST_HEAD(&mnt->mnt_slave);
234 #ifdef CONFIG_FSNOTIFY
235                 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
236 #endif
237         }
238         return mnt;
239 
240 #ifdef CONFIG_SMP
241 out_free_devname:
242         kfree(mnt->mnt_devname);
243 #endif
244 out_free_id:
245         mnt_free_id(mnt);
246 out_free_cache:
247         kmem_cache_free(mnt_cache, mnt);
248         return NULL;
249 }
250 
251 /*
252  * Most r/o checks on a fs are for operations that take
253  * discrete amounts of time, like a write() or unlink().
254  * We must keep track of when those operations start
255  * (for permission checks) and when they end, so that
256  * we can determine when writes are able to occur to
257  * a filesystem.
258  */
259 /*
260  * __mnt_is_readonly: check whether a mount is read-only
261  * @mnt: the mount to check for its write status
262  *
 263  * This shouldn't be used directly outside of the VFS.
 264  * It does not guarantee that the filesystem will stay
 265  * r/w, just that it is right *now*.  This cannot and
 266  * should not be used in place of IS_RDONLY(inode).
267  * mnt_want/drop_write() will _keep_ the filesystem
268  * r/w.
269  */
270 int __mnt_is_readonly(struct vfsmount *mnt)
271 {
272         if (mnt->mnt_flags & MNT_READONLY)
273                 return 1;
274         if (mnt->mnt_sb->s_flags & MS_RDONLY)
275                 return 1;
276         return 0;
277 }
278 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
279 
280 static inline void mnt_inc_writers(struct vfsmount *mnt)
281 {
282 #ifdef CONFIG_SMP
283         this_cpu_inc(mnt->mnt_pcp->mnt_writers);
284 #else
285         mnt->mnt_writers++;
286 #endif
287 }
288 
289 static inline void mnt_dec_writers(struct vfsmount *mnt)
290 {
291 #ifdef CONFIG_SMP
292         this_cpu_dec(mnt->mnt_pcp->mnt_writers);
293 #else
294         mnt->mnt_writers--;
295 #endif
296 }
297 
298 static unsigned int mnt_get_writers(struct vfsmount *mnt)
299 {
300 #ifdef CONFIG_SMP
301         unsigned int count = 0;
302         int cpu;
303 
304         for_each_possible_cpu(cpu) {
305                 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
306         }
307 
308         return count;
309 #else
310         return mnt->mnt_writers;
311 #endif
312 }
313 
314 /*
315  * Most r/o checks on a fs are for operations that take
316  * discrete amounts of time, like a write() or unlink().
317  * We must keep track of when those operations start
318  * (for permission checks) and when they end, so that
319  * we can determine when writes are able to occur to
320  * a filesystem.
321  */
322 /**
323  * mnt_want_write - get write access to a mount
324  * @mnt: the mount on which to take a write
325  *
326  * This tells the low-level filesystem that a write is
327  * about to be performed to it, and makes sure that
328  * writes are allowed before returning success.  When
329  * the write operation is finished, mnt_drop_write()
330  * must be called.  This is effectively a refcount.
331  */
332 int mnt_want_write(struct vfsmount *mnt)
333 {
334         int ret = 0;
335 
336         preempt_disable();
337         mnt_inc_writers(mnt);
338         /*
 339          * The store from mnt_inc_writers() must be visible before we
 340          * pass the MNT_WRITE_HOLD loop below, so that the slowpath can
 341          * see our incremented count after it has set MNT_WRITE_HOLD.
342          */
343         smp_mb();
344         while (mnt->mnt_flags & MNT_WRITE_HOLD)
345                 cpu_relax();
346         /*
347          * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
348          * be set to match its requirements. So we must not load that until
349          * MNT_WRITE_HOLD is cleared.
350          */
351         smp_rmb();
352         if (__mnt_is_readonly(mnt)) {
353                 mnt_dec_writers(mnt);
354                 ret = -EROFS;
355                 goto out;
356         }
357 out:
358         preempt_enable();
359         return ret;
360 }
361 EXPORT_SYMBOL_GPL(mnt_want_write);
362 
363 /**
364  * mnt_clone_write - get write access to a mount
365  * @mnt: the mount on which to take a write
366  *
367  * This is effectively like mnt_want_write, except
368  * it must only be used to take an extra write reference
369  * on a mountpoint that we already know has a write reference
370  * on it. This allows some optimisation.
371  *
372  * After finished, mnt_drop_write must be called as usual to
373  * drop the reference.
374  */
375 int mnt_clone_write(struct vfsmount *mnt)
376 {
377         /* superblock may be r/o */
378         if (__mnt_is_readonly(mnt))
379                 return -EROFS;
380         preempt_disable();
381         mnt_inc_writers(mnt);
382         preempt_enable();
383         return 0;
384 }
385 EXPORT_SYMBOL_GPL(mnt_clone_write);
386 
387 /**
388  * mnt_want_write_file - get write access to a file's mount
 389  * @file: the file whose mount we are taking a write on
390  *
391  * This is like mnt_want_write, but it takes a file and can
392  * do some optimisations if the file is open for write already
393  */
394 int mnt_want_write_file(struct file *file)
395 {
396         struct inode *inode = file->f_dentry->d_inode;
397         if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
398                 return mnt_want_write(file->f_path.mnt);
399         else
400                 return mnt_clone_write(file->f_path.mnt);
401 }
402 EXPORT_SYMBOL_GPL(mnt_want_write_file);
403 
404 /**
405  * mnt_drop_write - give up write access to a mount
406  * @mnt: the mount on which to give up write access
407  *
408  * Tells the low-level filesystem that we are done
409  * performing writes to it.  Must be matched with
410  * mnt_want_write() call above.
411  */
412 void mnt_drop_write(struct vfsmount *mnt)
413 {
414         preempt_disable();
415         mnt_dec_writers(mnt);
416         preempt_enable();
417 }
418 EXPORT_SYMBOL_GPL(mnt_drop_write);
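
A hedged sketch of the expected calling convention: every successful mnt_want_write() is bracketed with exactly one mnt_drop_write(). The helper below is hypothetical, shown only to illustrate the pairing:

/* Hypothetical VFS-side caller; error-handling pattern only. */
static int modify_something(struct vfsmount *mnt)
{
        int err = mnt_want_write(mnt);
        if (err)
                return err;      /* typically -EROFS on a read-only mount */
        /* ... perform the write while the mount is pinned read-write ... */
        mnt_drop_write(mnt);
        return 0;
}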
419 
420 static int mnt_make_readonly(struct vfsmount *mnt)
421 {
422         int ret = 0;
423 
424         br_write_lock(vfsmount_lock);
425         mnt->mnt_flags |= MNT_WRITE_HOLD;
426         /*
427          * After storing MNT_WRITE_HOLD, we'll read the counters. This store
428          * should be visible before we do.
429          */
430         smp_mb();
431 
432         /*
433          * With writers on hold, if this value is zero, then there are
434          * definitely no active writers (although held writers may subsequently
435          * increment the count, they'll have to wait, and decrement it after
436          * seeing MNT_READONLY).
437          *
438          * It is OK to have counter incremented on one CPU and decremented on
439          * another: the sum will add up correctly. The danger would be when we
440          * sum up each counter, if we read a counter before it is incremented,
 441          * but then read another CPU's count after it has subsequently
 442          * been decremented -- we would see more decrements than we should.
443          * MNT_WRITE_HOLD protects against this scenario, because
444          * mnt_want_write first increments count, then smp_mb, then spins on
445          * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
446          * we're counting up here.
447          */
448         if (mnt_get_writers(mnt) > 0)
449                 ret = -EBUSY;
450         else
451                 mnt->mnt_flags |= MNT_READONLY;
452         /*
453          * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
454          * that become unheld will see MNT_READONLY.
455          */
456         smp_wmb();
457         mnt->mnt_flags &= ~MNT_WRITE_HOLD;
458         br_write_unlock(vfsmount_lock);
459         return ret;
460 }
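
Both halves of the handshake can be modelled in userspace with C11 atomics: the fast path above increments its writer count, fences, then spins while MNT_WRITE_HOLD is set; this slow path sets the hold bit, fences, and only then trusts the summed count. A single-shard sketch, where seq_cst atomics stand in for the kernel's smp_mb()/smp_rmb()/smp_wmb() calls:

#include <stdatomic.h>

static atomic_int writers;
static atomic_int flags;                 /* bit 0: WRITE_HOLD, bit 1: READONLY */
#define F_HOLD   1
#define F_RDONLY 2

int want_write(void)                     /* models mnt_want_write() */
{
        atomic_fetch_add(&writers, 1);   /* store must precede the HOLD check */
        while (atomic_load(&flags) & F_HOLD)
                ;                        /* cpu_relax() in the kernel */
        if (atomic_load(&flags) & F_RDONLY) {  /* read only after HOLD drops */
                atomic_fetch_sub(&writers, 1);
                return -1;               /* -EROFS */
        }
        return 0;
}

int make_readonly(void)                  /* models mnt_make_readonly() */
{
        int ret = 0;

        atomic_fetch_or(&flags, F_HOLD); /* writers now spin before deciding */
        if (atomic_load(&writers) > 0)
                ret = -1;                /* -EBUSY: an active writer exists */
        else
                atomic_fetch_or(&flags, F_RDONLY);
        atomic_fetch_and(&flags, ~F_HOLD); /* release held writers last */
        return ret;
}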
461 
462 static void __mnt_unmake_readonly(struct vfsmount *mnt)
463 {
464         br_write_lock(vfsmount_lock);
465         mnt->mnt_flags &= ~MNT_READONLY;
466         br_write_unlock(vfsmount_lock);
467 }
468 
469 static void free_vfsmnt(struct vfsmount *mnt)
470 {
471         kfree(mnt->mnt_devname);
472         mnt_free_id(mnt);
473 #ifdef CONFIG_SMP
474         free_percpu(mnt->mnt_pcp);
475 #endif
476         kmem_cache_free(mnt_cache, mnt);
477 }
478 
479 /*
480  * find the first or last mount at @dentry on vfsmount @mnt depending on
 481  * @dir. If @dir is set, return the first mount; otherwise return the last.
482  * vfsmount_lock must be held for read or write.
483  */
484 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
485                               int dir)
486 {
487         struct list_head *head = mount_hashtable + hash(mnt, dentry);
488         struct list_head *tmp = head;
489         struct vfsmount *p, *found = NULL;
490 
491         for (;;) {
492                 tmp = dir ? tmp->next : tmp->prev;
493                 p = NULL;
494                 if (tmp == head)
495                         break;
496                 p = list_entry(tmp, struct vfsmount, mnt_hash);
497                 if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
498                         found = p;
499                         break;
500                 }
501         }
502         return found;
503 }
504 
505 /*
506  * lookup_mnt increments the ref count before returning
507  * the vfsmount struct.
508  */
509 struct vfsmount *lookup_mnt(struct path *path)
510 {
511         struct vfsmount *child_mnt;
512 
513         br_read_lock(vfsmount_lock);
514         if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
515                 mntget(child_mnt);
516         br_read_unlock(vfsmount_lock);
517         return child_mnt;
518 }
519 
520 static inline int check_mnt(struct vfsmount *mnt)
521 {
522         return mnt->mnt_ns == current->nsproxy->mnt_ns;
523 }
524 
525 /*
526  * vfsmount lock must be held for write
527  */
528 static void touch_mnt_namespace(struct mnt_namespace *ns)
529 {
530         if (ns) {
531                 ns->event = ++event;
532                 wake_up_interruptible(&ns->poll);
533         }
534 }
535 
536 /*
537  * vfsmount lock must be held for write
538  */
539 static void __touch_mnt_namespace(struct mnt_namespace *ns)
540 {
541         if (ns && ns->event != event) {
542                 ns->event = event;
543                 wake_up_interruptible(&ns->poll);
544         }
545 }
546 
547 /*
548  * Clear dentry's mounted state if it has no remaining mounts.
549  * vfsmount_lock must be held for write.
550  */
551 static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
552 {
553         unsigned u;
554 
555         for (u = 0; u < HASH_SIZE; u++) {
556                 struct vfsmount *p;
557 
558                 list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
559                         if (p->mnt_mountpoint == dentry)
560                                 return;
561                 }
562         }
563         spin_lock(&dentry->d_lock);
564         dentry->d_flags &= ~DCACHE_MOUNTED;
565         spin_unlock(&dentry->d_lock);
566 }
567 
568 /*
569  * vfsmount lock must be held for write
570  */
571 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
572 {
573         old_path->dentry = mnt->mnt_mountpoint;
574         old_path->mnt = mnt->mnt_parent;
575         mnt->mnt_parent = mnt;
576         mnt->mnt_mountpoint = mnt->mnt_root;
577         list_del_init(&mnt->mnt_child);
578         list_del_init(&mnt->mnt_hash);
579         dentry_reset_mounted(old_path->mnt, old_path->dentry);
580 }
581 
582 /*
583  * vfsmount lock must be held for write
584  */
585 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
586                         struct vfsmount *child_mnt)
587 {
588         child_mnt->mnt_parent = mntget(mnt);
589         child_mnt->mnt_mountpoint = dget(dentry);
590         spin_lock(&dentry->d_lock);
591         dentry->d_flags |= DCACHE_MOUNTED;
592         spin_unlock(&dentry->d_lock);
593 }
594 
595 /*
596  * vfsmount lock must be held for write
597  */
598 static void attach_mnt(struct vfsmount *mnt, struct path *path)
599 {
600         mnt_set_mountpoint(path->mnt, path->dentry, mnt);
601         list_add_tail(&mnt->mnt_hash, mount_hashtable +
602                         hash(path->mnt, path->dentry));
603         list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
604 }
605 
606 static inline void __mnt_make_longterm(struct vfsmount *mnt)
607 {
608 #ifdef CONFIG_SMP
609         atomic_inc(&mnt->mnt_longterm);
610 #endif
611 }
612 
613 /* needs vfsmount lock for write */
614 static inline void __mnt_make_shortterm(struct vfsmount *mnt)
615 {
616 #ifdef CONFIG_SMP
617         atomic_dec(&mnt->mnt_longterm);
618 #endif
619 }
620 
621 /*
622  * vfsmount lock must be held for write
623  */
624 static void commit_tree(struct vfsmount *mnt)
625 {
626         struct vfsmount *parent = mnt->mnt_parent;
627         struct vfsmount *m;
628         LIST_HEAD(head);
629         struct mnt_namespace *n = parent->mnt_ns;
630 
631         BUG_ON(parent == mnt);
632 
633         list_add_tail(&head, &mnt->mnt_list);
634         list_for_each_entry(m, &head, mnt_list) {
635                 m->mnt_ns = n;
636                 __mnt_make_longterm(m);
637         }
638 
639         list_splice(&head, n->list.prev);
640 
641         list_add_tail(&mnt->mnt_hash, mount_hashtable +
642                                 hash(parent, mnt->mnt_mountpoint));
643         list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
644         touch_mnt_namespace(n);
645 }
646 
647 static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
648 {
649         struct list_head *next = p->mnt_mounts.next;
650         if (next == &p->mnt_mounts) {
651                 while (1) {
652                         if (p == root)
653                                 return NULL;
654                         next = p->mnt_child.next;
655                         if (next != &p->mnt_parent->mnt_mounts)
656                                 break;
657                         p = p->mnt_parent;
658                 }
659         }
660         return list_entry(next, struct vfsmount, mnt_child);
661 }
662 
663 static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
664 {
665         struct list_head *prev = p->mnt_mounts.prev;
666         while (prev != &p->mnt_mounts) {
667                 p = list_entry(prev, struct vfsmount, mnt_child);
668                 prev = p->mnt_mounts.prev;
669         }
670         return p;
671 }
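
next_mnt() implements a preorder walk over the mount tree using only the intrusive mnt_mounts/mnt_child lists, which is why traversals throughout this file take the shape of this sketch (do_something() is hypothetical):

struct vfsmount *p;

for (p = mnt; p; p = next_mnt(p, mnt))  /* mnt itself first, then the subtree */
        do_something(p);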
672 
673 struct vfsmount *
674 vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
675 {
676         struct vfsmount *mnt;
677         struct dentry *root;
678 
679         if (!type)
680                 return ERR_PTR(-ENODEV);
681 
682         mnt = alloc_vfsmnt(name);
683         if (!mnt)
684                 return ERR_PTR(-ENOMEM);
685 
686         if (flags & MS_KERNMOUNT)
687                 mnt->mnt_flags = MNT_INTERNAL;
688 
689         root = mount_fs(type, flags, name, data);
690         if (IS_ERR(root)) {
691                 free_vfsmnt(mnt);
692                 return ERR_CAST(root);
693         }
694 
695         mnt->mnt_root = root;
696         mnt->mnt_sb = root->d_sb;
697         mnt->mnt_mountpoint = mnt->mnt_root;
698         mnt->mnt_parent = mnt;
699         return mnt;
700 }
701 EXPORT_SYMBOL_GPL(vfs_kern_mount);
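
A hedged usage sketch: subsystems that need a kernel-internal mount pass MS_KERNMOUNT so the result is flagged MNT_INTERNAL. The wrapper name below is illustrative, not part of this file:

/* Illustrative only; mirrors how kern_mount-style helpers use this API. */
static struct vfsmount *mount_pseudo_fs(struct file_system_type *type)
{
        struct vfsmount *mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);

        if (IS_ERR(mnt))
                return NULL;             /* callers may also propagate the ERR_PTR */
        return mnt;
}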
702 
703 static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
704                                         int flag)
705 {
706         struct super_block *sb = old->mnt_sb;
707         struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
708 
709         if (mnt) {
710                 if (flag & (CL_SLAVE | CL_PRIVATE))
711                         mnt->mnt_group_id = 0; /* not a peer of original */
712                 else
713                         mnt->mnt_group_id = old->mnt_group_id;
714 
715                 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
716                         int err = mnt_alloc_group_id(mnt);
717                         if (err)
718                                 goto out_free;
719                 }
720 
721                 mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
722                 atomic_inc(&sb->s_active);
723                 mnt->mnt_sb = sb;
724                 mnt->mnt_root = dget(root);
725                 mnt->mnt_mountpoint = mnt->mnt_root;
726                 mnt->mnt_parent = mnt;
727 
728                 if (flag & CL_SLAVE) {
729                         list_add(&mnt->mnt_slave, &old->mnt_slave_list);
730                         mnt->mnt_master = old;
731                         CLEAR_MNT_SHARED(mnt);
732                 } else if (!(flag & CL_PRIVATE)) {
733                         if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
734                                 list_add(&mnt->mnt_share, &old->mnt_share);
735                         if (IS_MNT_SLAVE(old))
736                                 list_add(&mnt->mnt_slave, &old->mnt_slave);
737                         mnt->mnt_master = old->mnt_master;
738                 }
739                 if (flag & CL_MAKE_SHARED)
740                         set_mnt_shared(mnt);
741 
742                 /* stick the duplicate mount on the same expiry list
743                  * as the original if that was on one */
744                 if (flag & CL_EXPIRE) {
745                         if (!list_empty(&old->mnt_expire))
746                                 list_add(&mnt->mnt_expire, &old->mnt_expire);
747                 }
748         }
749         return mnt;
750 
751  out_free:
752         free_vfsmnt(mnt);
753         return NULL;
754 }
755 
756 static inline void mntfree(struct vfsmount *mnt)
757 {
758         struct super_block *sb = mnt->mnt_sb;
759 
760         /*
761          * This probably indicates that somebody messed
762          * up a mnt_want/drop_write() pair.  If this
763          * happens, the filesystem was probably unable
764          * to make r/w->r/o transitions.
765          */
766         /*
767          * The locking used to deal with mnt_count decrement provides barriers,
768          * so mnt_get_writers() below is safe.
769          */
770         WARN_ON(mnt_get_writers(mnt));
771         fsnotify_vfsmount_delete(mnt);
772         dput(mnt->mnt_root);
773         free_vfsmnt(mnt);
774         deactivate_super(sb);
775 }
776 
777 static void mntput_no_expire(struct vfsmount *mnt)
778 {
779 put_again:
780 #ifdef CONFIG_SMP
781         br_read_lock(vfsmount_lock);
782         if (likely(atomic_read(&mnt->mnt_longterm))) {
783                 mnt_dec_count(mnt);
784                 br_read_unlock(vfsmount_lock);
785                 return;
786         }
787         br_read_unlock(vfsmount_lock);
788 
789         br_write_lock(vfsmount_lock);
790         mnt_dec_count(mnt);
791         if (mnt_get_count(mnt)) {
792                 br_write_unlock(vfsmount_lock);
793                 return;
794         }
795 #else
796         mnt_dec_count(mnt);
797         if (likely(mnt_get_count(mnt)))
798                 return;
799         br_write_lock(vfsmount_lock);
800 #endif
801         if (unlikely(mnt->mnt_pinned)) {
802                 mnt_add_count(mnt, mnt->mnt_pinned + 1);
803                 mnt->mnt_pinned = 0;
804                 br_write_unlock(vfsmount_lock);
805                 acct_auto_close_mnt(mnt);
806                 goto put_again;
807         }
808         br_write_unlock(vfsmount_lock);
809         mntfree(mnt);
810 }
811 
812 void mntput(struct vfsmount *mnt)
813 {
814         if (mnt) {
815                 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
816                 if (unlikely(mnt->mnt_expiry_mark))
817                         mnt->mnt_expiry_mark = 0;
818                 mntput_no_expire(mnt);
819         }
820 }
821 EXPORT_SYMBOL(mntput);
822 
823 struct vfsmount *mntget(struct vfsmount *mnt)
824 {
825         if (mnt)
826                 mnt_inc_count(mnt);
827         return mnt;
828 }
829 EXPORT_SYMBOL(mntget);
830 
831 void mnt_pin(struct vfsmount *mnt)
832 {
833         br_write_lock(vfsmount_lock);
834         mnt->mnt_pinned++;
835         br_write_unlock(vfsmount_lock);
836 }
837 EXPORT_SYMBOL(mnt_pin);
838 
839 void mnt_unpin(struct vfsmount *mnt)
840 {
841         br_write_lock(vfsmount_lock);
842         if (mnt->mnt_pinned) {
843                 mnt_inc_count(mnt);
844                 mnt->mnt_pinned--;
845         }
846         br_write_unlock(vfsmount_lock);
847 }
848 EXPORT_SYMBOL(mnt_unpin);
849 
850 static inline void mangle(struct seq_file *m, const char *s)
851 {
852         seq_escape(m, s, " \t\n\\");
853 }
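
mangle() octal-escapes the characters that would otherwise break the space-separated /proc/mounts format: a space becomes \040, tab \011, newline \012 and backslash \134. For example, a mount point named "/mnt/new disk" is emitted as "/mnt/new\040disk".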
854 
855 /*
856  * Simple .show_options callback for filesystems which don't want to
857  * implement more complex mount option showing.
858  *
859  * See also save_mount_options().
860  */
861 int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
862 {
863         const char *options;
864 
865         rcu_read_lock();
866         options = rcu_dereference(mnt->mnt_sb->s_options);
867 
868         if (options != NULL && options[0]) {
869                 seq_putc(m, ',');
870                 mangle(m, options);
871         }
872         rcu_read_unlock();
873 
874         return 0;
875 }
876 EXPORT_SYMBOL(generic_show_options);
877 
878 /*
879  * If filesystem uses generic_show_options(), this function should be
880  * called from the fill_super() callback.
881  *
882  * The .remount_fs callback usually needs to be handled in a special
 883  * way, to make sure that previous options are not overwritten if the
884  * remount fails.
885  *
 886  * Also note that if the filesystem's .remount_fs function doesn't
887  * reset all options to their default value, but changes only newly
888  * given options, then the displayed options will not reflect reality
889  * any more.
890  */
891 void save_mount_options(struct super_block *sb, char *options)
892 {
893         BUG_ON(sb->s_options);
894         rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
895 }
896 EXPORT_SYMBOL(save_mount_options);
897 
898 void replace_mount_options(struct super_block *sb, char *options)
899 {
900         char *old = sb->s_options;
901         rcu_assign_pointer(sb->s_options, options);
902         if (old) {
903                 synchronize_rcu();
904                 kfree(old);
905         }
906 }
907 EXPORT_SYMBOL(replace_mount_options);
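
Taken together, generic_show_options(), save_mount_options() and replace_mount_options() form a classic RCU publish/retire pattern around s_options; the ordering that matters is sketched below:

/*
 * writer (replace_mount_options)      reader (generic_show_options)
 * ------------------------------      -----------------------------
 * new = kstrdup(options);             rcu_read_lock();
 * rcu_assign_pointer(p, new);         opt = rcu_dereference(p);
 * synchronize_rcu();                  ... format opt into the seq_file ...
 * kfree(old);                         rcu_read_unlock();
 *
 * synchronize_rcu() guarantees that every reader which might still hold
 * the old string has left its read-side critical section before kfree().
 */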
908 
909 #ifdef CONFIG_PROC_FS
910 /* iterator */
911 static void *m_start(struct seq_file *m, loff_t *pos)
912 {
913         struct proc_mounts *p = m->private;
914 
915         down_read(&namespace_sem);
916         return seq_list_start(&p->ns->list, *pos);
917 }
918 
919 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
920 {
921         struct proc_mounts *p = m->private;
922 
923         return seq_list_next(v, &p->ns->list, pos);
924 }
925 
926 static void m_stop(struct seq_file *m, void *v)
927 {
928         up_read(&namespace_sem);
929 }
930 
931 int mnt_had_events(struct proc_mounts *p)
932 {
933         struct mnt_namespace *ns = p->ns;
934         int res = 0;
935 
936         br_read_lock(vfsmount_lock);
937         if (p->m.poll_event != ns->event) {
938                 p->m.poll_event = ns->event;
939                 res = 1;
940         }
941         br_read_unlock(vfsmount_lock);
942 
943         return res;
944 }
945 
946 struct proc_fs_info {
947         int flag;
948         const char *str;
949 };
950 
951 static int show_sb_opts(struct seq_file *m, struct super_block *sb)
952 {
953         static const struct proc_fs_info fs_info[] = {
954                 { MS_SYNCHRONOUS, ",sync" },
955                 { MS_DIRSYNC, ",dirsync" },
956                 { MS_MANDLOCK, ",mand" },
957                 { 0, NULL }
958         };
959         const struct proc_fs_info *fs_infop;
960 
961         for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
962                 if (sb->s_flags & fs_infop->flag)
963                         seq_puts(m, fs_infop->str);
964         }
965 
966         return security_sb_show_options(m, sb);
967 }
968 
969 static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
970 {
971         static const struct proc_fs_info mnt_info[] = {
972                 { MNT_NOSUID, ",nosuid" },
973                 { MNT_NODEV, ",nodev" },
974                 { MNT_NOEXEC, ",noexec" },
975                 { MNT_NOATIME, ",noatime" },
976                 { MNT_NODIRATIME, ",nodiratime" },
977                 { MNT_RELATIME, ",relatime" },
978                 { 0, NULL }
979         };
980         const struct proc_fs_info *fs_infop;
981 
982         for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
983                 if (mnt->mnt_flags & fs_infop->flag)
984                         seq_puts(m, fs_infop->str);
985         }
986 }
987 
988 static void show_type(struct seq_file *m, struct super_block *sb)
989 {
990         mangle(m, sb->s_type->name);
991         if (sb->s_subtype && sb->s_subtype[0]) {
992                 seq_putc(m, '.');
993                 mangle(m, sb->s_subtype);
994         }
995 }
996 
997 static int show_vfsmnt(struct seq_file *m, void *v)
998 {
999         struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
1000         int err = 0;
1001         struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
1002 
1003         if (mnt->mnt_sb->s_op->show_devname) {
1004                 err = mnt->mnt_sb->s_op->show_devname(m, mnt);
1005                 if (err)
1006                         goto out;
1007         } else {
1008                 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
1009         }
1010         seq_putc(m, ' ');
1011         seq_path(m, &mnt_path, " \t\n\\");
1012         seq_putc(m, ' ');
1013         show_type(m, mnt->mnt_sb);
1014         seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
1015         err = show_sb_opts(m, mnt->mnt_sb);
1016         if (err)
1017                 goto out;
1018         show_mnt_opts(m, mnt);
1019         if (mnt->mnt_sb->s_op->show_options)
1020                 err = mnt->mnt_sb->s_op->show_options(m, mnt);
1021         seq_puts(m, " 0 0\n");
1022 out:
1023         return err;
1024 }
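
The result is the familiar /proc/mounts format; an illustrative line (device, mount point, type, options, then the two legacy dump/fsck fields that are always printed as "0 0"):

    /dev/sda1 /boot ext4 rw,relatime,errors=continue 0 0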
1025 
1026 const struct seq_operations mounts_op = {
1027         .start  = m_start,
1028         .next   = m_next,
1029         .stop   = m_stop,
1030         .show   = show_vfsmnt
1031 };
1032 
1033 static int show_mountinfo(struct seq_file *m, void *v)
1034 {
1035         struct proc_mounts *p = m->private;
1036         struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
1037         struct super_block *sb = mnt->mnt_sb;
1038         struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
1039         struct path root = p->root;
1040         int err = 0;
1041 
1042         seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
1043                    MAJOR(sb->s_dev), MINOR(sb->s_dev));
1044         if (sb->s_op->show_path)
1045                 err = sb->s_op->show_path(m, mnt);
1046         else
1047                 seq_dentry(m, mnt->mnt_root, " \t\n\\");
1048         if (err)
1049                 goto out;
1050         seq_putc(m, ' ');
1051 
1052         /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
1053         err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
1054         if (err)
1055                 goto out;
1056 
1057         seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
1058         show_mnt_opts(m, mnt);
1059 
1060         /* Tagged fields ("foo:X" or "bar") */
1061         if (IS_MNT_SHARED(mnt))
1062                 seq_printf(m, " shared:%i", mnt->mnt_group_id);
1063         if (IS_MNT_SLAVE(mnt)) {
1064                 int master = mnt->mnt_master->mnt_group_id;
1065                 int dom = get_dominating_id(mnt, &p->root);
1066                 seq_printf(m, " master:%i", master);
1067                 if (dom && dom != master)
1068                         seq_printf(m, " propagate_from:%i", dom);
1069         }
1070         if (IS_MNT_UNBINDABLE(mnt))
1071                 seq_puts(m, " unbindable");
1072 
1073         /* Filesystem specific data */
1074         seq_puts(m, " - ");
1075         show_type(m, sb);
1076         seq_putc(m, ' ');
1077         if (sb->s_op->show_devname)
1078                 err = sb->s_op->show_devname(m, mnt);
1079         else
1080                 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
1081         if (err)
1082                 goto out;
1083         seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
1084         err = show_sb_opts(m, sb);
1085         if (err)
1086                 goto out;
1087         if (sb->s_op->show_options)
1088                 err = sb->s_op->show_options(m, mnt);
1089         seq_putc(m, '\n');
1090 out:
1091         return err;
1092 }
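
An illustrative /proc/self/mountinfo line, with the fields in the order written above (mount ID, parent ID, major:minor of the superblock's device, root of the mount within the filesystem, mount point, per-mount options, optional tagged fields, the "-" separator, filesystem type, source, per-superblock options):

    36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue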
1093 
1094 const struct seq_operations mountinfo_op = {
1095         .start  = m_start,
1096         .next   = m_next,
1097         .stop   = m_stop,
1098         .show   = show_mountinfo,
1099 };
1100 
1101 static int show_vfsstat(struct seq_file *m, void *v)
1102 {
1103         struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
1104         struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
1105         int err = 0;
1106 
1107         /* device */
1108         if (mnt->mnt_sb->s_op->show_devname) {
1109                 seq_puts(m, "device ");
1110                 err = mnt->mnt_sb->s_op->show_devname(m, mnt);
1111         } else {
1112                 if (mnt->mnt_devname) {
1113                         seq_puts(m, "device ");
1114                         mangle(m, mnt->mnt_devname);
1115                 } else
1116                         seq_puts(m, "no device");
1117         }
1118 
1119         /* mount point */
1120         seq_puts(m, " mounted on ");
1121         seq_path(m, &mnt_path, " \t\n\\");
1122         seq_putc(m, ' ');
1123 
1124         /* file system type */
1125         seq_puts(m, "with fstype ");
1126         show_type(m, mnt->mnt_sb);
1127 
1128         /* optional statistics */
1129         if (mnt->mnt_sb->s_op->show_stats) {
1130                 seq_putc(m, ' ');
1131                 if (!err)
1132                         err = mnt->mnt_sb->s_op->show_stats(m, mnt);
1133         }
1134 
1135         seq_putc(m, '\n');
1136         return err;
1137 }
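
An illustrative /proc/self/mountstats entry in the format assembled above (filesystems with a show_stats callback, such as NFS, append per-mount statistics after the fstype):

    device /dev/sda1 mounted on /boot with fstype ext4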
1138 
1139 const struct seq_operations mountstats_op = {
1140         .start  = m_start,
1141         .next   = m_next,
1142         .stop   = m_stop,
1143         .show   = show_vfsstat,
1144 };
1145 #endif  /* CONFIG_PROC_FS */
1146 
1147 /**
1148  * may_umount_tree - check if a mount tree is busy
1149  * @mnt: root of mount tree
1150  *
1151  * This is called to check if a tree of mounts has any
1152  * open files, pwds, chroots or sub mounts that are
1153  * busy.
1154  */
1155 int may_umount_tree(struct vfsmount *mnt)
1156 {
1157         int actual_refs = 0;
1158         int minimum_refs = 0;
1159         struct vfsmount *p;
1160 
1161         /* write lock needed for mnt_get_count */
1162         br_write_lock(vfsmount_lock);
1163         for (p = mnt; p; p = next_mnt(p, mnt)) {
1164                 actual_refs += mnt_get_count(p);
1165                 minimum_refs += 2;
1166         }
1167         br_write_unlock(vfsmount_lock);
1168 
1169         if (actual_refs > minimum_refs)
1170                 return 0;
1171 
1172         return 1;
1173 }
1174 
1175 EXPORT_SYMBOL(may_umount_tree);
1176 
1177 /**
1178  * may_umount - check if a mount point is busy
1179  * @mnt: root of mount
1180  *
1181  * This is called to check if a mount point has any
1182  * open files, pwds, chroots or sub mounts. If the
1183  * mount has sub mounts this will return busy
1184  * regardless of whether the sub mounts are busy.
1185  *
1186  * Doesn't take quota and stuff into account. IOW, in some cases it will
1187  * give false negatives. The main reason why it's here is that we need
1188  * a non-destructive way to look for easily umountable filesystems.
1189  */
1190 int may_umount(struct vfsmount *mnt)
1191 {
1192         int ret = 1;
1193         down_read(&namespace_sem);
1194         br_write_lock(vfsmount_lock);
1195         if (propagate_mount_busy(mnt, 2))
1196                 ret = 0;
1197         br_write_unlock(vfsmount_lock);
1198         up_read(&namespace_sem);
1199         return ret;
1200 }
1201 
1202 EXPORT_SYMBOL(may_umount);
1203 
1204 void release_mounts(struct list_head *head)
1205 {
1206         struct vfsmount *mnt;
1207         while (!list_empty(head)) {
1208                 mnt = list_first_entry(head, struct vfsmount, mnt_hash);
1209                 list_del_init(&mnt->mnt_hash);
1210                 if (mnt->mnt_parent != mnt) {
1211                         struct dentry *dentry;
1212                         struct vfsmount *m;
1213 
1214                         br_write_lock(vfsmount_lock);
1215                         dentry = mnt->mnt_mountpoint;
1216                         m = mnt->mnt_parent;
1217                         mnt->mnt_mountpoint = mnt->mnt_root;
1218                         mnt->mnt_parent = mnt;
1219                         m->mnt_ghosts--;
1220                         br_write_unlock(vfsmount_lock);
1221                         dput(dentry);
1222                         mntput(m);
1223                 }
1224                 mntput(mnt);
1225         }
1226 }
1227 
1228 /*
1229  * vfsmount lock must be held for write
1230  * namespace_sem must be held for write
1231  */
1232 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
1233 {
1234         LIST_HEAD(tmp_list);
1235         struct vfsmount *p;
1236 
1237         for (p = mnt; p; p = next_mnt(p, mnt))
1238                 list_move(&p->mnt_hash, &tmp_list);
1239 
1240         if (propagate)
1241                 propagate_umount(&tmp_list);
1242 
1243         list_for_each_entry(p, &tmp_list, mnt_hash) {
1244                 list_del_init(&p->mnt_expire);
1245                 list_del_init(&p->mnt_list);
1246                 __touch_mnt_namespace(p->mnt_ns);
1247                 p->mnt_ns = NULL;
1248                 __mnt_make_shortterm(p);
1249                 list_del_init(&p->mnt_child);
1250                 if (p->mnt_parent != p) {
1251                         p->mnt_parent->mnt_ghosts++;
1252                         dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
1253                 }
1254                 change_mnt_propagation(p, MS_PRIVATE);
1255         }
1256         list_splice(&tmp_list, kill);
1257 }
1258 
1259 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
1260 
1261 static int do_umount(struct vfsmount *mnt, int flags)
1262 {
1263         struct super_block *sb = mnt->mnt_sb;
1264         int retval;
1265         LIST_HEAD(umount_list);
1266 
1267         retval = security_sb_umount(mnt, flags);
1268         if (retval)
1269                 return retval;
1270 
1271         /*
1272          * Allow userspace to request a mountpoint be expired rather than
1273          * unmounting unconditionally. Unmount only happens if:
1274          *  (1) the mark is already set (the mark is cleared by mntput())
1275          *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1276          */
1277         if (flags & MNT_EXPIRE) {
1278                 if (mnt == current->fs->root.mnt ||
1279                     flags & (MNT_FORCE | MNT_DETACH))
1280                         return -EINVAL;
1281 
1282                 /*
1283                  * probably don't strictly need the lock here if we examined
1284                  * all race cases, but it's a slowpath.
1285                  */
1286                 br_write_lock(vfsmount_lock);
1287                 if (mnt_get_count(mnt) != 2) {
1288                         br_write_unlock(vfsmount_lock);
1289                         return -EBUSY;
1290                 }
1291                 br_write_unlock(vfsmount_lock);
1292 
1293                 if (!xchg(&mnt->mnt_expiry_mark, 1))
1294                         return -EAGAIN;
1295         }
1296 
1297         /*
1298          * If we may have to abort operations to get out of this
1299          * mount, and they will themselves hold resources we must
1300          * allow the fs to do things. In the Unix tradition of
1301          * 'Gee thats tricky lets do it in userspace' the umount_begin
1302          * might fail to complete on the first run through as other tasks
1303          * must return, and the like. Thats for the mount program to worry
1304          * about for the moment.
1305          */
1306 
1307         if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1308                 sb->s_op->umount_begin(sb);
1309         }
1310 
1311         /*
1312          * No sense in grabbing the lock for this test, but the test itself
1313          * looks somewhat bogus. Suggestions for a better replacement?
1314          * Ho-hum... In principle, we might treat that as umount + switch
1315          * to rootfs. GC would eventually take care of the old vfsmount.
1316          * Actually it makes sense, especially if rootfs would contain a
1317          * /reboot - static binary that would close all descriptors and
1318          * call reboot(2). Then init(8) could umount root and exec /reboot.
1319          */
1320         if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1321                 /*
1322                  * Special case for "unmounting" root ...
1323                  * we just try to remount it readonly.
1324                  */
1325                 down_write(&sb->s_umount);
1326                 if (!(sb->s_flags & MS_RDONLY))
1327                         retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
1328                 up_write(&sb->s_umount);
1329                 return retval;
1330         }
1331 
1332         down_write(&namespace_sem);
1333         br_write_lock(vfsmount_lock);
1334         event++;
1335 
1336         if (!(flags & MNT_DETACH))
1337                 shrink_submounts(mnt, &umount_list);
1338 
1339         retval = -EBUSY;
1340         if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
1341                 if (!list_empty(&mnt->mnt_list))
1342                         umount_tree(mnt, 1, &umount_list);
1343                 retval = 0;
1344         }
1345         br_write_unlock(vfsmount_lock);
1346         up_write(&namespace_sem);
1347         release_mounts(&umount_list);
1348         return retval;
1349 }
1350 
1351 /*
1352  * Now umount can handle mount points as well as block devices.
1353  * This is important for filesystems which use unnamed block devices.
1354  *
1355  * We now support a flag for forced unmount like the other 'big iron'
1356  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD (the amd automounter).
1357  */
1358 
1359 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1360 {
1361         struct path path;
1362         int retval;
1363         int lookup_flags = 0;
1364 
1365         if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1366                 return -EINVAL;
1367 
1368         if (!(flags & UMOUNT_NOFOLLOW))
1369                 lookup_flags |= LOOKUP_FOLLOW;
1370 
1371         retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1372         if (retval)
1373                 goto out;
1374         retval = -EINVAL;
1375         if (path.dentry != path.mnt->mnt_root)
1376                 goto dput_and_out;
1377         if (!check_mnt(path.mnt))
1378                 goto dput_and_out;
1379 
1380         retval = -EPERM;
1381         if (!capable(CAP_SYS_ADMIN))
1382                 goto dput_and_out;
1383 
1384         retval = do_umount(path.mnt, flags);
1385 dput_and_out:
1386         /* we mustn't call path_put() as that would clear mnt_expiry_mark */
1387         dput(path.dentry);
1388         mntput_no_expire(path.mnt);
1389 out:
1390         return retval;
1391 }
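
From userspace this entry point is reached via umount(2)/umount2(2); a hedged example exercising two of the flags handled above, assuming a libc that defines UMOUNT_NOFOLLOW (the paths are illustrative):

#include <stdio.h>
#include <sys/mount.h>   /* umount2(), MNT_DETACH, UMOUNT_NOFOLLOW */

int main(void)
{
        /* Lazy unmount: detach now, release once the last user is gone. */
        if (umount2("/mnt/busy", MNT_DETACH) != 0)
                perror("umount2 MNT_DETACH");

        /* Refuse to dereference a symlink as the target. */
        if (umount2("/mnt/maybe-a-link", UMOUNT_NOFOLLOW) != 0)
                perror("umount2 UMOUNT_NOFOLLOW");
        return 0;
}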
1392 
1393 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1394 
1395 /*
1396  *      The 2.0 compatible umount. No flags.
1397  */
1398 SYSCALL_DEFINE1(oldumount, char __user *, name)
1399 {
1400         return sys_umount(name, 0);
1401 }
1402 
1403 #endif
1404 
1405 static int mount_is_safe(struct path *path)
1406 {
1407         if (capable(CAP_SYS_ADMIN))
1408                 return 0;
1409         return -EPERM;
1410 #ifdef notyet
1411         if (S_ISLNK(path->dentry->d_inode->i_mode))
1412                 return -EPERM;
1413         if (path->dentry->d_inode->i_mode & S_ISVTX) {
1414                 if (current_uid() != path->dentry->d_inode->i_uid)
1415                         return -EPERM;
1416         }
1417         if (inode_permission(path->dentry->d_inode, MAY_WRITE))
1418                 return -EPERM;
1419         return 0;
1420 #endif
1421 }
1422 
1423 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
1424                                         int flag)
1425 {
1426         struct vfsmount *res, *p, *q, *r, *s;
1427         struct path path;
1428 
1429         if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
1430                 return NULL;
1431 
1432         res = q = clone_mnt(mnt, dentry, flag);
1433         if (!q)
1434                 goto Enomem;
1435         q->mnt_mountpoint = mnt->mnt_mountpoint;
1436 
1437         p = mnt;
1438         list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1439                 if (!is_subdir(r->mnt_mountpoint, dentry))
1440                         continue;
1441 
1442                 for (s = r; s; s = next_mnt(s, r)) {
1443                         if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
1444                                 s = skip_mnt_tree(s);
1445                                 continue;
1446                         }
1447                         while (p != s->mnt_parent) {
1448                                 p = p->mnt_parent;
1449                                 q = q->mnt_parent;
1450                         }
1451                         p = s;
1452                         path.mnt = q;
1453                         path.dentry = p->mnt_mountpoint;
1454                         q = clone_mnt(p, p->mnt_root, flag);
1455                         if (!q)
1456                                 goto Enomem;
1457                         br_write_lock(vfsmount_lock);
1458                         list_add_tail(&q->mnt_list, &res->mnt_list);
1459                         attach_mnt(q, &path);
1460                         br_write_unlock(vfsmount_lock);
1461                 }
1462         }
1463         return res;
1464 Enomem:
1465         if (res) {
1466                 LIST_HEAD(umount_list);
1467                 br_write_lock(vfsmount_lock);
1468                 umount_tree(res, 0, &umount_list);
1469                 br_write_unlock(vfsmount_lock);
1470                 release_mounts(&umount_list);
1471         }
1472         return NULL;
1473 }
1474 
1475 struct vfsmount *collect_mounts(struct path *path)
1476 {
1477         struct vfsmount *tree;
1478         down_write(&namespace_sem);
1479         tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
1480         up_write(&namespace_sem);
1481         return tree;
1482 }
1483 
1484 void drop_collected_mounts(struct vfsmount *mnt)
1485 {
1486         LIST_HEAD(umount_list);
1487         down_write(&namespace_sem);
1488         br_write_lock(vfsmount_lock);
1489         umount_tree(mnt, 0, &umount_list);
1490         br_write_unlock(vfsmount_lock);
1491         up_write(&namespace_sem);
1492         release_mounts(&umount_list);
1493 }
1494 
1495 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1496                    struct vfsmount *root)
1497 {
1498         struct vfsmount *mnt;
1499         int res = f(root, arg);
1500         if (res)
1501                 return res;
1502         list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
1503                 res = f(mnt, arg);
1504                 if (res)
1505                         return res;
1506         }
1507         return 0;
1508 }
1509 
1510 static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1511 {
1512         struct vfsmount *p;
1513 
1514         for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1515                 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1516                         mnt_release_group_id(p);
1517         }
1518 }
1519 
1520 static int invent_group_ids(struct vfsmount *mnt, bool recurse)
1521 {
1522         struct vfsmount *p;
1523 
1524         for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1525                 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1526                         int err = mnt_alloc_group_id(p);
1527                         if (err) {
1528                                 cleanup_group_ids(mnt, p);
1529                                 return err;
1530                         }
1531                 }
1532         }
1533 
1534         return 0;
1535 }
1536 
1537 /*
1538  *  @source_mnt : mount tree to be attached
1539  *  @path       : place where the mount tree @source_mnt is attached
1540  *  @parent_path: if non-null, detach the source_mnt from its parent and
1541  *                 store the parent mount and mountpoint dentry.
1542  *                 (done when source_mnt is moved)
1543  *
1544  *  NOTE: the tables below explain the semantics when a source mount
1545  *  of a given type is attached to a destination mount of a given type.
1546  * ---------------------------------------------------------------------------
1547  * |         BIND MOUNT OPERATION                                            |
1548  * |**************************************************************************
1549  * | source-->| shared        |       private  |       slave    | unbindable |
1550  * | dest     |               |                |                |            |
1551  * |   |      |               |                |                |            |
1552  * |   v      |               |                |                |            |
1553  * |**************************************************************************
1554  * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
1555  * |          |               |                |                |            |
1556  * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
1557  * ***************************************************************************
1558  * A bind operation clones the source mount and mounts the clone on the
1559  * destination mount.
1560  *
1561  * (++)  the cloned mount is propagated to all the mounts in the propagation
1562  *       tree of the destination mount and the cloned mount is added to
1563  *       the peer group of the source mount.
1564  * (+)   the cloned mount is created under the destination mount and is marked
1565  *       as shared. The cloned mount is added to the peer group of the source
1566  *       mount.
1567  * (+++) the mount is propagated to all the mounts in the propagation tree
1568  *       of the destination mount and the cloned mount is made slave
1569  *       of the same master as that of the source mount. The cloned mount
1570  *       is marked as 'shared and slave'.
1571  * (*)   the cloned mount is made a slave of the same master as that of the
1572  *       source mount.
1573  *
1574  * ---------------------------------------------------------------------------
1575  * |                    MOVE MOUNT OPERATION                                 |
1576  * |**************************************************************************
1577  * | source-->| shared        |       private  |       slave    | unbindable |
1578  * | dest     |               |                |                |            |
1579  * |   |      |               |                |                |            |
1580  * |   v      |               |                |                |            |
1581  * |**************************************************************************
1582  * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
1583  * |          |               |                |                |            |
1584  * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
1585  * ***************************************************************************
1586  *
1587  * (+)  the mount is moved to the destination. And is then propagated to
1588  *      all the mounts in the propagation tree of the destination mount.
1589  * (+*)  the mount is moved to the destination.
1590  * (+++)  the mount is moved to the destination and is then propagated to
1591  *      all the mounts belonging to the destination mount's propagation tree.
1592  *      the mount is marked as 'shared and slave'.
1593  * (*)  the mount continues to be a slave at the new location.
1594  *
1595  * if the source mount is a tree, the operations explained above are
1596  * applied to each mount in the tree.
1597  * Must be called without spinlocks held, since this function can sleep
1598  * in allocations.
1599  */
1600 static int attach_recursive_mnt(struct vfsmount *source_mnt,
1601                         struct path *path, struct path *parent_path)
1602 {
1603         LIST_HEAD(tree_list);
1604         struct vfsmount *dest_mnt = path->mnt;
1605         struct dentry *dest_dentry = path->dentry;
1606         struct vfsmount *child, *p;
1607         int err;
1608 
1609         if (IS_MNT_SHARED(dest_mnt)) {
1610                 err = invent_group_ids(source_mnt, true);
1611                 if (err)
1612                         goto out;
1613         }
1614         err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
1615         if (err)
1616                 goto out_cleanup_ids;
1617 
1618         br_write_lock(vfsmount_lock);
1619 
1620         if (IS_MNT_SHARED(dest_mnt)) {
1621                 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1622                         set_mnt_shared(p);
1623         }
1624         if (parent_path) {
1625                 detach_mnt(source_mnt, parent_path);
1626                 attach_mnt(source_mnt, path);
1627                 touch_mnt_namespace(parent_path->mnt->mnt_ns);
1628         } else {
1629                 mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
1630                 commit_tree(source_mnt);
1631         }
1632 
1633         list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
1634                 list_del_init(&child->mnt_hash);
1635                 commit_tree(child);
1636         }
1637         br_write_unlock(vfsmount_lock);
1638 
1639         return 0;
1640 
1641  out_cleanup_ids:
1642         if (IS_MNT_SHARED(dest_mnt))
1643                 cleanup_group_ids(source_mnt, NULL);
1644  out:
1645         return err;
1646 }
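
/*
 * Illustrative userspace sketch (not part of namespace.c): one way to
 * reach the "shared destination" rows of the bind table above.  Marking
 * /A shared and then bind-mounting beneath it makes attach_recursive_mnt()
 * see IS_MNT_SHARED(dest_mnt), so the cloned mount is propagated to every
 * peer of /A.  The paths are assumptions made up for the example.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* mark the destination mount shared (handled by do_change_type()) */
        if (mount(NULL, "/A", NULL, MS_SHARED, NULL))
                perror("MS_SHARED");
        /* bind into it: the "shared" column of the bind table applies */
        if (mount("/src", "/A/dst", NULL, MS_BIND, NULL))
                perror("MS_BIND");
        return 0;
}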
1647 
1648 static int lock_mount(struct path *path)
1649 {
1650         struct vfsmount *mnt;
1651 retry:
1652         mutex_lock(&path->dentry->d_inode->i_mutex);
1653         if (unlikely(cant_mount(path->dentry))) {
1654                 mutex_unlock(&path->dentry->d_inode->i_mutex);
1655                 return -ENOENT;
1656         }
1657         down_write(&namespace_sem);
1658         mnt = lookup_mnt(path);
1659         if (likely(!mnt))
1660                 return 0;
1661         up_write(&namespace_sem);
1662         mutex_unlock(&path->dentry->d_inode->i_mutex);
1663         path_put(path);
1664         path->mnt = mnt;
1665         path->dentry = dget(mnt->mnt_root);
1666         goto retry;
1667 }
1668 
1669 static void unlock_mount(struct path *path)
1670 {
1671         up_write(&namespace_sem);
1672         mutex_unlock(&path->dentry->d_inode->i_mutex);
1673 }
1674 
1675 static int graft_tree(struct vfsmount *mnt, struct path *path)
1676 {
1677         if (mnt->mnt_sb->s_flags & MS_NOUSER)
1678                 return -EINVAL;
1679 
1680         if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1681               S_ISDIR(mnt->mnt_root->d_inode->i_mode))
1682                 return -ENOTDIR;
1683 
1684         if (d_unlinked(path->dentry))
1685                 return -ENOENT;
1686 
1687         return attach_recursive_mnt(mnt, path, NULL);
1688 }
1689 
1690 /*
1691  * Sanity-check the flags passed to change_mnt_propagation().
1692  */
1693 
1694 static int flags_to_propagation_type(int flags)
1695 {
1696         int type = flags & ~(MS_REC | MS_SILENT);
1697 
1698         /* Fail if any non-propagation flags are set */
1699         if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
1700                 return 0;
1701         /* Only one propagation flag should be set */
1702         if (!is_power_of_2(type))
1703                 return 0;
1704         return type;
1705 }
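
/*
 * Worked examples (illustrative, not from the original source): after
 * masking off MS_REC and MS_SILENT, exactly one propagation flag may
 * remain, otherwise 0 is returned and do_change_type() fails with -EINVAL.
 *
 *   flags_to_propagation_type(MS_SHARED)             -> MS_SHARED
 *   flags_to_propagation_type(MS_PRIVATE | MS_REC)   -> MS_PRIVATE
 *   flags_to_propagation_type(MS_SHARED | MS_SLAVE)  -> 0 (two flags set)
 *   flags_to_propagation_type(MS_SHARED | MS_RDONLY) -> 0 (stray flag)
 */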
1706 
1707 /*
1708  * recursively change the type of the mountpoint.
1709  */
1710 static int do_change_type(struct path *path, int flag)
1711 {
1712         struct vfsmount *m, *mnt = path->mnt;
1713         int recurse = flag & MS_REC;
1714         int type;
1715         int err = 0;
1716 
1717         if (!capable(CAP_SYS_ADMIN))
1718                 return -EPERM;
1719 
1720         if (path->dentry != path->mnt->mnt_root)
1721                 return -EINVAL;
1722 
1723         type = flags_to_propagation_type(flag);
1724         if (!type)
1725                 return -EINVAL;
1726 
1727         down_write(&namespace_sem);
1728         if (type == MS_SHARED) {
1729                 err = invent_group_ids(mnt, recurse);
1730                 if (err)
1731                         goto out_unlock;
1732         }
1733 
1734         br_write_lock(vfsmount_lock);
1735         for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1736                 change_mnt_propagation(m, type);
1737         br_write_unlock(vfsmount_lock);
1738 
1739  out_unlock:
1740         up_write(&namespace_sem);
1741         return err;
1742 }
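
/*
 * Illustrative userspace sketch (not part of namespace.c): both calls
 * below are routed to do_change_type() by do_mount().  With MS_REC the
 * loop above visits every mount beneath the given one.  "/mnt" is an
 * assumed example mountpoint.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* make a single mount private */
        if (mount(NULL, "/mnt", NULL, MS_PRIVATE, NULL))
                perror("MS_PRIVATE");
        /* make an entire subtree unbindable */
        if (mount(NULL, "/mnt", NULL, MS_UNBINDABLE | MS_REC, NULL))
                perror("MS_UNBINDABLE|MS_REC");
        return 0;
}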
1743 
1744 /*
1745  * do loopback mount.
1746  */
1747 static int do_loopback(struct path *path, char *old_name,
1748                                 int recurse)
1749 {
1750         LIST_HEAD(umount_list);
1751         struct path old_path;
1752         struct vfsmount *mnt = NULL;
1753         int err = mount_is_safe(path);
1754         if (err)
1755                 return err;
1756         if (!old_name || !*old_name)
1757                 return -EINVAL;
1758         err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
1759         if (err)
1760                 return err;
1761 
1762         err = lock_mount(path);
1763         if (err)
1764                 goto out;
1765 
1766         err = -EINVAL;
1767         if (IS_MNT_UNBINDABLE(old_path.mnt))
1768                 goto out2;
1769 
1770         if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
1771                 goto out2;
1772 
1773         err = -ENOMEM;
1774         if (recurse)
1775                 mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
1776         else
1777                 mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);
1778 
1779         if (!mnt)
1780                 goto out2;
1781 
1782         err = graft_tree(mnt, path);
1783         if (err) {
1784                 br_write_lock(vfsmount_lock);
1785                 umount_tree(mnt, 0, &umount_list);
1786                 br_write_unlock(vfsmount_lock);
1787         }
1788 out2:
1789         unlock_mount(path);
1790         release_mounts(&umount_list);
1791 out:
1792         path_put(&old_path);
1793         return err;
1794 }
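
/*
 * Illustrative userspace sketch (not part of namespace.c): a plain and a
 * recursive bind mount.  The first reaches do_loopback() with recurse == 0
 * (clone_mnt() of a single mount); the second sets MS_REC and takes the
 * copy_tree() path.  The paths are assumptions made up for the example.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("/home/user/data", "/mnt/data", NULL, MS_BIND, NULL))
                perror("bind");
        if (mount("/home", "/mnt/home", NULL, MS_BIND | MS_REC, NULL))
                perror("rbind");
        return 0;
}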
1795 
1796 static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
1797 {
1798         int error = 0;
1799         int readonly_request = 0;
1800 
1801         if (ms_flags & MS_RDONLY)
1802                 readonly_request = 1;
1803         if (readonly_request == __mnt_is_readonly(mnt))
1804                 return 0;
1805 
1806         if (readonly_request)
1807                 error = mnt_make_readonly(mnt);
1808         else
1809                 __mnt_unmake_readonly(mnt);
1810         return error;
1811 }
1812 
1813 /*
1814  * change filesystem flags. dir should be the physical root of the filesystem.
1815  * If you've mounted a non-root directory somewhere and want to do a remount
1816  * on it - tough luck.
1817  */
1818 static int do_remount(struct path *path, int flags, int mnt_flags,
1819                       void *data)
1820 {
1821         int err;
1822         struct super_block *sb = path->mnt->mnt_sb;
1823 
1824         if (!capable(CAP_SYS_ADMIN))
1825                 return -EPERM;
1826 
1827         if (!check_mnt(path->mnt))
1828                 return -EINVAL;
1829 
1830         if (path->dentry != path->mnt->mnt_root)
1831                 return -EINVAL;
1832 
1833         err = security_sb_remount(sb, data);
1834         if (err)
1835                 return err;
1836 
1837         down_write(&sb->s_umount);
1838         if (flags & MS_BIND)
1839                 err = change_mount_flags(path->mnt, flags);
1840         else
1841                 err = do_remount_sb(sb, flags, data, 0);
1842         if (!err) {
1843                 br_write_lock(vfsmount_lock);
1844                 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
1845                 path->mnt->mnt_flags = mnt_flags;
1846                 br_write_unlock(vfsmount_lock);
1847         }
1848         up_write(&sb->s_umount);
1849         if (!err) {
1850                 br_write_lock(vfsmount_lock);
1851                 touch_mnt_namespace(path->mnt->mnt_ns);
1852                 br_write_unlock(vfsmount_lock);
1853         }
1854         return err;
1855 }
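
/*
 * Illustrative userspace sketch (not part of namespace.c): without MS_BIND
 * a remount changes superblock flags via do_remount_sb(); with MS_BIND it
 * only changes the per-mount flags via change_mount_flags(), which is how a
 * read-only bind mount is made.  "/mnt" is an assumed example mountpoint.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* whole filesystem read-only */
        if (mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL))
                perror("remount");
        /* only this mountpoint read-only */
        if (mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL))
                perror("remount,bind");
        return 0;
}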
1856 
1857 static inline int tree_contains_unbindable(struct vfsmount *mnt)
1858 {
1859         struct vfsmount *p;
1860         for (p = mnt; p; p = next_mnt(p, mnt)) {
1861                 if (IS_MNT_UNBINDABLE(p))
1862                         return 1;
1863         }
1864         return 0;
1865 }
1866 
1867 static int do_move_mount(struct path *path, char *old_name)
1868 {
1869         struct path old_path, parent_path;
1870         struct vfsmount *p;
1871         int err = 0;
1872         if (!capable(CAP_SYS_ADMIN))
1873                 return -EPERM;
1874         if (!old_name || !*old_name)
1875                 return -EINVAL;
1876         err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
1877         if (err)
1878                 return err;
1879 
1880         err = lock_mount(path);
1881         if (err < 0)
1882                 goto out;
1883 
1884         err = -EINVAL;
1885         if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
1886                 goto out1;
1887 
1888         if (d_unlinked(path->dentry))
1889                 goto out1;
1890 
1891         err = -EINVAL;
1892         if (old_path.dentry != old_path.mnt->mnt_root)
1893                 goto out1;
1894 
1895         if (old_path.mnt == old_path.mnt->mnt_parent)
1896                 goto out1;
1897 
1898         if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1899               S_ISDIR(old_path.dentry->d_inode->i_mode))
1900                 goto out1;
1901         /*
1902          * Don't move a mount residing in a shared parent.
1903          */
1904         if (old_path.mnt->mnt_parent &&
1905             IS_MNT_SHARED(old_path.mnt->mnt_parent))
1906                 goto out1;
1907         /*
1908          * Don't move a mount tree containing unbindable mounts to a destination
1909          * mount which is shared.
1910          */
1911         if (IS_MNT_SHARED(path->mnt) &&
1912             tree_contains_unbindable(old_path.mnt))
1913                 goto out1;
1914         err = -ELOOP;
1915         for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
1916                 if (p == old_path.mnt)
1917                         goto out1;
1918 
1919         err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
1920         if (err)
1921                 goto out1;
1922 
1923         /* if the mount is moved, it should no longer expire
1924          * automatically */
1925         list_del_init(&old_path.mnt->mnt_expire);
1926 out1:
1927         unlock_mount(path);
1928 out:
1929         if (!err)
1930                 path_put(&parent_path);
1931         path_put(&old_path);
1932         return err;
1933 }
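
/*
 * Illustrative userspace sketch (not part of namespace.c): moving an
 * attached mount.  This reaches do_move_mount(), which hands the mount to
 * attach_recursive_mnt() with a non-NULL parent_path, applying the MOVE
 * table documented above.  The paths are assumptions for the example.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("/mnt/olddir", "/mnt/newdir", NULL, MS_MOVE, NULL))
                perror("MS_MOVE");
        return 0;
}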
1934 
1935 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
1936 {
1937         int err;
1938         const char *subtype = strchr(fstype, '.');
1939         if (subtype) {
1940                 subtype++;
1941                 err = -EINVAL;
1942                 if (!subtype[0])
1943                         goto err;
1944         } else
1945                 subtype = "";
1946 
1947         mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
1948         err = -ENOMEM;
1949         if (!mnt->mnt_sb->s_subtype)
1950                 goto err;
1951         return mnt;
1952 
1953  err:
1954         mntput(mnt);
1955         return ERR_PTR(err);
1956 }
1957 
1958 struct vfsmount *
1959 do_kern_mount(const char *fstype, int flags, const char *name, void *data)
1960 {
1961         struct file_system_type *type = get_fs_type(fstype);
1962         struct vfsmount *mnt;
1963         if (!type)
1964                 return ERR_PTR(-ENODEV);
1965         mnt = vfs_kern_mount(type, flags, name, data);
1966         if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
1967             !mnt->mnt_sb->s_subtype)
1968                 mnt = fs_set_subtype(mnt, fstype);
1969         put_filesystem(type);
1970         return mnt;
1971 }
1972 EXPORT_SYMBOL_GPL(do_kern_mount);
1973 
1974 /*
1975  * add a mount into a namespace's mount tree
1976  */
1977 static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
1978 {
1979         int err;
1980 
1981         mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
1982 
1983         err = lock_mount(path);
1984         if (err)
1985                 return err;
1986 
1987         err = -EINVAL;
1988         if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
1989                 goto unlock;
1990 
1991         /* Refuse the same filesystem on the same mount point */
1992         err = -EBUSY;
1993         if (path->mnt->mnt_sb == newmnt->mnt_sb &&
1994             path->mnt->mnt_root == path->dentry)
1995                 goto unlock;
1996 
1997         err = -EINVAL;
1998         if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
1999                 goto unlock;
2000 
2001         newmnt->mnt_flags = mnt_flags;
2002         err = graft_tree(newmnt, path);
2003 
2004 unlock:
2005         unlock_mount(path);
2006         return err;
2007 }
2008 
2009 /*
2010  * create a new mount for userspace and request it to be added into the
2011  * namespace's tree
2012  */
2013 static int do_new_mount(struct path *path, char *type, int flags,
2014                         int mnt_flags, char *name, void *data)
2015 {
2016         struct vfsmount *mnt;
2017         int err;
2018 
2019         if (!type)
2020                 return -EINVAL;
2021 
2022         /* we need capabilities... */
2023         if (!capable(CAP_SYS_ADMIN))
2024                 return -EPERM;
2025 
2026         mnt = do_kern_mount(type, flags, name, data);
2027         if (IS_ERR(mnt))
2028                 return PTR_ERR(mnt);
2029 
2030         err = do_add_mount(mnt, path, mnt_flags);
2031         if (err)
2032                 mntput(mnt);
2033         return err;
2034 }
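
/*
 * Illustrative userspace sketch (not part of namespace.c): a call with no
 * MS_REMOUNT/MS_BIND/MS_MOVE/propagation flags falls through do_mount() to
 * do_new_mount(), i.e. do_kern_mount() followed by do_add_mount().  The
 * tmpfs size option is an assumed example.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("none", "/mnt/tmp", "tmpfs", MS_NOSUID | MS_NODEV,
                  "size=16m"))
                perror("tmpfs");
        return 0;
}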
2035 
2036 int finish_automount(struct vfsmount *m, struct path *path)
2037 {
2038         int err;
2039         /* The new mount record should have at least 2 refs to prevent it
2040          * from being expired before we get a chance to add it
2041          */
2042         BUG_ON(mnt_get_count(m) < 2);
2043 
2044         if (m->mnt_sb == path->mnt->mnt_sb &&
2045             m->mnt_root == path->dentry) {
2046                 err = -ELOOP;
2047                 goto fail;
2048         }
2049 
2050         err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2051         if (!err)
2052                 return 0;
2053 fail:
2054         /* remove m from any expiration list it may be on */
2055         if (!list_empty(&m->mnt_expire)) {
2056                 down_write(&namespace_sem);
2057                 br_write_lock(vfsmount_lock);
2058                 list_del_init(&m->mnt_expire);
2059                 br_write_unlock(vfsmount_lock);
2060                 up_write(&namespace_sem);
2061         }
2062         mntput(m);
2063         mntput(m);
2064         return err;
2065 }
2066 
2067 /**
2068  * mnt_set_expiry - Put a mount on an expiration list
2069  * @mnt: The mount to list.
2070  * @expiry_list: The list to add the mount to.
2071  */
2072 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2073 {
2074         down_write(&namespace_sem);
2075         br_write_lock(vfsmount_lock);
2076 
2077         list_add_tail(&mnt->mnt_expire, expiry_list);
2078 
2079         br_write_unlock(vfsmount_lock);
2080         up_write(&namespace_sem);
2081 }
2082 EXPORT_SYMBOL(mnt_set_expiry);
2083 
2084 /*
2085  * process a list of expirable mountpoints with the intent of discarding any
2086  * mountpoints that aren't in use and haven't been touched since the last
2087  * time we came here
2088  */
2089 void mark_mounts_for_expiry(struct list_head *mounts)
2090 {
2091         struct vfsmount *mnt, *next;
2092         LIST_HEAD(graveyard);
2093         LIST_HEAD(umounts);
2094 
2095         if (list_empty(mounts))
2096                 return;
2097 
2098         down_write(&namespace_sem);
2099         br_write_lock(vfsmount_lock);
2100 
2101         /* extract from the expiration list every vfsmount that matches the
2102          * following criteria:
2103          * - only referenced by its parent vfsmount
2104          * - still marked for expiry (marked on the last call here; marks are
2105          *   cleared by mntput())
2106          */
2107         list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2108                 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2109                         propagate_mount_busy(mnt, 1))
2110                         continue;
2111                 list_move(&mnt->mnt_expire, &graveyard);
2112         }
2113         while (!list_empty(&graveyard)) {
2114                 mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
2115                 touch_mnt_namespace(mnt->mnt_ns);
2116                 umount_tree(mnt, 1, &umounts);
2117         }
2118         br_write_unlock(vfsmount_lock);
2119         up_write(&namespace_sem);
2120 
2121         release_mounts(&umounts);
2122 }
2123 
2124 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
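
/*
 * Illustrative kernel-side sketch (not part of namespace.c): how a
 * filesystem might drive this machinery.  Each automount is registered
 * with mnt_set_expiry(); a delayed work item then calls
 * mark_mounts_for_expiry() periodically, so a mount is torn down on the
 * second pass in which it was found unused.  All "example_" names are
 * assumptions made up for the sketch.
 */
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mount.h>
#include <linux/workqueue.h>

static LIST_HEAD(example_expiry_list);
static void example_expire_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_expiry_dwork, example_expire_work);

static void example_expire_work(struct work_struct *work)
{
        mark_mounts_for_expiry(&example_expiry_list);
        if (!list_empty(&example_expiry_list))
                schedule_delayed_work(&example_expiry_dwork, 30 * HZ);
}

/* called once a freshly created automount has been attached */
static void example_watch_mount(struct vfsmount *mnt)
{
        mnt_set_expiry(mnt, &example_expiry_list);
        schedule_delayed_work(&example_expiry_dwork, 30 * HZ);
}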
2125 
2126 /*
2127  * Ripoff of 'select_parent()'
2128  *
2129  * search the list of submounts for a given mountpoint, and move any
2130  * shrinkable submounts to the 'graveyard' list.
2131  */
2132 static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
2133 {
2134         struct vfsmount *this_parent = parent;
2135         struct list_head *next;
2136         int found = 0;
2137 
2138 repeat:
2139         next = this_parent->mnt_mounts.next;
2140 resume:
2141         while (next != &this_parent->mnt_mounts) {
2142                 struct list_head *tmp = next;
2143                 struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
2144 
2145                 next = tmp->next;
2146                 if (!(mnt->mnt_flags & MNT_SHRINKABLE))
2147                         continue;
2148                 /*
2149                  * Descend a level if the mnt_mounts list is non-empty.
2150                  */
2151                 if (!list_empty(&mnt->mnt_mounts)) {
2152                         this_parent = mnt;
2153                         goto repeat;
2154                 }
2155 
2156                 if (!propagate_mount_busy(mnt, 1)) {
2157                         list_move_tail(&mnt->mnt_expire, graveyard);
2158                         found++;
2159                 }
2160         }
2161         /*
2162          * All done at this level ... ascend and resume the search
2163          */
2164         if (this_parent != parent) {
2165                 next = this_parent->mnt_child.next;
2166                 this_parent = this_parent->mnt_parent;
2167                 goto resume;
2168         }
2169         return found;
2170 }
2171 
2172 /*
2173  * process a list of expirable mountpoints with the intent of discarding any
2174  * submounts of a specific parent mountpoint
2175  *
2176  * vfsmount_lock must be held for write
2177  */
2178 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
2179 {
2180         LIST_HEAD(graveyard);
2181         struct vfsmount *m;
2182 
2183         /* extract submounts of 'mountpoint' from the expiration list */
2184         while (select_submounts(mnt, &graveyard)) {
2185                 while (!list_empty(&graveyard)) {
2186                         m = list_first_entry(&graveyard, struct vfsmount,
2187                                                 mnt_expire);
2188                         touch_mnt_namespace(m->mnt_ns);
2189                         umount_tree(m, 1, umounts);
2190                 }
2191         }
2192 }
2193 
2194 /*
2195  * Some copy_from_user() implementations do not return the exact number of
2196  * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
2197  * Note that this function differs from copy_from_user() in that it will oops
2198  * on bad values of `to', rather than returning a short copy.
2199  */
2200 static long exact_copy_from_user(void *to, const void __user * from,
2201                                  unsigned long n)
2202 {
2203         char *t = to;
2204         const char __user *f = from;
2205         char c;
2206 
2207         if (!access_ok(VERIFY_READ, from, n))
2208                 return n;
2209 
2210         while (n) {
2211                 if (__get_user(c, f)) {
2212                         memset(t, 0, n);
2213                         break;
2214                 }
2215                 *t++ = c;
2216                 f++;
2217                 n--;
2218         }
2219         return n;
2220 }
2221 
2222 int copy_mount_options(const void __user * data, unsigned long *where)
2223 {
2224         int i;
2225         unsigned long page;
2226         unsigned long size;
2227 
2228         *where = 0;
2229         if (!data)
2230                 return 0;
2231 
2232         if (!(page = __get_free_page(GFP_KERNEL)))
2233                 return -ENOMEM;
2234 
2235         /* We only care that *some* data at the address the user
2236          * gave us is valid.  Just in case, we'll zero
2237          * the remainder of the page.
2238          */
2239         /* copy_from_user cannot cross TASK_SIZE ! */
2240         size = TASK_SIZE - (unsigned long)data;
2241         if (size > PAGE_SIZE)
2242                 size = PAGE_SIZE;
2243 
2244         i = size - exact_copy_from_user((void *)page, data, size);
2245         if (!i) {
2246                 free_page(page);
2247                 return -EFAULT;
2248         }
2249         if (i != PAGE_SIZE)
2250                 memset((char *)page + i, 0, PAGE_SIZE - i);
2251         *where = page;
2252         return 0;
2253 }
2254 
2255 int copy_mount_string(const void __user *data, char **where)
2256 {
2257         char *tmp;
2258 
2259         if (!data) {
2260                 *where = NULL;
2261                 return 0;
2262         }
2263 
2264         tmp = strndup_user(data, PAGE_SIZE);
2265         if (IS_ERR(tmp))
2266                 return PTR_ERR(tmp);
2267 
2268         *where = tmp;
2269         return 0;
2270 }
2271 
2272 /*
2273  * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2274  * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2275  *
2276  * data is a (void *) that can point to any structure up to
2277  * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2278  * information (or be NULL).
2279  *
2280  * Pre-0.97 versions of mount() didn't have a flags word.
2281  * When the flags word was introduced its top half was required
2282  * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2283  * Therefore, if this magic number is present, it carries no information
2284  * and must be discarded.
2285  */
2286 long do_mount(char *dev_name, char *dir_name, char *type_page,
2287                   unsigned long flags, void *data_page)
2288 {
2289         struct path path;
2290         int retval = 0;
2291         int mnt_flags = 0;
2292 
2293         /* Discard magic */
2294         if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2295                 flags &= ~MS_MGC_MSK;
2296 
2297         /* Basic sanity checks */
2298 
2299         if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
2300                 return -EINVAL;
2301 
2302         if (data_page)
2303                 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2304 
2305         /* ... and get the mountpoint */
2306         retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
2307         if (retval)
2308                 return retval;
2309 
2310         retval = security_sb_mount(dev_name, &path,
2311                                    type_page, flags, data_page);
2312         if (retval)
2313                 goto dput_out;
2314 
2315         /* Default to relatime unless overridden */
2316         if (!(flags & MS_NOATIME))
2317                 mnt_flags |= MNT_RELATIME;
2318 
2319         /* Separate the per-mountpoint flags */
2320         if (flags & MS_NOSUID)
2321                 mnt_flags |= MNT_NOSUID;
2322         if (flags & MS_NODEV)
2323                 mnt_flags |= MNT_NODEV;
2324         if (flags & MS_NOEXEC)
2325                 mnt_flags |= MNT_NOEXEC;
2326         if (flags & MS_NOATIME)
2327                 mnt_flags |= MNT_NOATIME;
2328         if (flags & MS_NODIRATIME)
2329                 mnt_flags |= MNT_NODIRATIME;
2330         if (flags & MS_STRICTATIME)
2331                 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2332         if (flags & MS_RDONLY)
2333                 mnt_flags |= MNT_READONLY;
2334 
2335         flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2336                    MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2337                    MS_STRICTATIME);
2338 
2339         if (flags & MS_REMOUNT)
2340                 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2341                                     data_page);
2342         else if (flags & MS_BIND)
2343                 retval = do_loopback(&path, dev_name, flags & MS_REC);
2344         else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2345                 retval = do_change_type(&path, flags);
2346         else if (flags & MS_MOVE)
2347                 retval = do_move_mount(&path, dev_name);
2348         else
2349                 retval = do_new_mount(&path, type_page, flags, mnt_flags,
2350                                       dev_name, data_page);
2351 dput_out:
2352         path_put(&path);
2353         return retval;
2354 }
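
/*
 * Illustrative userspace sketch (not part of namespace.c): the legacy
 * magic described above.  As far as flag parsing is concerned the two
 * calls are identical, because do_mount() strips MS_MGC_MSK when the
 * 0xC0ED magic is present.  Device and mountpoint are assumed examples.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("/dev/sda1", "/mnt", "ext2", MS_RDONLY, NULL))
                perror("mount");
        /* pre-2.4.0-test9 style; the magic is discarded by the kernel */
        if (mount("/dev/sda1", "/mnt", "ext2", MS_MGC_VAL | MS_RDONLY, NULL))
                perror("mount (legacy magic)");
        return 0;
}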
2355 
2356 static struct mnt_namespace *alloc_mnt_ns(void)
2357 {
2358         struct mnt_namespace *new_ns;
2359 
2360         new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2361         if (!new_ns)
2362                 return ERR_PTR(-ENOMEM);
2363         atomic_set(&new_ns->count, 1);
2364         new_ns->root = NULL;
2365         INIT_LIST_HEAD(&new_ns->list);
2366         init_waitqueue_head(&new_ns->poll);
2367         new_ns->event = 0;
2368         return new_ns;
2369 }
2370 
2371 void mnt_make_longterm(struct vfsmount *mnt)
2372 {
2373         __mnt_make_longterm(mnt);
2374 }
2375 
2376 void mnt_make_shortterm(struct vfsmount *mnt)
2377 {
2378 #ifdef CONFIG_SMP
2379         if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
2380                 return;
2381         br_write_lock(vfsmount_lock);
2382         atomic_dec(&mnt->mnt_longterm);
2383         br_write_unlock(vfsmount_lock);
2384 #endif
2385 }
2386 
2387 /*
2388  * Allocate a new namespace structure and populate it with contents
2389  * copied from the namespace of the passed in task structure.
2390  */
2391 static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2392                 struct fs_struct *fs)
2393 {
2394         struct mnt_namespace *new_ns;
2395         struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2396         struct vfsmount *p, *q;
2397 
2398         new_ns = alloc_mnt_ns();
2399         if (IS_ERR(new_ns))
2400                 return new_ns;
2401 
2402         down_write(&namespace_sem);
2403         /* First pass: copy the tree topology */
2404         new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
2405                                         CL_COPY_ALL | CL_EXPIRE);
2406         if (!new_ns->root) {
2407                 up_write(&namespace_sem);
2408                 kfree(new_ns);
2409                 return ERR_PTR(-ENOMEM);
2410         }
2411         br_write_lock(vfsmount_lock);
2412         list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
2413         br_write_unlock(vfsmount_lock);
2414 
2415         /*
2416          * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2417          * as belonging to new namespace.  We have already acquired a private
2418          * fs_struct, so tsk->fs->lock is not needed.
2419          */
2420         p = mnt_ns->root;
2421         q = new_ns->root;
2422         while (p) {
2423                 q->mnt_ns = new_ns;
2424                 __mnt_make_longterm(q);
2425                 if (fs) {
2426                         if (p == fs->root.mnt) {
2427                                 fs->root.mnt = mntget(q);
2428                                 __mnt_make_longterm(q);
2429                                 mnt_make_shortterm(p);
2430                                 rootmnt = p;
2431                         }
2432                         if (p == fs->pwd.mnt) {
2433                                 fs->pwd.mnt = mntget(q);
2434                                 __mnt_make_longterm(q);
2435                                 mnt_make_shortterm(p);
2436                                 pwdmnt = p;
2437                         }
2438                 }
2439                 p = next_mnt(p, mnt_ns->root);
2440                 q = next_mnt(q, new_ns->root);
2441         }
2442         up_write(&namespace_sem);
2443 
2444         if (rootmnt)
2445                 mntput(rootmnt);
2446         if (pwdmnt)
2447                 mntput(pwdmnt);
2448 
2449         return new_ns;
2450 }
2451 
2452 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2453                 struct fs_struct *new_fs)
2454 {
2455         struct mnt_namespace *new_ns;
2456 
2457         BUG_ON(!ns);
2458         get_mnt_ns(ns);
2459 
2460         if (!(flags & CLONE_NEWNS))
2461                 return ns;
2462 
2463         new_ns = dup_mnt_ns(ns, new_fs);
2464 
2465         put_mnt_ns(ns);
2466         return new_ns;
2467 }
2468 
2469 /**
2470  * create_mnt_ns - creates a private namespace and adds a root filesystem
2471  * @mnt: pointer to the new root filesystem mountpoint
2472  */
2473 struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
2474 {
2475         struct mnt_namespace *new_ns;
2476 
2477         new_ns = alloc_mnt_ns();
2478         if (!IS_ERR(new_ns)) {
2479                 mnt->mnt_ns = new_ns;
2480                 __mnt_make_longterm(mnt);
2481                 new_ns->root = mnt;
2482                 list_add(&new_ns->list, &new_ns->root->mnt_list);
2483         } else {
2484                 mntput(mnt);
2485         }
2486         return new_ns;
2487 }
2488 EXPORT_SYMBOL(create_mnt_ns);
2489 
2490 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2491 {
2492         struct mnt_namespace *ns;
2493         struct super_block *s;
2494         struct path path;
2495         int err;
2496 
2497         ns = create_mnt_ns(mnt);
2498         if (IS_ERR(ns))
2499                 return ERR_CAST(ns);
2500 
2501         err = vfs_path_lookup(mnt->mnt_root, mnt,
2502                         name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2503 
2504         put_mnt_ns(ns);
2505 
2506         if (err)
2507                 return ERR_PTR(err);
2508 
2509         /* trade a vfsmount reference for active sb one */
2510         s = path.mnt->mnt_sb;
2511         atomic_inc(&s->s_active);
2512         mntput(path.mnt);
2513         /* lock the sucker */
2514         down_write(&s->s_umount);
2515         /* ... and return the root of (sub)tree on it */
2516         return path.dentry;
2517 }
2518 EXPORT_SYMBOL(mount_subtree);
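
/*
 * Illustrative kernel-side sketch (not part of namespace.c): a caller
 * creates an internal mount and uses mount_subtree() to obtain the root
 * dentry of a subdirectory on it.  Note that mount_subtree() consumes the
 * vfsmount reference and returns with s_umount held and an active
 * superblock reference, as the comments above describe.  The function and
 * path names are assumptions made up for the sketch.
 */
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mount.h>

static struct dentry *example_mount_sub(struct file_system_type *type)
{
        struct vfsmount *mnt = vfs_kern_mount(type, 0, "example", NULL);

        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
        return mount_subtree(mnt, "/some/subdir");
}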
2519 
2520 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2521                 char __user *, type, unsigned long, flags, void __user *, data)
2522 {
2523         int ret;
2524         char *kernel_type;
2525         char *kernel_dir;
2526         char *kernel_dev;
2527         unsigned long data_page;
2528 
2529         ret = copy_mount_string(type, &kernel_type);
2530         if (ret < 0)
2531                 goto out_type;
2532 
2533         kernel_dir = getname(dir_name);
2534         if (IS_ERR(kernel_dir)) {
2535                 ret = PTR_ERR(kernel_dir);
2536                 goto out_dir;
2537         }
2538 
2539         ret = copy_mount_string(dev_name, &kernel_dev);
2540         if (ret < 0)
2541                 goto out_dev;
2542 
2543         ret = copy_mount_options(data, &data_page);
2544         if (ret < 0)
2545                 goto out_data;
2546 
2547         ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
2548                 (void *) data_page);
2549 
2550         free_page(data_page);
2551 out_data:
2552         kfree(kernel_dev);
2553 out_dev:
2554         putname(kernel_dir);
2555 out_dir:
2556         kfree(kernel_type);
2557 out_type:
2558         return ret;
2559 }
2560 
2561 /*
2562  * pivot_root Semantics:
2563  * Moves the root file system of the current process to the directory put_old,
2564  * makes new_root the new root file system of the current process, and sets
2565  * root/cwd of all processes which had them on the current root to new_root.
2566  *
2567  * Restrictions:
2568  * The new_root and put_old must be directories, and must not be on the
2569  * same file system as the current process root. The put_old must be
2570  * underneath new_root, i.e. adding a non-zero number of /.. to the string
2571  * pointed to by put_old must yield the same directory as new_root. No other
2572  * file system may be mounted on put_old. After all, new_root is a mountpoint.
2573  *
2574  * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2575  * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2576  * in this situation.
2577  *
2578  * Notes:
2579  *  - we don't move root/cwd if they are not at the root (reason: if something
2580  *    cared enough to change them, it's probably wrong to force them elsewhere)
2581  *  - it's okay to pick a root that isn't the root of a file system, e.g.
2582  *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2583  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2584  *    first.
2585  */
2586 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2587                 const char __user *, put_old)
2588 {
2589         struct vfsmount *tmp;
2590         struct path new, old, parent_path, root_parent, root;
2591         int error;
2592 
2593         if (!capable(CAP_SYS_ADMIN))
2594                 return -EPERM;
2595 
2596         error = user_path_dir(new_root, &new);
2597         if (error)
2598                 goto out0;
2599 
2600         error = user_path_dir(put_old, &old);
2601         if (error)
2602                 goto out1;
2603 
2604         error = security_sb_pivotroot(&old, &new);
2605         if (error)
2606                 goto out2;
2607 
2608         get_fs_root(current->fs, &root);
2609         error = lock_mount(&old);
2610         if (error)
2611                 goto out3;
2612 
2613         error = -EINVAL;
2614         if (IS_MNT_SHARED(old.mnt) ||
2615                 IS_MNT_SHARED(new.mnt->mnt_parent) ||
2616                 IS_MNT_SHARED(root.mnt->mnt_parent))
2617                 goto out4;
2618         if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
2619                 goto out4;
2620         error = -ENOENT;
2621         if (d_unlinked(new.dentry))
2622                 goto out4;
2623         if (d_unlinked(old.dentry))
2624                 goto out4;
2625         error = -EBUSY;
2626         if (new.mnt == root.mnt ||
2627             old.mnt == root.mnt)
2628                 goto out4; /* loop, on the same file system  */
2629         error = -EINVAL;
2630         if (root.mnt->mnt_root != root.dentry)
2631                 goto out4; /* not a mountpoint */
2632         if (root.mnt->mnt_parent == root.mnt)
2633                 goto out4; /* not attached */
2634         if (new.mnt->mnt_root != new.dentry)
2635                 goto out4; /* not a mountpoint */
2636         if (new.mnt->mnt_parent == new.mnt)
2637                 goto out4; /* not attached */
2638         /* make sure we can reach put_old from new_root */
2639         tmp = old.mnt;
2640         if (tmp != new.mnt) {
2641                 for (;;) {
2642                         if (tmp->mnt_parent == tmp)
2643                                 goto out4; /* already mounted on put_old */
2644                         if (tmp->mnt_parent == new.mnt)
2645                                 break;
2646                         tmp = tmp->mnt_parent;
2647                 }
2648                 if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
2649                         goto out4;
2650         } else if (!is_subdir(old.dentry, new.dentry))
2651                 goto out4;
2652         br_write_lock(vfsmount_lock);
2653         detach_mnt(new.mnt, &parent_path);
2654         detach_mnt(root.mnt, &root_parent);
2655         /* mount old root on put_old */
2656         attach_mnt(root.mnt, &old);
2657         /* mount new_root on / */
2658         attach_mnt(new.mnt, &root_parent);
2659         touch_mnt_namespace(current->nsproxy->mnt_ns);
2660         br_write_unlock(vfsmount_lock);
2661         chroot_fs_refs(&root, &new);
2662         error = 0;
2663 out4:
2664         unlock_mount(&old);
2665         if (!error) {
2666                 path_put(&root_parent);
2667                 path_put(&parent_path);
2668         }
2669 out3:
2670         path_put(&root);
2671 out2:
2672         path_put(&old);
2673 out1:
2674         path_put(&new);
2675 out0:
2676         return error;
2677 }
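
/*
 * Illustrative userspace sketch (not part of namespace.c): the classic
 * pivot_root() sequence from an early-boot init.  There is no glibc
 * wrapper, so the raw syscall is used.  The bind mount satisfies the
 * "new_root must be a mountpoint" rule noted above.  The paths are
 * assumptions made up for the example.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/syscall.h>

int main(void)
{
        if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL))
                perror("bind");
        if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot"))
                perror("pivot_root");
        if (chdir("/"))
                perror("chdir");
        return 0;
}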
2678 
2679 static void __init init_mount_tree(void)
2680 {
2681         struct vfsmount *mnt;
2682         struct mnt_namespace *ns;
2683         struct path root;
2684 
2685         mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2686         if (IS_ERR(mnt))
2687                 panic("Can't create rootfs");
2688 
2689         ns = create_mnt_ns(mnt);
2690         if (IS_ERR(ns))
2691                 panic("Can't allocate initial namespace");
2692 
2693         init_task.nsproxy->mnt_ns = ns;
2694         get_mnt_ns(ns);
2695 
2696         root.mnt = ns->root;
2697         root.dentry = ns->root->mnt_root;
2698 
2699         set_fs_pwd(current->fs, &root);
2700         set_fs_root(current->fs, &root);
2701 }
2702 
2703 void __init mnt_init(void)
2704 {
2705         unsigned u;
2706         int err;
2707 
2708         init_rwsem(&namespace_sem);
2709 
2710         mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
2711                         0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2712 
2713         mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
2714 
2715         if (!mount_hashtable)
2716                 panic("Failed to allocate mount hash table\n");
2717 
2718         printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
2719 
2720         for (u = 0; u < HASH_SIZE; u++)
2721                 INIT_LIST_HEAD(&mount_hashtable[u]);
2722 
2723         br_lock_init(vfsmount_lock);
2724 
2725         err = sysfs_init();
2726         if (err)
2727                 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
2728                         __func__, err);
2729         fs_kobj = kobject_create_and_add("fs", NULL);
2730         if (!fs_kobj)
2731                 printk(KERN_WARNING "%s: kobj create error\n", __func__);
2732         init_rootfs();
2733         init_mount_tree();
2734 }
2735 
2736 void put_mnt_ns(struct mnt_namespace *ns)
2737 {
2738         LIST_HEAD(umount_list);
2739 
2740         if (!atomic_dec_and_test(&ns->count))
2741                 return;
2742         down_write(&namespace_sem);
2743         br_write_lock(vfsmount_lock);
2744         umount_tree(ns->root, 0, &umount_list);
2745         br_write_unlock(vfsmount_lock);
2746         up_write(&namespace_sem);
2747         release_mounts(&umount_list);
2748         kfree(ns);
2749 }
2750 EXPORT_SYMBOL(put_mnt_ns);
2751 
2752 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
2753 {
2754         struct vfsmount *mnt;
2755         mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
2756         if (!IS_ERR(mnt)) {
2757                 /*
2758                  * it is a longterm mount; don't release mnt until
2759                  * kern_unmount(), before the filesystem is unregistered
2760                  */
2761                 mnt_make_longterm(mnt);
2762         }
2763         return mnt;
2764 }
2765 EXPORT_SYMBOL_GPL(kern_mount_data);
2766 
2767 void kern_unmount(struct vfsmount *mnt)
2768 {
2769         /* release long term mount so mount point can be released */
2770         if (!IS_ERR_OR_NULL(mnt)) {
2771                 mnt_make_shortterm(mnt);
2772                 mntput(mnt);
2773         }
2774 }
2775 EXPORT_SYMBOL(kern_unmount);
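
/*
 * Illustrative kernel-side sketch (not part of namespace.c): the usual
 * lifetime of an internal, pinned mount.  kern_mount_data() takes the
 * longterm reference; kern_unmount() drops it before the filesystem type
 * is unregistered.  All "example_" names are assumptions for the sketch.
 */
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mount.h>

static struct vfsmount *example_mnt;

static int __init example_pin_fs(struct file_system_type *type)
{
        example_mnt = kern_mount_data(type, NULL);
        return IS_ERR(example_mnt) ? PTR_ERR(example_mnt) : 0;
}

static void __exit example_unpin_fs(void)
{
        kern_unmount(example_mnt);      /* releases the longterm mount */
}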
2776 
2777 bool our_mnt(struct vfsmount *mnt)
2778 {
2779         return check_mnt(mnt);
2780 }
2781 
