mnt: Tuck mounts under others instead of creating shadow/side mounts
diff --git a/fs/namespace.c b/fs/namespace.c
index 4aad64ad9ad0a38a0f021ab943849290dd59f475..c1477882a8537094ea5ee2b5470b3be0d68c2994 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
        return NULL;
 }
 
-/*
- * find the last mount at @dentry on vfsmount @mnt.
- * mount_lock must be held.
- */
-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
-{
-       struct mount *p, *res = NULL;
-       p = __lookup_mnt(mnt, dentry);
-       if (!p)
-               goto out;
-       if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-               res = p;
-       hlist_for_each_entry_continue(p, mnt_hash) {
-               if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
-                       break;
-               if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-                       res = p;
-       }
-out:
-       return res;
-}
-
 /*
  * lookup_mnt - Return the first child mount mounted at path
  *
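
With shadow and side mounts gone, at most one mount can be hashed at a given (parent vfsmount, mountpoint dentry) pair, so the first entry __lookup_mnt() finds is the only candidate and __lookup_mnt_last() loses its reason to exist. A rough user-space model of that first-match invariant (toy types for illustration, not the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for the (vfsmount *, dentry *) hash key. */
    struct toy_key   { const void *parent; const void *mountpoint; };
    struct toy_mount {
        struct toy_key key;
        const char *name;
        struct toy_mount *next;   /* hash-chain link, like mnt_hash */
    };

    /* First-match lookup: once mounts are tucked instead of shadowed,
     * no two entries on the chain share a key, so the first hit is
     * the final answer and no "last" variant is needed. */
    static struct toy_mount *toy_lookup(struct toy_mount *chain, struct toy_key k)
    {
        for (struct toy_mount *m = chain; m; m = m->next)
            if (m->key.parent == k.parent && m->key.mountpoint == k.mountpoint)
                return m;
        return NULL;
    }

    int main(void)
    {
        int parent, dent;
        struct toy_mount only  = { { &parent, &dent }, "only-mount", NULL };
        struct toy_mount other = { { &parent, NULL  }, "other", &only };
        struct toy_key k = { &parent, &dent };

        printf("%s\n", toy_lookup(&other, k)->name);   /* prints "only-mount" */
        return 0;
    }
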
@@ -880,6 +858,13 @@ void mnt_set_mountpoint(struct mount *mnt,
        hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
 }
 
+static void __attach_mnt(struct mount *mnt, struct mount *parent)
+{
+       hlist_add_head_rcu(&mnt->mnt_hash,
+                          m_hash(&parent->mnt, mnt->mnt_mountpoint));
+       list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+}
+
 /*
  * vfsmount lock must be held for write
  */
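
The new __attach_mnt() helper gathers the two insertions every attachment performs, hashing the child by (parent vfsmount, mountpoint dentry) and linking it onto the parent's mnt_mounts list, so that attach_mnt() and commit_tree() below can share one code path instead of duplicating it. A condensed sketch of that refactor shape (hypothetical toy structures, not the kernel API):

    #include <stdio.h>

    struct toy_mnt {
        const char *name;
        struct toy_mnt *parent;       /* models mnt_parent              */
        struct toy_mnt *hash_next;    /* models the mnt_hash chain link */
        struct toy_mnt *child_next;   /* models the mnt_child list link */
        struct toy_mnt *hash_head;    /* one hash bucket, for brevity   */
        struct toy_mnt *children;     /* models mnt_mounts              */
    };

    /* Shared tail of every attachment (cf. __attach_mnt()): hash the
     * child under its parent and put it on the parent's child list. */
    static void toy_attach(struct toy_mnt *child, struct toy_mnt *parent)
    {
        child->hash_next  = parent->hash_head;
        parent->hash_head = child;
        child->child_next = parent->children;
        parent->children  = child;
    }

    /* attach_mnt()-style caller: record the mountpoint, then attach. */
    static void toy_attach_at(struct toy_mnt *child, struct toy_mnt *parent)
    {
        child->parent = parent;       /* stands in for mnt_set_mountpoint() */
        toy_attach(child, parent);
    }

    /* commit_tree()-style caller: namespace bookkeeping elided, but it
     * ends with the same shared helper. */
    static void toy_commit(struct toy_mnt *child)
    {
        toy_attach(child, child->parent);
    }

    int main(void)
    {
        struct toy_mnt root = { "root" }, a = { "a" }, b = { "b", &root };

        toy_attach_at(&a, &root);
        toy_commit(&b);
        for (struct toy_mnt *c = root.children; c; c = c->child_next)
            printf("%s\n", c->name);  /* prints "b" then "a" */
        return 0;
    }
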
@@ -888,28 +873,45 @@ static void attach_mnt(struct mount *mnt,
                        struct mountpoint *mp)
 {
        mnt_set_mountpoint(parent, mp, mnt);
-       hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
-       list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+       __attach_mnt(mnt, parent);
 }
 
-static void attach_shadowed(struct mount *mnt,
-                       struct mount *parent,
-                       struct mount *shadows)
+void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
 {
-       if (shadows) {
-               hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
-               list_add(&mnt->mnt_child, &shadows->mnt_child);
-       } else {
-               hlist_add_head_rcu(&mnt->mnt_hash,
-                               m_hash(&parent->mnt, mnt->mnt_mountpoint));
-               list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
-       }
+       struct mountpoint *old_mp = mnt->mnt_mp;
+       struct dentry *old_mountpoint = mnt->mnt_mountpoint;
+       struct mount *old_parent = mnt->mnt_parent;
+
+       list_del_init(&mnt->mnt_child);
+       hlist_del_init(&mnt->mnt_mp_list);
+       hlist_del_init_rcu(&mnt->mnt_hash);
+
+       attach_mnt(mnt, parent, mp);
+
+       put_mountpoint(old_mp);
+
+       /*
+        * Safely avoid even the suggestion this code might sleep or
+        * lock the mount hash by taking advantage of the knowledge that
+        * mnt_change_mountpoint will not release the final reference
+        * to a mountpoint.
+        *
+        * During mounting, the mount passed in as the parent mount will
+        * continue to use the old mountpoint and during unmounting, the
+        * old mountpoint will continue to exist until namespace_unlock,
+        * which happens well after mnt_change_mountpoint.
+        */
+       spin_lock(&old_mountpoint->d_lock);
+       old_mountpoint->d_lockref.count--;
+       spin_unlock(&old_mountpoint->d_lock);
+
+       mnt_add_count(old_parent, -1);
 }
 
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct mount *mnt, struct mount *shadows)
+static void commit_tree(struct mount *mnt)
 {
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
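
mnt_change_mountpoint() above is the primitive that makes tucking possible: it unhooks an already-attached mount from its child list, mountpoint list and hash chain, re-attaches it under the new parent at the new mountpoint, and only afterwards drops what pinned the old location. The direct d_lockref decrement stands in for dput() because, as the comment explains, the reference released here can never be the final one, so no sleeping or re-locking is needed. A rough user-space model of that attach-first, release-second ordering (toy reference counts, not the kernel's):

    #include <assert.h>
    #include <stdio.h>

    /* Toy refcounted "mountpoint": stands in for struct mountpoint and
     * the dentry it pins.  Illustrative only, not the kernel API. */
    struct toy_mp  { int refs; const char *name; };
    struct toy_mnt { struct toy_mnt *parent; struct toy_mp *mp; int count; };

    static void toy_mp_put(struct toy_mp *mp)
    {
        assert(mp->refs > 0);
        if (--mp->refs == 0)
            printf("%s: last reference dropped\n", mp->name);
    }

    /* Attach under the new parent and mountpoint first... */
    static void toy_attach(struct toy_mnt *m, struct toy_mnt *parent, struct toy_mp *mp)
    {
        m->parent = parent;
        m->mp = mp;
        mp->refs++;          /* new mountpoint pinned                   */
        parent->count++;     /* new parent pinned (cf. mnt_add_count()) */
    }

    /* ...then release what pinned the old location.  As in
     * mnt_change_mountpoint(), the old references are dropped only
     * after the mount is safely re-hooked, and the caller guarantees
     * they are never the final ones at this point. */
    static void toy_change_mountpoint(struct toy_mnt *parent, struct toy_mp *mp,
                                      struct toy_mnt *m)
    {
        struct toy_mp  *old_mp     = m->mp;
        struct toy_mnt *old_parent = m->parent;

        toy_attach(m, parent, mp);
        toy_mp_put(old_mp);
        old_parent->count--;
    }

    int main(void)
    {
        struct toy_mp  mp_a = { 2, "mp_a" }, mp_b = { 1, "mp_b" };
        struct toy_mnt root  = { 0 };
        struct toy_mnt a     = { &root, &mp_a, 1 };
        struct toy_mnt child = { &a,    &mp_a, 1 };

        /* Someone else still holds mp_a, so the move never frees it here. */
        toy_change_mountpoint(&root, &mp_b, &child);
        printf("moved: refs(mp_a)=%d refs(mp_b)=%d\n", mp_a.refs, mp_b.refs);
        return 0;
    }
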
@@ -924,7 +926,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 
        list_splice(&head, n->list.prev);
 
-       attach_shadowed(mnt, parent, shadows);
+       __attach_mnt(mnt, parent);
        touch_mnt_namespace(n);
 }
 
@@ -1738,7 +1740,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        continue;
 
                for (s = r; s; s = next_mnt(s, r)) {
-                       struct mount *t = NULL;
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
                                s = skip_mnt_tree(s);
@@ -1760,14 +1761,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                                goto out;
                        lock_mount_hash();
                        list_add_tail(&q->mnt_list, &res->mnt_list);
-                       mnt_set_mountpoint(parent, p->mnt_mp, q);
-                       if (!list_empty(&parent->mnt_mounts)) {
-                               t = list_last_entry(&parent->mnt_mounts,
-                                       struct mount, mnt_child);
-                               if (t->mnt_mp != p->mnt_mp)
-                                       t = NULL;
-                       }
-                       attach_shadowed(q, parent, t);
+                       attach_mnt(q, parent, p->mnt_mp);
                        unlock_mount_hash();
                }
        }
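
In copy_tree() the shadow bookkeeping, probing the parent's mount list for the last mount at the same mountpoint, is no longer needed: every clone q is attached at exactly the mountpoint its source p occupies, through the three-argument attach_mnt(). A condensed sketch of that clone-and-attach loop (toy tree, not the kernel's next_mnt() iteration):

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_mnt {
        const char *name;
        const char *mountpoint;          /* models mnt_mp     */
        struct toy_mnt *parent;
        struct toy_mnt *children[8];     /* models mnt_mounts */
        int nr_children;
    };

    static void toy_attach(struct toy_mnt *m, struct toy_mnt *parent, const char *mp)
    {
        m->parent = parent;
        m->mountpoint = mp;
        parent->children[parent->nr_children++] = m;
    }

    /* Clone src's subtree under dst_parent; each copy is attached at the
     * same mountpoint as its source, with no shadow-order probing.
     * Allocations are deliberately never freed in this sketch. */
    static struct toy_mnt *toy_copy_tree(const struct toy_mnt *src,
                                         struct toy_mnt *dst_parent)
    {
        struct toy_mnt *q = calloc(1, sizeof(*q));

        q->name = src->name;
        toy_attach(q, dst_parent, src->mountpoint);
        for (int i = 0; i < src->nr_children; i++)
            toy_copy_tree(src->children[i], q);
        return q;
    }

    int main(void)
    {
        struct toy_mnt root = { "root" }, a = { "a" }, b = { "b" }, ns = { "ns" };
        struct toy_mnt *copy;

        toy_attach(&a, &root, "/mnt");
        toy_attach(&b, &a, "/mnt/sub");
        copy = toy_copy_tree(&root, &ns);
        printf("%s -> %s -> %s\n", copy->name, copy->children[0]->name,
               copy->children[0]->children[0]->name);
        return 0;
    }
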
@@ -1945,10 +1939,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                        struct path *parent_path)
 {
        HLIST_HEAD(tree_list);
+       struct mountpoint *smp;
        struct mount *child, *p;
        struct hlist_node *n;
        int err;
 
+       /* Preallocate a mountpoint in case the new mounts need
+        * to be tucked under other mounts.
+        */
+       smp = get_mountpoint(source_mnt->mnt.mnt_root);
+       if (IS_ERR(smp))
+               return PTR_ERR(smp);
+
        if (IS_MNT_SHARED(dest_mnt)) {
                err = invent_group_ids(source_mnt, true);
                if (err)
@@ -1968,16 +1970,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                touch_mnt_namespace(source_mnt->mnt_ns);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
-               commit_tree(source_mnt, NULL);
+               commit_tree(source_mnt);
        }
 
        hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
                struct mount *q;
                hlist_del_init(&child->mnt_hash);
-               q = __lookup_mnt_last(&child->mnt_parent->mnt,
-                                     child->mnt_mountpoint);
-               commit_tree(child, q);
+               q = __lookup_mnt(&child->mnt_parent->mnt,
+                                child->mnt_mountpoint);
+               if (q)
+                       mnt_change_mountpoint(child, smp, q);
+               commit_tree(child);
        }
+       put_mountpoint(smp);
        unlock_mount_hash();
 
        return 0;
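
This is the tucking itself. A mountpoint for the new tree's root is preallocated with get_mountpoint() before the mount hash lock is taken, and when propagation finds a mount q already hashed at a child's (parent, mountpoint), mnt_change_mountpoint() moves q onto the child's root: the propagated copy slides in underneath while the mount that was already there stays on top and visible, instead of one shadowing the other. A rough user-space model of that "install the newcomer, then move the old occupant on top of it" step (toy structures, not the kernel's propagation machinery):

    #include <stdio.h>
    #include <string.h>

    struct toy_mnt {
        const char *name;
        struct toy_mnt *parent;
        const char *mountpoint;     /* where it sits on its parent          */
        const char *root;           /* its own root, usable as a mountpoint */
    };

    /* Find the mount currently attached at (parent, mountpoint), if any. */
    static struct toy_mnt *toy_lookup(struct toy_mnt **tbl, int n,
                                      struct toy_mnt *parent, const char *mp)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i]->parent == parent && tbl[i]->mountpoint &&
                strcmp(tbl[i]->mountpoint, mp) == 0)
                return tbl[i];
        return NULL;
    }

    /* Tuck: install child at (parent, mp); if some mount q was already
     * there, move q onto child's root so q stays on top and visible,
     * rather than leaving two mounts shadowing each other. */
    static void toy_tuck(struct toy_mnt **tbl, int n, struct toy_mnt *child,
                         struct toy_mnt *parent, const char *mp)
    {
        struct toy_mnt *q = toy_lookup(tbl, n, parent, mp);

        child->parent = parent;
        child->mountpoint = mp;
        if (q) {                    /* cf. mnt_change_mountpoint(child, smp, q) */
            q->parent = child;
            q->mountpoint = child->root;
        }
    }

    int main(void)
    {
        struct toy_mnt parent = { "parent", NULL, NULL, "/" };
        struct toy_mnt old    = { "old", &parent, "/mnt", "/" };
        struct toy_mnt fresh  = { "new", NULL, NULL, "/" };
        struct toy_mnt *tbl[] = { &parent, &old, &fresh };

        toy_tuck(tbl, 3, &fresh, &parent, "/mnt");
        printf("'%s' now sits on '%s' at %s\n",
               old.name, old.parent->name, old.mountpoint);
        return 0;
    }
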
@@ -1990,6 +1995,10 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        unlock_mount_hash();
        cleanup_group_ids(source_mnt, NULL);
  out:
+       read_seqlock_excl(&mount_lock);
+       put_mountpoint(smp);
+       read_sequnlock_excl(&mount_lock);
+
        return err;
 }