static struct list_head *mount_hashtable;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
 static kmem_cache_t *mnt_cache;
+static struct rw_semaphore namespace_sem;
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        struct list_head *p;
        loff_t l = *pos;
 
-       down_read(&n->sem);
+       down_read(&namespace_sem);
        list_for_each(p, &n->list)
                if (!l--)
                        return list_entry(p, struct vfsmount, mnt_list);
 
 static void m_stop(struct seq_file *m, void *v)
 {
-       struct namespace *n = m->private;
-       up_read(&n->sem);
+       up_read(&namespace_sem);
 }
 
 static inline void mangle(struct seq_file *m, const char *s)
                return retval;
        }
 
-       down_write(&current->namespace->sem);
+       down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
        event++;
 
        spin_unlock(&vfsmount_lock);
        if (retval)
                security_sb_umount_busy(mnt);
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
 }
        if (err)
                return err;
 
-       down_write(&current->namespace->sem);
+       down_write(&namespace_sem);
        err = -EINVAL;
        if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
                goto out;
        }
 
 out:
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        path_release(&old_nd);
        return err;
 }
        if (err)
                return err;
 
-       down_write(&current->namespace->sem);
+       down_write(&namespace_sem);
        while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
                ;
        err = -EINVAL;
 out1:
        up(&nd->dentry->d_inode->i_sem);
 out:
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        if (!err)
                path_release(&parent_nd);
        path_release(&old_nd);
 {
        int err;
 
-       down_write(&current->namespace->sem);
+       down_write(&namespace_sem);
        /* Something was mounted here while we slept */
        while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
                ;
                list_add_tail(&newmnt->mnt_expire, fslist);
                spin_unlock(&vfsmount_lock);
        }
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        return 0;
 
 unlock:
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        mntput(newmnt);
        return err;
 }
                get_namespace(namespace);
 
                spin_unlock(&vfsmount_lock);
-               down_write(&namespace->sem);
+               down_write(&namespace_sem);
                expire_mount(mnt, mounts, &umounts);
-               up_write(&namespace->sem);
+               up_write(&namespace_sem);
                release_mounts(&umounts);
                mntput(mnt);
                put_namespace(namespace);
                goto out;
 
        atomic_set(&new_ns->count, 1);
-       init_rwsem(&new_ns->sem);
        INIT_LIST_HEAD(&new_ns->list);
        init_waitqueue_head(&new_ns->poll);
        new_ns->event = 0;
 
-       down_write(&tsk->namespace->sem);
+       down_write(&namespace_sem);
        /* First pass: copy the tree topology */
        new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
                                        CL_EXPIRE);
        if (!new_ns->root) {
-               up_write(&tsk->namespace->sem);
+               up_write(&namespace_sem);
                kfree(new_ns);
                goto out;
        }
                p = next_mnt(p, namespace->root);
                q = next_mnt(q, new_ns->root);
        }
-       up_write(&tsk->namespace->sem);
+       up_write(&namespace_sem);
 
        tsk->namespace = new_ns;
 
        user_nd.mnt = mntget(current->fs->rootmnt);
        user_nd.dentry = dget(current->fs->root);
        read_unlock(&current->fs->lock);
-       down_write(&current->namespace->sem);
+       down_write(&namespace_sem);
        down(&old_nd.dentry->d_inode->i_sem);
        error = -EINVAL;
        if (!check_mnt(user_nd.mnt))
        path_release(&parent_nd);
 out2:
        up(&old_nd.dentry->d_inode->i_sem);
-       up_write(&current->namespace->sem);
+       up_write(&namespace_sem);
        path_release(&user_nd);
        path_release(&old_nd);
 out1:
                panic("Can't allocate initial namespace");
        atomic_set(&namespace->count, 1);
        INIT_LIST_HEAD(&namespace->list);
-       init_rwsem(&namespace->sem);
        init_waitqueue_head(&namespace->poll);
        namespace->event = 0;
        list_add(&mnt->mnt_list, &namespace->list);
        unsigned int nr_hash;
        int i;
 
+       init_rwsem(&namespace_sem);
+
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
 
        LIST_HEAD(umount_list);
        namespace->root = NULL;
        spin_unlock(&vfsmount_lock);
-       down_write(&namespace->sem);
+       down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
        umount_tree(root, &umount_list);
        spin_unlock(&vfsmount_lock);
-       up_write(&namespace->sem);
+       up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(namespace);
 }