vhost: Remove custom vhost rcu usage
author Asias He <asias@redhat.com>
Tue, 7 May 2013 06:54:36 +0000 (14:54 +0800)
committer Michael S. Tsirkin <mst@redhat.com>
Thu, 11 Jul 2013 12:38:40 +0000 (15:38 +0300)
vq->private_data is now always accessed under the vq mutex, so there is
no need to play the custom vhost RCU trick any more.

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vhost.h
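
For context, a minimal sketch (not part of the patch; example_vq and
example_swap_backend are illustrative names only) of the access pattern this
change moves to: since every reader and writer of vq->private_data already
holds the vq mutex, a plain load and store replace
rcu_dereference_protected()/rcu_assign_pointer(), and no grace period
(previously provided by flushing the vhost work) is required.

#include <linux/mutex.h>

/* Illustrative stand-in for the private_data field of a virtqueue. */
struct example_vq {
	struct mutex mutex;
	void *private_data;		/* protected by ->mutex */
};

/*
 * Old scheme: the field was annotated __rcu, writers used
 * rcu_assign_pointer() and readers used rcu_dereference_protected(...,
 * lockdep_is_held(&vq->mutex)), with a vhost work flush standing in for
 * synchronize_rcu().
 *
 * New scheme: every access happens with the vq mutex held, so a plain
 * load and store are sufficient and no RCU primitives are needed.
 */
static void *example_swap_backend(struct example_vq *vq, void *new_priv)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = vq->private_data;
	vq->private_data = new_priv;
	mutex_unlock(&vq->mutex);
	return old;
}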

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 99f8d63491aa2723ecd63e77241b8b1ebfdf3606..969a85960e9f6bf09a5bb9a6b1ea828d9f5ae9fb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -15,7 +15,6 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -749,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
        struct vhost_poll *poll = n->poll + (nvq - n->vqs);
        struct socket *sock;
 
-       sock = rcu_dereference_protected(vq->private_data,
-                                        lockdep_is_held(&vq->mutex));
+       sock = vq->private_data;
        if (!sock)
                return 0;
 
@@ -763,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
        struct socket *sock;
 
        mutex_lock(&vq->mutex);
-       sock = rcu_dereference_protected(vq->private_data,
-                                        lockdep_is_held(&vq->mutex));
+       sock = vq->private_data;
        vhost_net_disable_vq(n, vq);
-       rcu_assign_pointer(vq->private_data, NULL);
+       vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
        return sock;
 }
@@ -922,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        }
 
        /* start polling new socket */
-       oldsock = rcu_dereference_protected(vq->private_data,
-                                           lockdep_is_held(&vq->mutex));
+       oldsock = vq->private_data;
        if (sock != oldsock) {
                ubufs = vhost_net_ubuf_alloc(vq,
                                             sock && vhost_sock_zcopy(sock));
@@ -933,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                }
 
                vhost_net_disable_vq(n, vq);
-               rcu_assign_pointer(vq->private_data, sock);
+               vq->private_data = sock;
                r = vhost_init_used(vq);
                if (r)
                        goto err_used;
@@ -967,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        return 0;
 
 err_used:
-       rcu_assign_pointer(vq->private_data, oldsock);
+       vq->private_data = oldsock;
        vhost_net_enable_vq(n, vq);
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 45365396dbbcf2b3c07ffc1933873cbcb0b04cbd..35ab0ce984145f57b3e8a0a9b8f18c067cf5bef6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
-                       /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
-                       rcu_assign_pointer(vq->private_data, vs_tpg);
+                       vq->private_data = vs_tpg;
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
-                       /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
-                       rcu_assign_pointer(vq->private_data, NULL);
+                       vq->private_data = NULL;
                        mutex_unlock(&vq->mutex);
                }
        }
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a73ea217f24dc79a0f92a95cca39c416e828a6b9..339eae85859a58afbd6ea563b4b9fae3f667100c 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
                priv = test ? n : NULL;
 
                /* start polling new socket */
-               oldpriv = rcu_dereference_protected(vq->private_data,
-                                                   lockdep_is_held(&vq->mutex));
-               rcu_assign_pointer(vq->private_data, priv);
+               oldpriv = vq->private_data;
+               vq->private_data = priv;
 
                r = vhost_init_used(&n->vqs[index]);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42298cd23c73842e6a4ec7f8434428b56e700098..4465ed5f316d6e9b36e1f43c05df1c980aa908ae 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
        struct iovec iov[UIO_MAXIOV];
        struct iovec *indirect;
        struct vring_used_elem *heads;
-       /* We use a kind of RCU to access private pointer.
-        * All readers access it from worker, which makes it possible to
-        * flush the vhost_work instead of synchronize_rcu. Therefore readers do
-        * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-        * vhost_work execution acts instead of rcu_read_lock() and the end of
-        * vhost_work execution acts instead of rcu_read_unlock().
-        * Writers use virtqueue mutex. */
-       void __rcu *private_data;
+       /* Protected by virtqueue mutex. */
+       void *private_data;
        /* Log write descriptors */
        void __user *log_base;
        struct vhost_log *log;