[firefly-linux-kernel-4.4.55.git] net/9p/trans_virtio.c
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e1c26b10183067d02a66f56f8ffb3094fb13f2cc..6e70ddb158b4bc121a0f32e7a53fecf8125e8354 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -164,8 +164,7 @@ static void req_done(struct virtqueue *vq)
                p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
                p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
                req = p9_tag_lookup(chan->client, rc->tag);
-               req->status = REQ_STATUS_RCVD;
-               p9_client_cb(chan->client, req);
+               p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
        }
 }
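
The hunk above moves the status write out of the transport: req_done() now hands REQ_STATUS_RCVD to p9_client_cb() instead of poking req->status directly. For context, the matching three-argument callback on the client side looks roughly like the sketch below (paraphrased from net/9p/client.c of the same era; illustrative only, not copied verbatim):

/* Sketch of the client-side callback this hunk switches to. */
#include <linux/wait.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
{
        p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);

        /*
         * Make every write to the request visible before the status change
         * can be observed by the thread sleeping on this request.
         */
        smp_wmb();
        req->status = status;

        /* req->wq is a separately allocated wait_queue_head_t in this era */
        wake_up(req->wq);
        p9_debug(P9_DEBUG_MUX, "wake up client %p\n", c);
}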
 
@@ -218,15 +217,15 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
  * @start: which segment of the sg_list to start at
  * @pdata: a list of pages to add into sg.
  * @nr_pages: number of pages to pack into the scatter/gather list
- * @data: data to pack into scatter/gather list
+ * @offs: amount of data in the beginning of first page _not_ to pack
  * @count: amount of data to pack into the scatter/gather list
  */
 static int
 pack_sg_list_p(struct scatterlist *sg, int start, int limit,
-              struct page **pdata, int nr_pages, char *data, int count)
+              struct page **pdata, int nr_pages, size_t offs, int count)
 {
        int i = 0, s;
-       int data_off;
+       int data_off = offs;
        int index = start;
 
        BUG_ON(nr_pages > (limit - start));
@@ -234,16 +233,14 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
         * if the first page doesn't start at
         * page boundary find the offset
         */
-       data_off = offset_in_page(data);
        while (nr_pages) {
-               s = rest_of_page(data);
+               s = PAGE_SIZE - data_off;
                if (s > count)
                        s = count;
                /* Make sure we don't terminate early. */
                sg_unmark_end(&sg[index]);
                sg_set_page(&sg[index++], pdata[i++], s, data_off);
                data_off = 0;
-               data += s;
                count -= s;
                nr_pages--;
        }
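
With the iov_iter conversion, pack_sg_list_p() no longer derives the offset from a kernel virtual address: the caller passes offs in, and the loop emits one scatterlist entry per page, the first one shortened by offs. A hypothetical userspace walk-through of that splitting arithmetic (illustrative values, not kernel code):

/* How "count" bytes that start "offs" bytes into the first page get split
 * into per-page segments, mirroring the while loop above. */
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
        int offs = 3000;        /* data starts 3000 bytes into page 0 */
        int count = 10000;      /* payload bytes to pack */
        int data_off = offs, seg = 0;

        while (count > 0) {
                int s = PAGE_SIZE - data_off;

                if (s > count)
                        s = count;
                printf("sg[%d]: offset %d, length %d\n", seg++, data_off, s);
                data_off = 0;
                count -= s;
        }
        /* prints 4 entries: 1096@3000, 4096@0, 4096@0, 712@0 */
        return 0;
}
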
@@ -315,11 +312,20 @@ req_retry:
 }
 
 static int p9_get_mapped_pages(struct virtio_chan *chan,
-                              struct page **pages, char *data,
-                              int nr_pages, int write, int kern_buf)
+                              struct page ***pages,
+                              struct iov_iter *data,
+                              int count,
+                              size_t *offs,
+                              int *need_drop)
 {
+       int nr_pages;
        int err;
-       if (!kern_buf) {
+
+       if (!iov_iter_count(data))
+               return 0;
+
+       if (!(data->type & ITER_KVEC)) {
+               int n;
                /*
                 * We allow only p9_max_pages pinned. We wait for the
                 * Other zc request to finish here
@@ -330,23 +336,49 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
                        if (err == -ERESTARTSYS)
                                return err;
                }
-               err = p9_payload_gup(data, &nr_pages, pages, write);
-               if (err < 0)
-                       return err;
+               n = iov_iter_get_pages_alloc(data, pages, count, offs);
+               if (n < 0)
+                       return n;
+               *need_drop = 1;
+               nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
                atomic_add(nr_pages, &vp_pinned);
+               return n;
        } else {
                /* kernel buffer, no need to pin pages */
-               int s, index = 0;
-               int count = nr_pages;
-               while (nr_pages) {
-                       s = rest_of_page(data);
-                       pages[index++] = kmap_to_page(data);
-                       data += s;
-                       nr_pages--;
+               int index;
+               size_t len;
+               void *p;
+
+               /* we'd already checked that it's non-empty */
+               while (1) {
+                       len = iov_iter_single_seg_count(data);
+                       if (likely(len)) {
+                               p = data->kvec->iov_base + data->iov_offset;
+                               break;
+                       }
+                       iov_iter_advance(data, 0);
+               }
+               if (len > count)
+                       len = count;
+
+               nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
+                          (unsigned long)p / PAGE_SIZE;
+
+               *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+               if (!*pages)
+                       return -ENOMEM;
+
+               *need_drop = 0;
+               p -= (*offs = (unsigned long)p % PAGE_SIZE);
+               for (index = 0; index < nr_pages; index++) {
+                       if (is_vmalloc_addr(p))
+                               (*pages)[index] = vmalloc_to_page(p);
+                       else
+                               (*pages)[index] = kmap_to_page(p);
+                       p += PAGE_SIZE;
                }
-               nr_pages = count;
+               return len;
        }
-       return nr_pages;
 }
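
For the ITER_KVEC branch above, the page count and first-page offset come straight from address arithmetic: nr_pages is the number of page frames spanned by [p, p + len), i.e. DIV_ROUND_UP(p + len, PAGE_SIZE) - p / PAGE_SIZE, and offs is p's offset within its first page. A small runnable check of that arithmetic (hypothetical numbers):

/* Userspace check of the nr_pages/offs computation used for kernel
 * buffers; the address and length are made up. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long p = 0x10000UL + 3072;     /* starts 3072 bytes into a page */
        unsigned long len = 5000;               /* bytes to map */
        unsigned long nr_pages = DIV_ROUND_UP(p + len, PAGE_SIZE) - p / PAGE_SIZE;
        unsigned long offs = p % PAGE_SIZE;

        /* prints: nr_pages=2 offs=3072 (1024 bytes in page 0, 3976 in page 1) */
        printf("nr_pages=%lu offs=%lu\n", nr_pages, offs);
        return 0;
}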
 
 /**
@@ -362,8 +394,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
  */
 static int
 p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
-                    char *uidata, char *uodata, int inlen,
-                    int outlen, int in_hdr_len, int kern_buf)
+                    struct iov_iter *uidata, struct iov_iter *uodata,
+                    int inlen, int outlen, int in_hdr_len)
 {
        int in, out, err, out_sgs, in_sgs;
        unsigned long flags;
@@ -371,41 +403,32 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
        struct page **in_pages = NULL, **out_pages = NULL;
        struct virtio_chan *chan = client->trans;
        struct scatterlist *sgs[4];
+       size_t offs;
+       int need_drop = 0;
 
        p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
        if (uodata) {
-               out_nr_pages = p9_nr_pages(uodata, outlen);
-               out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
-                                   GFP_NOFS);
-               if (!out_pages) {
-                       err = -ENOMEM;
-                       goto err_out;
-               }
-               out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
-                                                  out_nr_pages, 0, kern_buf);
-               if (out_nr_pages < 0) {
-                       err = out_nr_pages;
-                       kfree(out_pages);
-                       out_pages = NULL;
-                       goto err_out;
-               }
-       }
-       if (uidata) {
-               in_nr_pages = p9_nr_pages(uidata, inlen);
-               in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
-                                  GFP_NOFS);
-               if (!in_pages) {
-                       err = -ENOMEM;
-                       goto err_out;
+               int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+                                           outlen, &offs, &need_drop);
+               if (n < 0)
+                       return n;
+               out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+               if (n != outlen) {
+                       __le32 v = cpu_to_le32(n);
+                       memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+                       outlen = n;
                }
-               in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
-                                                 in_nr_pages, 1, kern_buf);
-               if (in_nr_pages < 0) {
-                       err = in_nr_pages;
-                       kfree(in_pages);
-                       in_pages = NULL;
-                       goto err_out;
+       } else if (uidata) {
+               int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+                                           inlen, &offs, &need_drop);
+               if (n < 0)
+                       return n;
+               in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
+               if (n != inlen) {
+                       __le32 v = cpu_to_le32(n);
+                       memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+                       inlen = n;
                }
        }
        req->status = REQ_STATUS_SENT;
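
When fewer bytes than requested could be mapped (n != outlen or n != inlen), the count that was already marshalled into the request header is corrected in place: for these zero-copy requests the 32-bit little-endian count field is the last four bytes written into req->tc->sdata, which is why the memcpy targets sdata[size - 4]. A userspace illustration of that in-place patch (buffer size and contents are made up):

/* Rewriting the trailing little-endian count word of an already-built
 * header buffer. 23 bytes happens to match a Twrite header:
 * size[4] type[1] tag[2] fid[4] offset[8] count[4]. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint8_t sdata[23];              /* stand-in for req->tc->sdata */
        size_t size = sizeof(sdata);    /* stand-in for req->tc->size  */
        uint32_t v = 8192;              /* bytes actually mapped       */

        memset(sdata, 0xaa, sizeof(sdata));
        /* equivalent of cpu_to_le32() on a little-endian host */
        memcpy(&sdata[size - 4], &v, 4);

        /* prints: 00 20 00 00  (8192 == 0x2000, little-endian) */
        printf("%02x %02x %02x %02x\n", sdata[size - 4], sdata[size - 3],
               sdata[size - 2], sdata[size - 1]);
        return 0;
}
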
@@ -424,7 +447,7 @@ req_retry_pinned:
        if (out_pages) {
                sgs[out_sgs++] = chan->sg + out;
                out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
-                                     out_pages, out_nr_pages, uodata, outlen);
+                                     out_pages, out_nr_pages, offs, outlen);
        }
                
        /*
@@ -442,7 +465,7 @@ req_retry_pinned:
        if (in_pages) {
                sgs[out_sgs + in_sgs++] = chan->sg + out + in;
                in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
-                                    in_pages, in_nr_pages, uidata, inlen);
+                                    in_pages, in_nr_pages, offs, inlen);
        }
 
        BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
@@ -476,7 +499,7 @@ req_retry_pinned:
         * Non kernel buffers are pinned, unpin them
         */
 err_out:
-       if (!kern_buf) {
+       if (need_drop) {
                if (in_pages) {
                        p9_release_pages(in_pages, in_nr_pages);
                        atomic_sub(in_nr_pages, &vp_pinned);
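
The unpin path is now keyed on need_drop rather than on the old kern_buf flag: only pages obtained through iov_iter_get_pages_alloc() hold extra references and count toward vp_pinned, so only those are released here, while the page arrays themselves are freed separately. For reference, p9_release_pages() is essentially a put_page() loop, roughly as sketched below (paraphrased from net/9p/trans_common.c, illustrative only):

/* Rough sketch of the helper used above to drop the page references
 * taken by iov_iter_get_pages_alloc(). */
#include <linux/mm.h>

void p9_release_pages(struct page **pages, int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (pages[i])
                        put_page(pages[i]);
}
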
@@ -502,7 +525,10 @@ static ssize_t p9_mount_tag_show(struct device *dev,
        vdev = dev_to_virtio(dev);
        chan = vdev->priv;
 
-       return snprintf(buf, chan->tag_len + 1, "%s", chan->tag);
+       memcpy(buf, chan->tag, chan->tag_len);
+       buf[chan->tag_len] = 0;
+
+       return chan->tag_len + 1;
 }
 
 static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
@@ -522,6 +548,12 @@ static int p9_virtio_probe(struct virtio_device *vdev)
        int err;
        struct virtio_chan *chan;
 
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
        if (!chan) {
                pr_err("Failed to allocate virtio 9P channel\n");
@@ -544,9 +576,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 
        chan->inuse = false;
        if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
-               vdev->config->get(vdev,
-                               offsetof(struct virtio_9p_config, tag_len),
-                               &tag_len, sizeof(tag_len));
+               virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
        } else {
                err = -EINVAL;
                goto out_free_vq;
@@ -556,8 +586,9 @@ static int p9_virtio_probe(struct virtio_device *vdev)
                err = -ENOMEM;
                goto out_free_vq;
        }
-       vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
-                       tag, tag_len);
+
+       virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
+                          tag, tag_len);
        chan->tag = tag;
        chan->tag_len = tag_len;
        err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
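
The two raw vdev->config->get() calls become virtio_cread()/virtio_cread_bytes(), which compute the field offset and size from the config structure and keep any byte-order handling in one place. For reference, the config space layout these helpers read is the following (from include/uapi/linux/virtio_9p.h of this era):

/* 9p-over-virtio config space: a length-prefixed mount tag. */
struct virtio_9p_config {
        /* length of the tag name */
        __u16 tag_len;
        /* non-NULL terminated tag name */
        __u8 tag[0];
} __attribute__((packed));

The tag being non-NUL-terminated is also why the mount_tag sysfs show routine earlier in this diff copies tag_len bytes with memcpy instead of formatting chan->tag with "%s".
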
@@ -574,9 +605,15 @@ static int p9_virtio_probe(struct virtio_device *vdev)
        /* Ceiling limit to avoid denial of service attacks */
        chan->p9_max_pages = nr_free_buffer_pages()/4;
 
+       virtio_device_ready(vdev);
+
        mutex_lock(&virtio_9p_lock);
        list_add_tail(&chan->chan_list, &virtio_chan_list);
        mutex_unlock(&virtio_9p_lock);
+
+       /* Let udev rules use the new mount_tag attribute. */
+       kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
+
        return 0;
 
 out_free_tag:
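
virtio_device_ready() is called before the channel is added to virtio_chan_list and before the KOBJ_CHANGE uevent, so the virtqueues may legally be used as soon as user space reacts to the new mount_tag attribute. The helper is a thin status-bit wrapper, roughly as below (paraphrased from include/linux/virtio_config.h of this era; sketch only):

/* Sets DRIVER_OK so the device may start servicing virtqueues before
 * probe() returns. */
static inline void virtio_device_ready(struct virtio_device *dev)
{
        unsigned status = dev->config->get_status(dev);

        BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
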
@@ -645,15 +682,33 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 static void p9_virtio_remove(struct virtio_device *vdev)
 {
        struct virtio_chan *chan = vdev->priv;
-
-       if (chan->inuse)
-               p9_virtio_close(chan->client);
-       vdev->config->del_vqs(vdev);
+       unsigned long warning_time;
 
        mutex_lock(&virtio_9p_lock);
+
+       /* Remove self from list so we don't get new users. */
        list_del(&chan->chan_list);
+       warning_time = jiffies;
+
+       /* Wait for existing users to close. */
+       while (chan->inuse) {
+               mutex_unlock(&virtio_9p_lock);
+               msleep(250);
+               if (time_after(jiffies, warning_time + 10 * HZ)) {
+                       dev_emerg(&vdev->dev,
+                                 "p9_virtio_remove: waiting for device in use.\n");
+                       warning_time = jiffies;
+               }
+               mutex_lock(&virtio_9p_lock);
+       }
+
        mutex_unlock(&virtio_9p_lock);
+
+       vdev->config->reset(vdev);
+       vdev->config->del_vqs(vdev);
+
        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
+       kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
        kfree(chan->tag);
        kfree(chan->vc_wq);
        kfree(chan);
@@ -694,7 +749,7 @@ static struct p9_trans_module p9_virtio_trans = {
         * page in zero copy.
         */
        .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
-       .def = 0,
+       .def = 1,
        .owner = THIS_MODULE,
 };
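
Finally, .def = 1 marks virtio as the transport a 9p mount uses when no trans= option is given. The 9p core selects the default roughly as in the simplified sketch below (based on net/9p/mod.c of this era; error handling and comments trimmed, treat as illustrative): a registered transport with .def set is preferred, otherwise the first one whose module can be pinned is used.

/* Simplified sketch of default-transport selection (net/9p/mod.c). */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/transport.h>

static DEFINE_SPINLOCK(v9fs_trans_lock);
static LIST_HEAD(v9fs_trans_list);

struct p9_trans_module *v9fs_get_default_trans(void)
{
        struct p9_trans_module *t, *found = NULL;

        spin_lock(&v9fs_trans_lock);

        /* prefer a transport that asked to be the default */
        list_for_each_entry(t, &v9fs_trans_list, list)
                if (t->def && try_module_get(t->owner)) {
                        found = t;
                        break;
                }

        /* otherwise take any registered transport we can pin */
        if (!found)
                list_for_each_entry(t, &v9fs_trans_list, list)
                        if (try_module_get(t->owner)) {
                                found = t;
                                break;
                        }

        spin_unlock(&v9fs_trans_lock);
        return found;
}

In practice this means a plain "mount -t 9p <mount_tag> <dir>" inside a guest now picks the virtio transport without an explicit trans=virtio option.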