firmware: Kconfig: ROCKCHIP_SIP depends on HAVE_ARM_SMCCC and ARCH_ROCKCHIP
[firefly-linux-kernel-4.4.55.git] / fs / ceph / file.c
index 0c62868b5c561b37fa866b68bbe34f3ffb1fd6a6..c8222bfe1e5669b00874274d2d4ee697d9ee2102 100644 (file)
  * need to wait for MDS acknowledgement.
  */
 
+/*
+ * Calculate the length sum of direct io vectors that can
+ * be combined into one page vector.
+ */
+static size_t dio_get_pagev_size(const struct iov_iter *it)
+{
+    const struct iovec *iov = it->iov;
+    const struct iovec *iovend = iov + it->nr_segs;
+    size_t size;
+
+    /* Bytes remaining in the current (first) segment. */
+    size = iov->iov_len - it->iov_offset;
+    /*
+     * An iov can be page vectored when both the current tail
+     * and the next base are page aligned.
+     */
+    while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
+           (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
+        /*
+         * Short-circuit above guarantees iov is still in range
+         * here; fold the whole next segment in.
+         */
+        size += iov->iov_len;
+    }
+    dout("dio_get_pagevlen len = %zu\n", size);
+    return size;
+}
+
+/*
+ * Allocate a page vector based on (@it, @nbytes).
+ * The return value is the tuple describing a page vector,
+ * that is (@pages, @page_align, @num_pages).
+ *
+ * On success the caller owns the returned array and the page
+ * references pinned by iov_iter_get_pages(); on failure an
+ * ERR_PTR is returned and any partially-pinned pages are
+ * released here.
+ */
+static struct page **
+dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
+                   size_t *page_align, int *num_pages)
+{
+       struct iov_iter tmp_it = *it;
+       size_t align;
+       struct page **pages;
+       int ret = 0, idx, npages;
+
+       /* Offset of the first byte within its page. */
+       align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
+               (PAGE_SIZE - 1);
+       npages = calc_pages_for(align, nbytes);
+       /*
+        * NOTE(review): kmalloc with a vmalloc fallback — whoever
+        * frees this array must handle both cases (kvfree-style);
+        * verify ceph_put_page_vector() and the success-path caller
+        * free it accordingly.
+        */
+       pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
+       if (!pages) {
+               pages = vmalloc(sizeof(*pages) * npages);
+               if (!pages)
+                       return ERR_PTR(-ENOMEM);
+       }
+
+       /* Pin user pages in chunks until all of @nbytes is covered. */
+       for (idx = 0; idx < npages; ) {
+               size_t start;
+               ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
+                                        npages - idx, &start);
+               if (ret < 0)
+                       goto fail;
+
+               iov_iter_advance(&tmp_it, ret);
+               nbytes -= ret;
+               /*
+                * ret bytes beginning at offset start span this many
+                * pages (round up the partial tail page).
+                */
+               idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
+       }
+
+       BUG_ON(nbytes != 0);
+       *num_pages = npages;
+       *page_align = align;
+       dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
+       return pages;
+fail:
+       /* Drop references on the idx pages pinned so far. */
+       ceph_put_page_vector(pages, idx, false);
+       return ERR_PTR(ret);
+}
 
 /*
  * Prepare an open request.  Preallocate ceph_cap to avoid an
@@ -458,11 +526,10 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                        size_t start;
                        ssize_t n;
 
-                       n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start);
-                       if (n < 0)
-                               return n;
-
-                       num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
+                       n = dio_get_pagev_size(i);
+                       pages = dio_get_pages_alloc(i, n, &start, &num_pages);
+                       if (IS_ERR(pages))
+                               return PTR_ERR(pages);
 
                        ret = striped_read(inode, off, n,
                                           pages, num_pages, checkeof,
@@ -592,7 +659,7 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                CEPH_OSD_FLAG_WRITE;
 
        while (iov_iter_count(from) > 0) {
-               u64 len = iov_iter_single_seg_count(from);
+               u64 len = dio_get_pagev_size(from);
                size_t start;
                ssize_t n;
 
@@ -611,14 +678,14 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 
                osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 
-               n = iov_iter_get_pages_alloc(from, &pages, len, &start);
-               if (unlikely(n < 0)) {
-                       ret = n;
+               n = len;
+               pages = dio_get_pages_alloc(from, len, &start, &num_pages);
+               if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
+                       ret = PTR_ERR(pages);
                        break;
                }
 
-               num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
                /*
                 * throw out any page cache pages in this range. this
                 * may block.
@@ -862,7 +929,8 @@ again:
                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
-                        __free_page(page);
+                       if (page)
+                               __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;