block: rewrite and split __bio_copy_iov(), fold __bio_map_user_iov() into bio_map_user_iov()
[firefly-linux-kernel-4.4.55.git] block/bio.c
index a69a9c9e7c93899dc1e8ce4d9e6f577a453c5c0c..f66a4eae16ee4a96c9469c7a9311de3437a923c5 100644
@@ -1036,43 +1036,66 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
                       sizeof(struct iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, const struct iov_iter *iter,
-                         int to_user, int from_user)
+/**
+ * bio_copy_from_iter - copy all pages from iov_iter to bio
+ * @bio: The &struct bio which describes the I/O as destination
+ * @iter: iov_iter as source
+ *
+ * Copy all pages from iov_iter to bio.
+ * Returns 0 on success, or -EFAULT if the data could not be copied in full.
+ */
+static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
 {
-       int ret = 0, i;
+       int i;
        struct bio_vec *bvec;
-       struct iov_iter iov_iter = *iter;
 
        bio_for_each_segment_all(bvec, bio, i) {
-               char *bv_addr = page_address(bvec->bv_page);
-               unsigned int bv_len = bvec->bv_len;
-
-               while (bv_len && iov_iter.count) {
-                       struct iovec iov = iov_iter_iovec(&iov_iter);
-                       unsigned int bytes = min_t(unsigned int, bv_len,
-                                                  iov.iov_len);
-
-                       if (!ret) {
-                               if (to_user)
-                                       ret = copy_to_user(iov.iov_base,
-                                                          bv_addr, bytes);
-
-                               if (from_user)
-                                       ret = copy_from_user(bv_addr,
-                                                            iov.iov_base,
-                                                            bytes);
-
-                               if (ret)
-                                       ret = -EFAULT;
-                       }
+               ssize_t ret;
 
-                       bv_len -= bytes;
-                       bv_addr += bytes;
-                       iov_iter_advance(&iov_iter, bytes);
-               }
+               ret = copy_page_from_iter(bvec->bv_page,
+                                         bvec->bv_offset,
+                                         bvec->bv_len,
+                                         &iter);
+
+               if (!iov_iter_count(&iter))
+                       break;
+
+               if (ret < bvec->bv_len)
+                       return -EFAULT;
        }
 
-       return ret;
+       return 0;
+}
+
+/**
+ * bio_copy_to_iter - copy all pages from bio to iov_iter
+ * @bio: The &struct bio which describes the I/O as source
+ * @iter: iov_iter as destination
+ *
+ * Copy all pages from bio to iov_iter.
+ * Returns 0 on success, or -EFAULT if the data could not be copied in full.
+ */
+static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
+{
+       int i;
+       struct bio_vec *bvec;
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               ssize_t ret;
+
+               ret = copy_page_to_iter(bvec->bv_page,
+                                       bvec->bv_offset,
+                                       bvec->bv_len,
+                                       &iter);
+
+               if (!iov_iter_count(&iter))
+                       break;
+
+               if (ret < bvec->bv_len)
+                       return -EFAULT;
+       }
+
+       return 0;
 }
 
 static void bio_free_pages(struct bio *bio)
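
Both new helpers take the iov_iter by value rather than by pointer: copy_page_from_iter() and copy_page_to_iter() advance the iterator as they copy, so working on a private copy leaves the caller's iterator untouched. Note also that the iterator-exhaustion check comes before the short-copy check, so an iterator shorter than the bio ends the walk without an error. A minimal sketch of why the by-value convention matters (prepare_copied_write() and its use are hypothetical; bio_copy_from_iter() and struct bio_map_data are from this patch):

    /*
     * Sketch only: snapshot the iterator before filling the bio, so a
     * READ can later be copied back to the same user addresses.
     * Passing *iter by value means only a private copy is advanced.
     */
    static int prepare_copied_write(struct bio *bio, struct bio_map_data *bmd,
                                    const struct iov_iter *iter)
    {
            bmd->iter = *iter;                      /* for bio_uncopy_user() */
            return bio_copy_from_iter(bio, *iter);  /* advances a copy only */
    }
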
@@ -1101,9 +1124,8 @@ int bio_uncopy_user(struct bio *bio)
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free.
                 */
-               if (current->mm)
-                       ret = __bio_copy_iov(bio, &bmd->iter,
-                                            bio_data_dir(bio) == READ, 0);
+               if (current->mm && bio_data_dir(bio) == READ)
+                       ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
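
With the direction test hoisted out of the helper, the whole function after this hunk applies reads roughly as follows (a reconstruction; the lines outside the hunk's context are assumed from nearby kernel versions):

    int bio_uncopy_user(struct bio *bio)
    {
            struct bio_map_data *bmd = bio->bi_private;
            int ret = 0;

            if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                    /*
                     * if we're in a workqueue, the request is orphaned, so
                     * don't copy into a random user address space, just free.
                     */
                    if (current->mm && bio_data_dir(bio) == READ)
                            ret = bio_copy_to_iter(bio, bmd->iter);
                    if (bmd->is_our_pages)
                            bio_free_pages(bio);
            }
            kfree(bmd);
            bio_put(bio);
            return ret;
    }

Note that the old code passed bio_data_dir(bio) == READ as the to_user flag, so a WRITE completion still walked every segment copying nothing; the new form skips the walk entirely.
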
@@ -1228,7 +1250,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
-               ret = __bio_copy_iov(bio, iter, 0, 1);
+               ret = bio_copy_from_iter(bio, *iter);
                if (ret)
                        goto cleanup;
        }
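
bio_copy_user_iov() is normally reached through blk_rq_map_user_iov(). A hedged sketch of a caller bouncing a user WRITE through kernel pages (the setup details are assumed; a NULL rq_map_data makes the helper allocate its own pages):

    struct iov_iter i;
    struct bio *bio;

    /* direction WRITE: user data flows toward the device */
    iov_iter_init(&i, WRITE, iov, nr_segs, total_len);

    bio = bio_copy_user_iov(q, NULL, &i, GFP_KERNEL);
    if (IS_ERR(bio))
            return PTR_ERR(bio);
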
@@ -1244,10 +1266,18 @@ out_bmd:
        return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user_iov(struct request_queue *q,
-                                     struct block_device *bdev,
-                                     const struct iov_iter *iter,
-                                     gfp_t gfp_mask)
+/**
+ *     bio_map_user_iov - map user iovec into bio
+ *     @q:             the struct request_queue for the bio
+ *     @iter:          iovec iterator
+ *     @gfp_mask:      memory allocation flags
+ *
+ *	Map the user space address into a bio suitable for I/O to a block
+ *     device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(struct request_queue *q,
+                            const struct iov_iter *iter,
+                            gfp_t gfp_mask)
 {
        int j;
        int nr_pages = 0;
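
With the wrapper folded in, the @bdev argument disappears as well: nothing in the mapping path used it, and the caller is left to set bio->bi_bdev if it needs it. An updated call site would look roughly like this (cf. blk_rq_map_user_iov(); the surrounding code is assumed):

    bio = bio_map_user_iov(q, iter, gfp_mask);
    if (IS_ERR(bio))
            return PTR_ERR(bio);
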
@@ -1343,8 +1373,15 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        if (iter->type & WRITE)
                bio->bi_rw |= REQ_WRITE;
 
-       bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
+
+       /*
+        * subtle -- if this bio ended up being bounced, it would
+        * normally disappear when its bi_end_io is run.  however,
+        * we need it for the unmap, so grab an extra reference
+        * to it
+        */
+       bio_get(bio);
        return bio;
 
  out_unmap:
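
The reference taken by the bio_get() above pairs with the final bio_put() in bio_unmap_user(), which this patch leaves untouched; for reference, reconstructed from bio.c of the same era:

    void bio_unmap_user(struct bio *bio)
    {
            __bio_unmap_user(bio);  /* dirty and release the pinned pages */
            bio_put(bio);           /* drop the mapping reference */
    }
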
@@ -1359,37 +1396,6 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        return ERR_PTR(ret);
 }
 
-/**
- *     bio_map_user_iov - map user iovec into bio
- *     @q:             the struct request_queue for the bio
- *     @bdev:          destination block device
- *     @iter:          iovec iterator
- *     @gfp_mask:      memory allocation flags
- *
- *     Map the user space address into a bio suitable for io to a block
- *     device. Returns an error pointer in case of error.
- */
-struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
-                            const struct iov_iter *iter,
-                            gfp_t gfp_mask)
-{
-       struct bio *bio;
-
-       bio = __bio_map_user_iov(q, bdev, iter, gfp_mask);
-       if (IS_ERR(bio))
-               return bio;
-
-       /*
-        * subtle -- if __bio_map_user() ended up bouncing a bio,
-        * it would normally disappear when its bi_end_io is run.
-        * however, we need it for the unmap, so grab an extra
-        * reference to it
-        */
-       bio_get(bio);
-
-       return bio;
-}
-
 static void __bio_unmap_user(struct bio *bio)
 {
        struct bio_vec *bvec;