diff --git a/block/blk-merge.c b/block/blk-merge.c
index e01405a3e8b3f51ce0424a844fadb3304bda5e44..7225511cf0b48b1644ef3c56bf77d1f58e4911ca 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -68,6 +68,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+                                      struct bio *bio)
+{
+       unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+       unsigned mask = queue_logical_block_size(q) - 1;
+
+       /* aligned to logical block size */
+       sectors &= ~(mask >> 9);
+
+       return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
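
The alignment in get_max_io_size() rounds the per-offset limit from blk_max_size_offset() down to a whole number of logical blocks. A minimal standalone sketch of that arithmetic, assuming a hypothetical 4096-byte logical block size and an arbitrary example limit (plain user-space C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: 4 KiB logical blocks and a per-offset
	 * limit of 2555 sectors, as blk_max_size_offset() might return. */
	unsigned sectors = 2555;
	unsigned mask = 4096 - 1;	/* queue_logical_block_size() - 1 */

	/* mask >> 9 == 7: clearing the low three bits rounds the count
	 * down to a multiple of 8 sectors, i.e. one 4 KiB block. */
	sectors &= ~(mask >> 9);

	printf("aligned max sectors: %u\n", sectors);	/* prints 2552 */
	return 0;
}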
@@ -79,9 +91,29 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
+       const unsigned max_sectors = get_max_io_size(q, bio);
+       unsigned bvecs = 0;
 
        bio_for_each_segment(bv, bio, iter) {
-               if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
+               /*
+                * With arbitrary bio sizes, the incoming bio may be very
+                * big. We have to split it into smaller bios so that
+                * each holds at most BIO_MAX_PAGES bvecs, because
+                * bio_clone() can fail to allocate big bvecs.
+                *
+                * It would be better to apply the limit per request
+                * queue in which bio_clone() is involved, instead of
+                * globally. The biggest blocker is the bio_clone()
+                * in bio bounce.
+                *
+                * If a bio is split for this reason, we ought to allow
+                * the resulting bios to be merged again, but don't do
+                * that now, to keep the change simple.
+                *
+                * TODO: deal with bio bounce's bio_clone() gracefully
+                * and convert the global limit into a per-queue limit.
+                */
+               if (bvecs++ >= BIO_MAX_PAGES)
                        goto split;
 
                /*
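
For scale, BIO_MAX_PAGES is 256 in this kernel, so with one 4 KiB page per bvec the check above caps a single bio at about 1 MiB before a split is forced. A tiny standalone arithmetic sketch (assumed page size, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned bio_max_pages = 256;	/* BIO_MAX_PAGES */
	unsigned page_size = 4096;	/* assumed 4 KiB pages */

	/* One page per bvec: the largest bio that passes the bvec-count
	 * check covers 256 * 4 KiB = 1024 KiB. */
	printf("max bio payload: %u KiB\n", bio_max_pages * page_size / 1024);
	return 0;
}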
@@ -91,6 +123,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;
 
+               if (sectors + (bv.bv_len >> 9) > max_sectors) {
+                       /*
+                        * Consider this a new segment if we're splitting in
+                        * the middle of this vector.
+                        */
+                       if (nsegs < queue_max_segments(q) &&
+                           sectors < max_sectors) {
+                               nsegs++;
+                               sectors = max_sectors;
+                       }
+                       if (sectors)
+                               goto split;
+                       /* Make this single bvec the first segment */
+               }
+
                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
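
The max_sectors branch above splits in the middle of a bvec when only part of it still fits under the limit. A standalone sketch of that decision with hypothetical numbers (user-space C reduced to the two counters involved; must_split() and the max_segments value are illustrative assumptions, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the max_sectors check: returns true when the bio has to be
 * split before finishing this bvec, updating the segment and sector
 * counts the same way the loop above does. */
static bool must_split(unsigned *nsegs, unsigned *sectors,
		       unsigned bv_sectors, unsigned max_sectors,
		       unsigned max_segments)
{
	if (*sectors + bv_sectors > max_sectors) {
		/* Part of the bvec still fits: account one more segment
		 * and stop exactly at max_sectors. */
		if (*nsegs < max_segments && *sectors < max_sectors) {
			(*nsegs)++;
			*sectors = max_sectors;
		}
		if (*sectors)
			return true;
		/* Fall through: this single bvec becomes the first segment. */
	}
	return false;
}

int main(void)
{
	/* Hypothetical state: limit of 2552 sectors, 2544 already counted,
	 * next bvec is 16 KiB (32 sectors). */
	unsigned nsegs = 5, sectors = 2544;

	if (must_split(&nsegs, &sectors, 32, 2552, 128))
		printf("split at %u sectors, %u segments\n", sectors, nsegs);
	return 0;
}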