block/blk-merge.c
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

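/*
 * Split a discard bio so that it obeys the queue's discard limits: the
 * front part covers at most max_discard_sectors, rounded down to the
 * discard granularity, and ends so that the remainder starts on a
 * granularity-aligned boundary when possible.  Returns the split-off
 * front bio, or NULL if no split is needed.
 */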
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

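/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns the front part that fits, or NULL if no split is needed.
 */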
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

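/*
 * Return the maximum number of sectors that may be issued at the bio's
 * starting sector, rounded down so that a split there stays aligned to
 * the queue's logical block size.
 */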
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}

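/*
 * Walk the bio and split it at the first point where it would exceed the
 * queue's segment count, segment size, SG gap or max-sectors limits.
 * Returns the split-off front bio (the remainder stays in @bio), or NULL
 * if the whole bio already fits.
 */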
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
        unsigned bvecs = 0;

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * With arbitrary bio size, the incoming bio may be very
                 * big. We have to split the bio into small bios so that
                 * each holds at most BIO_MAX_PAGES bvecs because
                 * bio_clone() can fail to allocate big bvecs.
                 *
                 * It would be better to apply the limit only to queues
                 * where bio_clone() is involved, instead of globally.
                 * The biggest blocker is the bio_clone() in bio bounce.
                 *
                 * If a bio is split for this reason, the resulting bios
                 * should be allowed to merge again, but we don't do that
                 * now to keep the change simple.
                 *
                 * TODO: deal with bio bounce's bio_clone() gracefully
                 * and convert the global limit into a per-queue limit.
                 */
                if (bvecs++ >= BIO_MAX_PAGES)
                        goto split;

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < max_sectors) {
                                nsegs++;
                                sectors = max_sectors;
                        }
                        if (sectors)
                                goto split;
                        /* Make this single bvec the 1st segment */
                }

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        if (nsegs == 1 && seg_size > front_seg_size)
                                front_seg_size = seg_size;
                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

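/**
 * blk_queue_split - split a bio so that it obeys the queue limits
 * @q:   request queue the bio is (about to be) queued on
 * @bio: in/out: the bio to check; on return it points at the front piece
 *       that fits the limits, while the remainder has been chained and
 *       resubmitted via generic_make_request()
 * @bs:  bio_set to allocate split bios from (the regular read/write path
 *       uses q->bio_split instead)
 *
 * Splits discard, write-same and regular bios according to the queue's
 * limits and caches the resulting physical segment count.
 */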
void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
{
        struct bio *split, *res;
        unsigned nsegs;

        if ((*bio)->bi_rw & REQ_DISCARD)
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
        else
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_rw |= REQ_NOMERGE;

                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

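/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering and segment size limits, and update the front/back segment
 * size hints used for merging decisions.
 */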
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        /*
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

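/*
 * Recount the physical segments of a single bio and cache the result in
 * bio->bi_phys_segments, marking the bio BIO_SEG_VALID.
 */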
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

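/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can
 * be merged into one physical segment without violating the queue's
 * segment size and boundary limits, 0 otherwise.
 */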
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;
        struct bvec_iter iter;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_for_each_segment(end_bv, bio, iter)
                if (end_bv.bv_len == iter.bi_size)
                        break;

        nxt_bv = bio_iovec(nxt);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

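/*
 * Map one bio_vec into the scatterlist: either extend the current sg
 * entry when the queue allows clustering and the limits permit it, or
 * start a new entry.
 */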
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{

        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

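/*
 * Map all bio_vecs of a bio chain into @sglist.  Discard and WRITE SAME
 * bios are special-cased to at most a single payload segment.  Returns
 * the number of sg entries used.
 */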
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        if (bio->bi_rw & REQ_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
                 * blk_add_request_payload(), a discard bio may or may not have
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */

                if (bio->bi_vcnt)
                        goto single_segment;

                return 0;
        }

        if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
        }

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the computed number of
         * segments is bigger than the number of the request's physical
         * segments.
         */
        WARN_ON(nsegs > rq->nr_phys_segments);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

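/*
 * Check whether @bio can be added to @req as additional physical segments
 * without exceeding the queue's segment limit; update the request's
 * segment count on success, or mark the request unmergeable on failure.
 */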
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump the
         * segment counter.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{

        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

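/*
 * Check whether @req and @next can be merged into a single request without
 * violating the queue's size and segment limits; update the merged segment
 * count and the front/back segment size hints if they can.
 */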
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request.
         * We can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

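/*
 * A request is disappearing because it was merged into another one; drop
 * it from the partition's in-flight accounting.
 */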
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to req and release next.  ll_merge_requests_fn() will have
         * updated the segment counts; update the sector counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

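/*
 * Check whether @bio is allowed to be merged into @rq at all: same data
 * direction, same device, compatible integrity and WRITE SAME settings,
 * and no special (prepared) command in the request.
 */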
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

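/*
 * Decide how @bio relates to @rq by sector position alone: back merge if
 * it starts right after the request, front merge if it ends right before
 * it, otherwise no merge.
 */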
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}