arm64: dts: rockchip: rk3368: init aclk_cci_pre 576M
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6d6f8feb48c08ab875e67c496193a743709b0621..d8d63c38bf295567cd0532bfe58da0ce01376aad 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
-               if (unlikely(blk_queue_dying(rq->q)))
-                       blk_mq_complete_request(rq, -EIO);
+               if (unlikely(blk_queue_dying(rq->q))) {
+                       rq->errors = -EIO;
+                       blk_mq_end_request(rq, rq->errors);
+               }
                return;
        }
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
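The first hunk changes how an un-started request on a dying queue is failed: rather than routing it through blk_mq_complete_request(), the error is recorded in rq->errors and the request is ended immediately with blk_mq_end_request(). A minimal sketch of the resulting check, assuming the REQ_ATOM_STARTED test that the surrounding 4.4-era code performs (it sits just above the visible context):

        /* Sketch; the REQ_ATOM_STARTED test is assumed from the code
         * just above this hunk, not shown in it. */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                /*
                 * Never started on a dying queue: fail it here with -EIO,
                 * or it would go unnoticed forever.
                 */
                if (unlikely(blk_queue_dying(rq->q))) {
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                }
                return;
        }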
@@ -778,7 +780,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
-                       continue;
+                       break;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        list_add(&rq->queuelist, &rq_list);
                        __blk_mq_requeue_request(rq);
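The continue/break swap in __blk_mq_run_hw_queue() is subtle: inside a switch that sits in a loop, "continue" begins the next loop iteration immediately, while "break" only leaves the switch, so the statements that follow it in the loop body (not visible in this hunk) now also run for the BLK_MQ_RQ_QUEUE_OK case. A standalone illustration of that C difference; every name in it (more_work, dispatch_one, requeue_last, post_dispatch_work, the DISPATCH_* values) is hypothetical:

        /* Hypothetical helpers, for illustration only. */
        while (more_work()) {
                switch (dispatch_one()) {
                case DISPATCH_OK:
                        queued++;
                        break;          /* leaves the switch only */
                case DISPATCH_BUSY:
                        requeue_last();
                        continue;       /* jumps to the next iteration */
                }
                post_dispatch_work();   /* now reached in the OK case too */
        }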
@@ -840,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
                return WORK_CPU_UNBOUND;
 
        if (--hctx->next_cpu_batch <= 0) {
-               int cpu = hctx->next_cpu, next_cpu;
+               int next_cpu;
 
                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
@@ -848,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
-               return cpu;
        }
 
        return hctx->next_cpu;
 }
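blk_mq_hctx_next_cpu() round-robins queue runs across the CPUs in the hctx mask, advancing every BLK_MQ_CPU_WORK_BATCH calls. The dropped "return cpu" statement meant that on the very call where the batch expired, the function still reported the CPU it was leaving; after the fix it returns the freshly selected CPU. A worked trace under assumed values (cpumask {0, 1}, BLK_MQ_CPU_WORK_BATCH of 8, next_cpu starting at 0 with a full batch):

        /*
         * Worked example, assumed values:
         *
         *   calls 1..7  : batch counts down 7..1, return CPU 0
         *   call  8     : batch hits 0 -> advance, reset batch, return CPU 1
         *   calls 9..15 : batch counts down 7..1, return CPU 1
         *
         * The old code returned the stale CPU 0 on call 8, so the move to
         * CPU 1 only took effect one call later.
         */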
@@ -1259,12 +1259,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q)) {
-               if (blk_attempt_plug_merge(q, bio, &request_count,
-                                          &same_queue_rq))
-                       return BLK_QC_T_NONE;
-       } else
-               request_count = blk_plug_queued_count(q);
+       if (!is_flush_fua && !blk_queue_nomerges(q) &&
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+               return BLK_QC_T_NONE;
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
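In blk_mq_make_request() the plug-merge attempt collapses into one short-circuited condition: blk_attempt_plug_merge() is reached only when the bio is not flush/FUA and the queue allows merging, and the nomerge fallback that refreshed request_count via blk_plug_queued_count() is dropped from this multiqueue path (the final hunk shows the single-queue path gaining exactly that fallback). The resulting flow, using only names visible in the hunk:

        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return BLK_QC_T_NONE;   /* bio merged into a plugged request */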
@@ -1311,9 +1308,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                if (!old_rq)
                        goto done;
-               if (!blk_mq_direct_issue_request(old_rq, &cookie))
-                       goto done;
-               blk_mq_insert_request(old_rq, false, true, true);
+               if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
+                   blk_mq_direct_issue_request(old_rq, &cookie) != 0)
+                       blk_mq_insert_request(old_rq, false, true, true);
                goto done;
        }
 
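The direct-issue fallback is restructured around || short-circuiting: a hardware queue flagged BLK_MQ_S_STOPPED never calls blk_mq_direct_issue_request() at all, and both the stopped case and a failed issue fall back to ordinary insertion, while a successful direct issue proceeds straight to done. Compactly (names from the hunk):

        /* Stopped queue?  Skip direct issue.  Direct issue failed?
         * Either way, queue the request normally instead. */
        if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
            blk_mq_direct_issue_request(old_rq, &cookie) != 0)
                blk_mq_insert_request(old_rq, false, true, true);
        goto done;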
@@ -1355,9 +1352,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return BLK_QC_T_NONE;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return BLK_QC_T_NONE;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
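The last hunk gives blk_sq_make_request() the structure the multiqueue path just lost: when merging is skipped (a flush/FUA bio, or a nomerge queue), request_count is still refreshed from blk_plug_queued_count(q). That matters because the single-queue plug path uses request_count to decide when to drain the plug list. A simplified sketch of that consumer, assuming the 4.4-era plug handling further down this function (not the verbatim code):

        /* Simplified sketch of the plug path below in this function. */
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
                /* An accurate request_count keeps the plug bounded even on
                 * nomerge queues; stuck at 0, this flush would never fire. */
                if (request_count >= BLK_MAX_REQUEST_COUNT)
                        blk_flush_plug_list(plug, false);
                list_add_tail(&rq->queuelist, &plug->mq_list);
                return cookie;
        }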