diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5c62507a510d..ee5ed98db4cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q: the request queue
  * @ordered: one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 		return -EINVAL;
 	}
 
+	q->ordered = ordered;
 	q->next_ordered = ordered;
 	q->prepare_flush_fn = prepare_flush_fn;
 
@@ -506,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -530,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
@@ -2577,6 +2573,8 @@ void disk_round_stats(struct gendisk *disk)
 	disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
@@ -2632,6 +2630,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3152,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	if (blk_fs_request(req) && req->rq_disk) {
 		const int rw = rq_data_dir(req);
 
-		__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -3448,7 +3447,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
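
A note on the blk_do_ordered() rewrite above: the old switch computed a single
allowed_rq for the current stage and stalled every other request, including
ones whose stage had already completed. The new code instead compares a
request's position in the ordered sequence against the queue's current
position and holds back only requests whose turn has not yet come. Below is a
minimal userspace sketch of that draining rule; the enum values and
may_dispatch() are illustrative stand-ins for the kernel's QUEUE_ORDSEQ_*
machinery (which encodes the stages as bit flags), not actual kernel API.

	#include <stdio.h>

	/* Illustrative stages, modeled on QUEUE_ORDSEQ_*; plain increasing
	 * values are enough to show the comparison the patch introduces. */
	enum ordseq {
		ORDSEQ_DRAIN,		/* drain in-flight requests */
		ORDSEQ_PREFLUSH,	/* pre-barrier cache flush */
		ORDSEQ_BAR,		/* the barrier request itself */
		ORDSEQ_POSTFLUSH,	/* post-barrier cache flush */
	};

	/* Drain-based rule from the patch: issue a request only once the
	 * sequence has reached its stage ("wait for turn"). A request from
	 * an earlier stage is a sequencing bug; the kernel WARN_ONs it. */
	static int may_dispatch(enum ordseq req_seq, enum ordseq cur_seq)
	{
		if (req_seq < cur_seq)
			fprintf(stderr, "bug: request from a past stage\n");
		return req_seq <= cur_seq;
	}

	int main(void)
	{
		/* While the pre-flush runs, the barrier is held back... */
		printf("bar @ preflush: %d\n",
		       may_dispatch(ORDSEQ_BAR, ORDSEQ_PREFLUSH));	/* 0 */
		/* ...and may be issued once the sequence reaches BAR. */
		printf("bar @ bar:      %d\n",
		       may_dispatch(ORDSEQ_BAR, ORDSEQ_BAR));		/* 1 */
		return 0;
	}

The tag-ordered path needs none of this: tagged queueing hardware orders the
commands itself, so, as the patch's comment says, blocking the next barrier
is enough.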
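
Likewise, the final blk_dev_init() hunk swaps an open-coded walk over all
NR_CPUS array slots for for_each_cpu() (later renamed for_each_possible_cpu
upstream), which visits only CPUs that can ever exist on the running system.
A rough userspace model of the difference, with the possible-CPU map reduced
to a hypothetical 4-CPU bitmap:

	#include <stdio.h>

	#define NR_CPUS 64			/* compile-time maximum */

	/* Hypothetical stand-in for the kernel's possible-CPU map:
	 * here only CPUs 0-3 can ever exist. */
	static const unsigned long long cpu_possible_bits = 0xfULL;

	static int cpu_possible(int cpu)
	{
		return (cpu_possible_bits >> cpu) & 1ULL;
	}

	int main(void)
	{
		int i, visited = 0;

		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_possible(i))
				continue;	/* the test for_each_cpu() hides */
			visited++;		/* e.g. INIT_LIST_HEAD() per CPU */
		}
		printf("touched %d of %d per-CPU slots\n", visited, NR_CPUS);
		return 0;
	}

Every possible CPU's blk_cpu_done list is still initialized before the block
softirq can run; the loop merely stops touching slots for CPUs that can never
come online.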