X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=block%2Felevator.c;h=c3555c9c672f94c1f13c3cd3c75c037e7c8110a7;hb=b9eeaf94f6836f32f483ea4cc1751ee281c8ad11;hp=eba5b04c29b135bdc6b84ac80690d380d59dd8a5;hpb=91e14b294f3fe4d8b28516b21ceef187ea9b1bdf;p=firefly-linux-kernel-4.4.55.git

diff --git a/block/elevator.c b/block/elevator.c
index eba5b04c29b1..c3555c9c672f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -35,11 +35,11 @@
 #include <linux/hash.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/blk-cgroup.h>
 
 #include <trace/events/block.h>
 
 #include "blk.h"
-#include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
@@ -150,14 +150,14 @@ void __init load_default_elevator_module(void)
 
 static struct kobj_type elv_ktype;
 
-static struct elevator_queue *elevator_alloc(struct request_queue *q,
+struct elevator_queue *elevator_alloc(struct request_queue *q,
 				  struct elevator_type *e)
 {
 	struct elevator_queue *eq;
 
-	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
-		goto err;
+		return NULL;
 
 	eq->type = e;
 	kobject_init(&eq->kobj, &elv_ktype);
@@ -165,11 +165,8 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
 	hash_init(eq->hash);
 
 	return eq;
-err:
-	kfree(eq);
-	elevator_put(e);
-	return NULL;
 }
+EXPORT_SYMBOL(elevator_alloc);
 
 static void elevator_release(struct kobject *kobj)
 {
@@ -185,6 +182,12 @@ int elevator_init(struct request_queue *q, char *name)
 	struct elevator_type *e = NULL;
 	int err;
 
+	/*
+	 * q->sysfs_lock must be held to provide mutual exclusion between
+	 * elevator_switch() and here.
+	 */
+	lockdep_assert_held(&q->sysfs_lock);
+
 	if (unlikely(q->elevator))
 		return 0;
 
@@ -221,17 +224,10 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	q->elevator = elevator_alloc(q, e);
-	if (!q->elevator)
-		return -ENOMEM;
-
-	err = e->ops.elevator_init_fn(q);
-	if (err) {
-		kobject_put(&q->elevator->kobj);
-		return err;
-	}
-
-	return 0;
+	err = e->ops.elevator_init_fn(q, e);
+	if (err)
+		elevator_put(e);
+	return err;
 }
 EXPORT_SYMBOL(elevator_init);
 
@@ -249,6 +245,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
+	rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -263,6 +260,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+	rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -422,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	 *	noxmerges: Only simple one-hit cache try
 	 *	merges:	   All merge tries attempted
 	 */
-	if (blk_queue_nomerges(q))
+	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 		return ELEVATOR_NO_MERGE;
 
 	/*
@@ -442,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, bio->bi_sector);
+	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		*req = __rq;
 		return ELEVATOR_BACK_MERGE;
 	}
@@ -537,7 +535,7 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
 	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
@@ -729,26 +727,6 @@ int elv_may_queue(struct request_queue *q, int rw)
 	return ELV_MQUEUE_MAY;
 }
 
-void elv_abort_queue(struct request_queue *q)
-{
-	struct request *rq;
-
-	blk_abort_flushes(q);
-
-	while (!list_empty(&q->queue_head)) {
-		rq = list_entry_rq(q->queue_head.next);
-		rq->cmd_flags |= REQ_QUIET;
-		trace_block_rq_abort(q, rq);
-		/*
-		 * Mark this request as started so we don't trigger
-		 * any debug logic in the end I/O path.
-		 */
-		blk_start_request(rq);
-		__blk_end_request_all(rq, -EIO);
-	}
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
@@ -828,6 +806,8 @@ int elv_register_queue(struct request_queue *q)
 		}
 		kobject_uevent(&e->kobj, KOBJ_ADD);
 		e->registered = 1;
+		if (e->type->ops.elevator_registered_fn)
+			e->type->ops.elevator_registered_fn(q);
 	}
 	return error;
 }
@@ -935,17 +915,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate, init and register new elevator */
-	err = -ENOMEM;
-	q->elevator = elevator_alloc(q, new_e);
-	if (!q->elevator)
+	err = new_e->ops.elevator_init_fn(q, new_e);
+	if (err)
 		goto fail_init;
 
-	err = new_e->ops.elevator_init_fn(q);
-	if (err) {
-		kobject_put(&q->elevator->kobj);
-		goto fail_init;
-	}
-
 	if (registered) {
 		err = elv_register_queue(q);
 		if (err)
@@ -974,7 +947,7 @@ fail_init:
 /*
  * Switch this queue to the given IO scheduler.
  */
-int elevator_change(struct request_queue *q, const char *name)
+static int __elevator_change(struct request_queue *q, const char *name)
 {
 	char elevator_name[ELV_NAME_MAX];
 	struct elevator_type *e;
@@ -996,6 +969,18 @@ int elevator_change(struct request_queue *q, const char *name)
 
 	return elevator_switch(q, e);
 }
+
+int elevator_change(struct request_queue *q, const char *name)
+{
+	int ret;
+
+	/* Protect q->elevator from elevator_init() */
+	mutex_lock(&q->sysfs_lock);
+	ret = __elevator_change(q, name);
+	mutex_unlock(&q->sysfs_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(elevator_change);
 
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
@@ -1006,7 +991,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 	if (!q->elevator)
 		return count;
 
-	ret = elevator_change(q, name);
+	ret = __elevator_change(q, name);
 	if (!ret)
 		return count;
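
For context on the central change above: ->elevator_init_fn() now receives the elevator_type, and each I/O scheduler allocates the elevator_queue itself through the newly exported elevator_alloc(), publishing it in q->elevator only once it is fully set up; if the callback fails, elevator_init() drops the type reference with elevator_put(), as the hunks show. A minimal init callback written against that contract looks roughly like the noop scheduler's from the same kernel generation; noop_data/noop_init_queue below follow noop-iosched.c and are given only as a sketch, not as part of this diff:

	struct noop_data {
		struct list_head queue;
	};

	static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
	{
		struct noop_data *nd;
		struct elevator_queue *eq;

		/* Returns NULL on failure; elevator_init() then drops the type ref. */
		eq = elevator_alloc(q, e);
		if (!eq)
			return -ENOMEM;

		nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
		if (!nd) {
			/* Dropping the kobject ref frees eq via elevator_release(). */
			kobject_put(&eq->kobj);
			return -ENOMEM;
		}
		eq->elevator_data = nd;

		INIT_LIST_HEAD(&nd->queue);

		/* Publish the fully initialized elevator under the queue lock. */
		spin_lock_irq(q->queue_lock);
		q->elevator = eq;
		spin_unlock_irq(q->queue_lock);
		return 0;
	}

Keeping the allocation inside the scheduler means q->elevator is never visible half-initialized, which is the same race that the q->sysfs_lock serialization between elevator_init() and elevator_change()/elevator_switch() in the hunks above guards against.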