Merge branch 'for-linus' into for-3.18/core
author Jens Axboe <axboe@fb.com>
Thu, 11 Sep 2014 15:31:18 +0000 (09:31 -0600)
committer Jens Axboe <axboe@fb.com>
Thu, 11 Sep 2014 15:31:18 +0000 (09:31 -0600)
A bit of churn on the for-linus side that would be nice to have
in the core bits for 3.18, so pull it in to catch us up and make
forward progress easier.

Signed-off-by: Jens Axboe <axboe@fb.com>
Conflicts:
block/scsi_ioctl.c

31 files changed:
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-mq.c
block/bsg.c
block/cfq-iosched.c
block/compat_ioctl.c
block/ioctl.c
block/scsi_ioctl.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/block/sx8.c
drivers/cdrom/cdrom.c
drivers/ide/ide-park.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_emc.c
drivers/scsi/device_handler/scsi_dh_hp_sw.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/osst.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/target/target_core_pscsi.c
fs/block_dev.c
fs/btrfs/disk-io.c
fs/xfs/xfs_buf.c
include/linux/backing-dev.h
include/linux/blkdev.h
mm/backing-dev.c

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e17da947f6bd5d3a988e8acaa7cecf685ae21218..0ac817b750dbc7a15f0c69c21ed184d5882e1696 100644
@@ -822,7 +822,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
 static struct cgroup_subsys_state *
 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
 
        if (!parent_css) {
@@ -836,7 +835,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 
        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
-       blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index d3fd7aa3d2a369f9ef0017ece713965c95529e82..c567865b5f1df6baea2cdabf89663994a67e8ade 100644
@@ -50,9 +50,6 @@ struct blkcg {
        struct blkcg_gq                 *blkg_hint;
        struct hlist_head               blkg_list;
 
-       /* for policies to test whether associated blkcg has changed */
-       uint64_t                        id;
-
        /* TODO: per-policy storage in blkcg */
        unsigned int                    cfq_weight;     /* belongs to cfq */
        unsigned int                    cfq_leaf_weight;
diff --git a/block/blk-core.c b/block/blk-core.c
index bf930f481d437ac1b1731d32d453280309773445..6946a4275e6ff2e689de8b60dff18e7999fb3f80 100644
@@ -83,18 +83,14 @@ void blk_queue_congestion_threshold(struct request_queue *q)
  * @bdev:      device
  *
  * Locates the passed device's request queue and returns the address of its
- * backing_dev_info
- *
- * Will return NULL if the request queue cannot be located.
+ * backing_dev_info.  This function can only be called if @bdev is opened
+ * and the return value is never NULL.
  */
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
-       struct backing_dev_info *ret = NULL;
        struct request_queue *q = bdev_get_queue(bdev);
 
-       if (q)
-               ret = &q->backing_dev_info;
-       return ret;
+       return &q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
@@ -933,9 +929,9 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Get a free request from @q.  This function may fail under memory
  * pressure or if @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, int rw_flags,
                                     struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +945,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
        int may_queue;
 
        if (unlikely(blk_queue_dying(q)))
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +970,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
                                         * process is not a "batcher", and not
                                         * exempted by the IO scheduler
                                         */
-                                       return NULL;
+                                       return ERR_PTR(-ENOMEM);
                                }
                        }
                }
@@ -992,7 +988,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         * allocated with any setting of ->nr_requests
         */
        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        q->nr_rqs[is_sync]++;
        rl->count[is_sync]++;
@@ -1097,7 +1093,7 @@ fail_alloc:
 rq_starved:
        if (unlikely(rl->count[is_sync] == 0))
                rl->starved[is_sync] = 1;
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -1110,9 +1106,9 @@ rq_starved:
  * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
  * function keeps retrying under memory pressure and fails iff @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                   struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1121,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
        rq = __get_request(rl, rw_flags, bio, gfp_mask);
-       if (rq)
+       if (!IS_ERR(rq))
                return rq;
 
        if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
-               return NULL;
+               return rq;
        }
 
        /* wait on @rl and retry */
@@ -1167,7 +1163,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, NULL, gfp_mask);
-       if (!rq)
+       if (IS_ERR(rq))
                spin_unlock_irq(q->queue_lock);
        /* q->queue_lock is unlocked at this point */
 
@@ -1219,8 +1215,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 {
        struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 
-       if (unlikely(!rq))
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(rq))
+               return rq;
 
        blk_rq_set_block_pc(rq);
 
@@ -1614,8 +1610,8 @@ get_rq:
         * Returns with the queue unlocked.
         */
        req = get_request(q, rw_flags, bio, GFP_NOIO);
-       if (unlikely(!req)) {
-               bio_endio(bio, -ENODEV);        /* @q is dead */
+       if (IS_ERR(req)) {
+               bio_endio(bio, PTR_ERR(req));   /* @q is dead */
                goto out_unlock;
        }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 383ea0cb1f0a295463789d956aae820b334f4a55..067e600002d3834b040801f28dc9000ed1474664 100644
@@ -224,9 +224,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
+       int ret;
 
-       if (blk_mq_queue_enter(q))
-               return NULL;
+       ret = blk_mq_queue_enter(q);
+       if (ret)
+               return ERR_PTR(ret);
 
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -246,6 +248,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
+       if (!rq)
+               return ERR_PTR(-EWOULDBLOCK);
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
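blk-mq follows the same convention: blk_mq_alloc_request() now propagates the errno from blk_mq_queue_enter() and returns ERR_PTR(-EWOULDBLOCK) when no tag could be obtained. A hedged caller sketch (illustrative only):

        struct request *rq;

        rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* -EWOULDBLOCK without __GFP_WAIT, or the enter error */
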
diff --git a/block/bsg.c b/block/bsg.c
index ff46addde5d8e0877d390901bb6a91bac591bb13..276e869e686cbbdb39112f19b163e353f0ddc3ef 100644
@@ -270,8 +270,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
         * map scatter-gather elements separately and string them to request
         */
        rq = blk_get_request(q, rw, GFP_KERNEL);
-       if (!rq)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(rq))
+               return rq;
        blk_rq_set_block_pc(rq);
 
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
@@ -285,8 +285,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
                }
 
                next_rq = blk_get_request(q, READ, GFP_KERNEL);
-               if (!next_rq) {
-                       ret = -ENOMEM;
+               if (IS_ERR(next_rq)) {
+                       ret = PTR_ERR(next_rq);
+                       next_rq = NULL;
                        goto out;
                }
                rq->next_rq = next_rq;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3f31cf9508e6204c144c2b665fb90aa67df3c9a2..6f2751d305dede39f138f77e8b110a42460b2182 100644
@@ -299,7 +299,7 @@ struct cfq_io_cq {
        struct cfq_ttime        ttime;
        int                     ioprio;         /* the current ioprio */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-       uint64_t                blkcg_id;       /* the current blkcg ID */
+       uint64_t                blkcg_serial_nr; /* the current blkcg serial */
 #endif
 };
 
@@ -3547,17 +3547,17 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
        struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct cfq_queue *sync_cfqq;
-       uint64_t id;
+       uint64_t serial_nr;
 
        rcu_read_lock();
-       id = bio_blkcg(bio)->id;
+       serial_nr = bio_blkcg(bio)->css.serial_nr;
        rcu_read_unlock();
 
        /*
         * Check whether blkcg has changed.  The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
-       if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+       if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
                return;
 
        sync_cfqq = cic_to_cfqq(cic, 1);
@@ -3571,7 +3571,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
                cfq_put_queue(sync_cfqq);
        }
 
-       cic->blkcg_id = id;
+       cic->blkcg_serial_nr = serial_nr;
 }
 #else
 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
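Background for the rename above: the blkcg-private ->id (fed from the local atomic64 counter deleted in the blk-cgroup.c hunk) is replaced by the cgroup core's css.serial_nr, which is already assigned from a monotonically increasing counter at css creation, so equal serials still mean "same blkcg instance". A minimal sketch of the detection idiom (names as in the diff, surrounding code assumed):

        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = bio_blkcg(bio)->css.serial_nr;
        rcu_read_unlock();

        if (cic->blkcg_serial_nr != serial_nr) {
                /* the issuing task moved to a different blkcg; drop the
                 * cached sync cfq_queue so it is re-created there */
        }
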
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 18b282ce361e12b20ac8952b1bfb1e7c178d1649..f678c733df404189bdff2a87a01225c6aa124816 100644
@@ -709,8 +709,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                if (!arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
                return compat_put_long(arg,
                                       (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
        case BLKROGET: /* compatible */
@@ -731,8 +729,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKGETSIZE:
diff --git a/block/ioctl.c b/block/ioctl.c
index d6cda8147c91ea828ea0dcd45f21d4ec6773abd6..6c7bf903742f923c2f720869364da218ea1bbe95 100644
@@ -356,8 +356,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                if (!arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
                return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
        case BLKROGET:
                return put_int(arg, bdev_read_only(bdev) != 0);
@@ -386,8 +384,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                if(!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKBSZSET:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9b8eaeca6a794b5be8e732567aceffbe6ab6ebdb..abb2e65b24ccb4cbd64e0194d5bdaf9751e2b145 100644
@@ -316,8 +316,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        ret = -ENOMEM;
        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
-       if (!rq)
-               goto out;
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        if (hdr->cmd_len > BLK_MAX_CDB) {
@@ -387,7 +387,6 @@ out_free_cdb:
                kfree(rq->cmd);
 out_put_request:
        blk_put_request(rq);
-out:
        return ret;
 }
 
@@ -457,8 +456,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        }
 
        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
-       if (!rq) {
-               err = -ENOMEM;
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto error;
        }
        blk_rq_set_block_pc(rq);
@@ -548,6 +547,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        int err;
 
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        rq->cmd[0] = cmd;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index fea7e76a00de66e7d20dd6859ad851d8ecb40a35..d48715b287e667bea5205ec75f6e6c40792149a4 100644
@@ -722,6 +722,8 @@ static int pd_special_command(struct pd_unit *disk,
        int err = 0;
 
        rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
 
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->special = func;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 758ac442c5b5dd0e8c1d2e2689ac1a0d8c06604d..09e628dafd9d829eadd9abb68c6956d182a582f8 100644
@@ -704,6 +704,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
        rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
                             WRITE : READ, __GFP_WAIT);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        if (cgc->buflen) {
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index d5e2d12b9d9e329d77fb21560b8f21e5e98a11f4..5d552857de412e32864ba4378a2313014c7b3564 100644
@@ -568,7 +568,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
                return NULL;
 
        rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
-       if (!rq) {
+       if (IS_ERR(rq)) {
                spin_lock_irqsave(&host->lock, flags);
                carm_put_request(host, crq);
                spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 898b84bba28a88b3b61e7cfda3e43f2f7fabb16a..5d28a45d2960c6a40945b134c755ff03a1117c76 100644
@@ -2180,8 +2180,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                len = nr * CD_FRAMESIZE_RAW;
 
                rq = blk_get_request(q, READ, GFP_KERNEL);
-               if (!rq) {
-                       ret = -ENOMEM;
+               if (IS_ERR(rq)) {
+                       ret = PTR_ERR(rq);
                        break;
                }
                blk_rq_set_block_pc(rq);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index f41558a0bcd1742a75503a52028399e093d46403..ca958604cda21217f6d6a1bce6240fbf07fb2d7f 100644
@@ -46,7 +46,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
         * timeout has expired, so power management will be reenabled.
         */
        rq = blk_get_request(q, READ, GFP_NOWAIT);
-       if (unlikely(!rq))
+       if (IS_ERR(rq))
                goto out;
 
        rq->cmd[0] = REQ_UNPARK_HEADS;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bcf67eec921e5fb0fde14b6cc2097c09b1b4879..e99507ed0e3c9ab5ad36cf19f26814d61a78ec76 100644
@@ -115,7 +115,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 
        rq = blk_get_request(q, rw, GFP_NOIO);
 
-       if (!rq) {
+       if (IS_ERR(rq)) {
                sdev_printk(KERN_INFO, sdev,
                            "%s: blk_get_request failed\n", __func__);
                return NULL;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6f07f7fe3aa11e3603bad3858a817a0c485f26e0..84765384c47ca4486caf3a7e4366521af193be3f 100644
@@ -275,7 +275,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 
        rq = blk_get_request(sdev->request_queue,
                        (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
-       if (!rq) {
+       if (IS_ERR(rq)) {
                sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
                return NULL;
        }
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e9d9fea9e272baf797bf5e95d5f729e29eae90eb..4ee2759f5299a531b62ff15e24eb89079bffb7e7 100644
@@ -117,7 +117,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 
 retry:
        req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
-       if (!req)
+       if (IS_ERR(req))
                return SCSI_DH_RES_TEMP_UNAVAIL;
 
        blk_rq_set_block_pc(req);
@@ -247,7 +247,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
        struct request *req;
 
        req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
-       if (!req)
+       if (IS_ERR(req))
                return SCSI_DH_RES_TEMP_UNAVAIL;
 
        blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 826069db9848e08d1b928986387484f23da5abdb..1b5bc9293e37d416769974055dbe122d38ef0c8c 100644
@@ -274,7 +274,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 
        rq = blk_get_request(q, rw, GFP_NOIO);
 
-       if (!rq) {
+       if (IS_ERR(rq)) {
                sdev_printk(KERN_INFO, sdev,
                                "get_rdac_req: blk_get_request failed.\n");
                return NULL;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5f4cbf0c47592287fbc3c1a5208fbb15fc9a4b05..fd19fd8468acf37a895d09121a62326cc0911f90 100644
@@ -1567,8 +1567,8 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                struct request *req;
 
                req = blk_get_request(q, has_write ? WRITE : READ, flags);
-               if (unlikely(!req))
-                       return ERR_PTR(-ENOMEM);
+               if (IS_ERR(req))
+                       return req;
 
                blk_rq_set_block_pc(req);
                return req;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 0727ea7cc3874633c863702794a90c36024af98e..dff37a250d7906ab7c203099e6938bd197d1bb04 100644
@@ -362,7 +362,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
        int write = (data_direction == DMA_TO_DEVICE);
 
        req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
-       if (!req)
+       if (IS_ERR(req))
                return DRIVER_ERROR << 24;
 
        blk_rq_set_block_pc(req);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5db8454474eefa7cae0e9a0274b2954381293e95..a2c3d3d255a1ad845e867bb8415a76547427b212 100644
@@ -1960,6 +1960,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
         * request becomes available
         */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+       if (IS_ERR(req))
+               return;
 
        blk_rq_set_block_pc(req);
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d837dc180522142fa1dcaf5b4e09c141effbfc4a..1f2bae475cb7d74999f8bd3a94bf224a95147ffb 100644
@@ -221,7 +221,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        int ret = DRIVER_ERROR << 24;
 
        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
-       if (!req)
+       if (IS_ERR(req))
                return ret;
        blk_rq_set_block_pc(req);
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 01cf88888797898de231bc9a8dac484be598ee6a..60354449d9ed1cc16f7ca43fdef340c935d2b1c8 100644
@@ -1711,9 +1711,9 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
        }
 
        rq = blk_get_request(q, rw, GFP_ATOMIC);
-       if (!rq) {
+       if (IS_ERR(rq)) {
                kfree(long_cmdp);
-               return -ENOMEM;
+               return PTR_ERR(rq);
        }
 
        blk_rq_set_block_pc(rq);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index aff9689de0f7079690e0a81653138469fa01bb68..59db5bfc11db3cc2a2b243bb8a99dd18db6d807c 100644
@@ -490,7 +490,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 
        req = blk_get_request(SRpnt->stp->device->request_queue, write,
                              GFP_KERNEL);
-       if (!req)
+       if (IS_ERR(req))
                return DRIVER_ERROR << 24;
 
        blk_rq_set_block_pc(req);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 943b1dbe859afc1c34ef6c9a7c9cbb2caa091527..70d9f6dabba067b66c8b66fafa79242339315294 100644
@@ -1050,7 +1050,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
                req = blk_get_request(pdv->pdv_sd->request_queue,
                                (data_direction == DMA_TO_DEVICE),
                                GFP_KERNEL);
-               if (!req) {
+               if (IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed\n");
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto fail;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6d7274619bf916c2dcf0d7744ba8d888d948d711..cc8d68ac29aa4b71d091eba55c5af1b9f94285fc 100644
@@ -50,32 +50,22 @@ inline struct block_device *I_BDEV(struct inode *inode)
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * Move the inode from its current bdi to a new bdi. If the inode is dirty we
- * need to move it onto the dirty list of @dst so that the inode is always on
- * the right list.
+ * Move the inode from its current bdi to a new bdi.  Make sure the inode
+ * is clean before moving so that it doesn't linger on the old bdi.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
                        struct backing_dev_info *dst)
 {
-       struct backing_dev_info *old = inode->i_data.backing_dev_info;
-       bool wakeup_bdi = false;
-
-       if (unlikely(dst == old))               /* deadlock avoidance */
-               return;
-       bdi_lock_two(&old->wb, &dst->wb);
-       spin_lock(&inode->i_lock);
-       inode->i_data.backing_dev_info = dst;
-       if (inode->i_state & I_DIRTY) {
-               if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
-                       wakeup_bdi = true;
-               list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+       while (true) {
+               spin_lock(&inode->i_lock);
+               if (!(inode->i_state & I_DIRTY)) {
+                       inode->i_data.backing_dev_info = dst;
+                       spin_unlock(&inode->i_lock);
+                       return;
+               }
+               spin_unlock(&inode->i_lock);
+               WARN_ON_ONCE(write_inode_now(inode, true));
        }
-       spin_unlock(&inode->i_lock);
-       spin_unlock(&old->wb.list_lock);
-       spin_unlock(&dst->wb.list_lock);
-
-       if (wakeup_bdi)
-               bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -1173,8 +1163,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (!ret) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                bdi = blk_get_backing_dev_info(bdev);
-                               if (bdi == NULL)
-                                       bdi = &default_backing_dev_info;
                                bdev_inode_switch_bdi(bdev->bd_inode, bdi);
                        }
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a1d36e62179c528041f292675e7452102863de45..7e221b090308476390ab22031e9062dfcbc66b10 100644
@@ -1699,7 +1699,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
-               if (bdi && bdi_congested(bdi, bdi_bits)) {
+               if (bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index cd7b8ca9b06410c5d34e92161906532a6221ef66..497fcde381d77e55a2e63eab89b4d88a3a1798f7 100644
@@ -1678,8 +1678,6 @@ xfs_alloc_buftarg(
        btp->bt_dev =  bdev->bd_dev;
        btp->bt_bdev = bdev;
        btp->bt_bdi = blk_get_backing_dev_info(bdev);
-       if (!btp->bt_bdi)
-               goto error;
 
        if (xfs_setsize_buftarg_early(btp, bdev))
                goto error;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93f5e3e39ff339a7b0a1bbdee08165..5da6012b7a141e4bf6c10ec62b1573756215176c 100644
@@ -28,12 +28,10 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-       BDI_wb_alloc,           /* Default embedded wb allocated */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_registered,         /* bdi_register() was done */
        BDI_writeback_running,  /* Writeback is in progress */
-       BDI_unused,             /* Available bits start here */
 };
 
 typedef int (congested_fn)(void *, int);
@@ -50,7 +48,6 @@ enum bdi_stat_item {
 
 struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */
-       unsigned int nr;
 
        unsigned long last_old_flush;   /* last old data flush */
 
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 518b46555b80968c3d29df956f677763fe292c51..e267bf0db5590f295d359a906fdbd6099dc20db4 100644
@@ -865,7 +865,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
-       return bdev->bd_disk->queue;
+       return bdev->bd_disk->queue;    /* this is never NULL */
 }
 
 /*
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1706cbbdf5f0381aaf81f21f6bc47b1133e2746b..7d63d5e9d3de637f249bffbbc1fbfb35a4290d24 100644
@@ -40,7 +40,7 @@ LIST_HEAD(bdi_list);
 /* bdi_wq serves all asynchronous writeback tasks */
 struct workqueue_struct *bdi_wq;
 
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 {
        if (wb1 < wb2) {
                spin_lock(&wb1->list_lock);
@@ -376,13 +376,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
        flush_delayed_work(&bdi->wb.dwork);
        WARN_ON(!list_empty(&bdi->work_list));
-
-       /*
-        * This shouldn't be necessary unless @bdi for some reason has
-        * unflushed dirty IO after work_list is drained.  Do it anyway
-        * just in case.
-        */
-       cancel_delayed_work_sync(&bdi->wb.dwork);
+       WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 }
 
 /*
@@ -402,21 +396,15 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-       struct device *dev = bdi->dev;
-
-       if (dev) {
+       if (bdi->dev) {
                bdi_set_min_ratio(bdi, 0);
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);
 
                bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
-
-               spin_lock_bh(&bdi->wb_lock);
+               device_unregister(bdi->dev);
                bdi->dev = NULL;
-               spin_unlock_bh(&bdi->wb_lock);
-
-               device_unregister(dev);
        }
 }
 EXPORT_SYMBOL(bdi_unregister);
@@ -487,8 +475,17 @@ void bdi_destroy(struct backing_dev_info *bdi)
        int i;
 
        /*
-        * Splice our entries to the default_backing_dev_info, if this
-        * bdi disappears
+        * Splice our entries to the default_backing_dev_info.  This
+        * condition shouldn't happen.  @wb must be empty at this point and
+        * dirty inodes on it might cause other issues.  This workaround is
+        * added by ce5f8e779519 ("writeback: splice dirty inode entries to
+        * default bdi on bdi_destroy()") without root-causing the issue.
+        *
+        * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
+        * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
+        *
+        * We should probably add WARN_ON() to find out whether it still
+        * happens and track it down if so.
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;
@@ -503,12 +500,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
        bdi_unregister(bdi);
 
-       /*
-        * If bdi_unregister() had already been called earlier, the dwork
-        * could still be pending because bdi_prune_sb() can race with the
-        * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
-        */
-       cancel_delayed_work_sync(&bdi->wb.dwork);
+       WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);