Merge branch 'for-linus' into for-3.18/core
author	Jens Axboe <axboe@fb.com>
	Thu, 11 Sep 2014 15:31:18 +0000 (09:31 -0600)
committer	Jens Axboe <axboe@fb.com>
	Thu, 11 Sep 2014 15:31:18 +0000 (09:31 -0600)
A bit of churn on the for-linus side that would be nice to have
in the core bits for 3.18, so pull it in to catch us up and make
forward progress easier.

Signed-off-by: Jens Axboe <axboe@fb.com>
Conflicts:
block/scsi_ioctl.c

block/blk-core.c
block/blk-mq.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/scsi/scsi_lib.c
fs/btrfs/disk-io.c

diff --combined block/blk-core.c
index 81744617548955b84b6c912b3ab270c65674d274,bf930f481d437ac1b1731d32d453280309773445..6946a4275e6ff2e689de8b60dff18e7999fb3f80
@@@ -83,14 -83,18 +83,14 @@@ void blk_queue_congestion_threshold(str
   * @bdev:     device
   *
   * Locates the passed device's request queue and returns the address of its
 - * backing_dev_info
 - *
 - * Will return NULL if the request queue cannot be located.
 + * backing_dev_info.  This function can only be called if @bdev is opened
 + * and the return value is never NULL.
   */
  struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
  {
 -      struct backing_dev_info *ret = NULL;
        struct request_queue *q = bdev_get_queue(bdev);
  
 -      if (q)
 -              ret = &q->backing_dev_info;
 -      return ret;
 +      return &q->backing_dev_info;
  }
  EXPORT_SYMBOL(blk_get_backing_dev_info);
  
@@@ -929,9 -933,9 +929,9 @@@ static struct io_context *rq_ioc(struc
   * Get a free request from @q.  This function may fail under memory
   * pressure or if @q is dead.
   *
 - * Must be callled with @q->queue_lock held and,
 - * Returns %NULL on failure, with @q->queue_lock held.
 - * Returns !%NULL on success, with @q->queue_lock *not held*.
 + * Must be called with @q->queue_lock held and,
 + * Returns ERR_PTR on failure, with @q->queue_lock held.
 + * Returns request pointer on success, with @q->queue_lock *not held*.
   */
  static struct request *__get_request(struct request_list *rl, int rw_flags,
                                     struct bio *bio, gfp_t gfp_mask)
        int may_queue;
  
        if (unlikely(blk_queue_dying(q)))
 -              return NULL;
 +              return ERR_PTR(-ENODEV);
  
        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                                         * process is not a "batcher", and not
                                         * exempted by the IO scheduler
                                         */
 -                                      return NULL;
 +                                      return ERR_PTR(-ENOMEM);
                                }
                        }
                }
         * allocated with any setting of ->nr_requests
         */
        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 -              return NULL;
 +              return ERR_PTR(-ENOMEM);
  
        q->nr_rqs[is_sync]++;
        rl->count[is_sync]++;
@@@ -1093,7 -1097,7 +1093,7 @@@ fail_alloc
  rq_starved:
        if (unlikely(rl->count[is_sync] == 0))
                rl->starved[is_sync] = 1;
 -      return NULL;
 +      return ERR_PTR(-ENOMEM);
  }
  
  /**
   * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
   * function keeps retrying under memory pressure and fails iff @q is dead.
   *
 - * Must be callled with @q->queue_lock held and,
 - * Returns %NULL on failure, with @q->queue_lock held.
 - * Returns !%NULL on success, with @q->queue_lock *not held*.
 + * Must be called with @q->queue_lock held and,
 + * Returns ERR_PTR on failure, with @q->queue_lock held.
 + * Returns request pointer on success, with @q->queue_lock *not held*.
   */
  static struct request *get_request(struct request_queue *q, int rw_flags,
                                   struct bio *bio, gfp_t gfp_mask)
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
  retry:
        rq = __get_request(rl, rw_flags, bio, gfp_mask);
 -      if (rq)
 +      if (!IS_ERR(rq))
                return rq;
  
        if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
 -              return NULL;
 +              return rq;
        }
  
        /* wait on @rl and retry */
@@@ -1163,7 -1167,7 +1163,7 @@@ static struct request *blk_old_get_requ
  
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, NULL, gfp_mask);
 -      if (!rq)
 +      if (IS_ERR(rq))
                spin_unlock_irq(q->queue_lock);
        /* q->queue_lock is unlocked at this point */
  
@@@ -1215,8 -1219,8 +1215,8 @@@ struct request *blk_make_request(struc
  {
        struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
  
 -      if (unlikely(!rq))
 -              return ERR_PTR(-ENOMEM);
 +      if (IS_ERR(rq))
 +              return rq;
  
        blk_rq_set_block_pc(rq);
  
@@@ -1248,7 -1252,6 +1248,6 @@@ void blk_rq_set_block_pc(struct reques
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        memset(rq->__cmd, 0, sizeof(rq->__cmd));
-       rq->cmd = rq->__cmd;
  }
  EXPORT_SYMBOL(blk_rq_set_block_pc);
  
@@@ -1611,8 -1614,8 +1610,8 @@@ get_rq
         * Returns with the queue unlocked.
         */
        req = get_request(q, rw_flags, bio, GFP_NOIO);
 -      if (unlikely(!req)) {
 -              bio_endio(bio, -ENODEV);        /* @q is dead */
 +      if (IS_ERR(req)) {
 +              bio_endio(bio, PTR_ERR(req));   /* @q is dead */
                goto out_unlock;
        }
  
diff --combined block/blk-mq.c
index 940aa8a34b70b656d5babca89ad2fd7ac5ad2849,383ea0cb1f0a295463789d956aae820b334f4a55..067e600002d3834b040801f28dc9000ed1474664
@@@ -112,18 -112,22 +112,22 @@@ static void blk_mq_usage_counter_releas
   */
  void blk_mq_freeze_queue(struct request_queue *q)
  {
+       bool freeze;
        spin_lock_irq(q->queue_lock);
-       q->mq_freeze_depth++;
+       freeze = !q->mq_freeze_depth++;
        spin_unlock_irq(q->queue_lock);
  
-       percpu_ref_kill(&q->mq_usage_counter);
-       blk_mq_run_queues(q, false);
+       if (freeze) {
+               percpu_ref_kill(&q->mq_usage_counter);
+               blk_mq_run_queues(q, false);
+       }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
  }
  
  static void blk_mq_unfreeze_queue(struct request_queue *q)
  {
-       bool wake = false;
+       bool wake;
  
        spin_lock_irq(q->queue_lock);
        wake = !--q->mq_freeze_depth;
@@@ -172,6 -176,8 +176,8 @@@ static void blk_mq_rq_ctx_init(struct r
        /* tag was already set */
        rq->errors = 0;
  
+       rq->cmd = rq->__cmd;
        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
@@@ -218,11 -224,9 +224,11 @@@ struct request *blk_mq_alloc_request(st
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
 +      int ret;
  
 -      if (blk_mq_queue_enter(q))
 -              return NULL;
 +      ret = blk_mq_queue_enter(q);
 +      if (ret)
 +              return ERR_PTR(ret);
  
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
 +      if (!rq)
 +              return ERR_PTR(-EWOULDBLOCK);
        return rq;
  }
  EXPORT_SYMBOL(blk_mq_alloc_request);
@@@ -1072,13 -1074,17 +1078,17 @@@ static void blk_mq_bio_to_request(struc
                blk_account_io_start(rq, 1);
  }
  
+ static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+ {
+       return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+               !blk_queue_nomerges(hctx->queue);
+ }
  static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
  {
-       struct request_queue *q = hctx->queue;
-       if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+       if (!hctx_allow_merges(hctx)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
  insert_rq:
                spin_unlock(&ctx->lock);
                return false;
        } else {
+               struct request_queue *q = hctx->queue;
                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
@@@ -1313,6 -1321,7 +1325,7 @@@ static void blk_mq_free_rq_map(struct b
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
+                       tags->rqs[i] = NULL;
                }
        }
  
@@@ -1346,8 -1355,9 +1359,9 @@@ static struct blk_mq_tags *blk_mq_init_
  
        INIT_LIST_HEAD(&tags->page_list);
  
-       tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                       GFP_KERNEL, set->numa_node);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
                        this_order--;
  
                do {
-                       page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                               this_order);
+                       page = alloc_pages_node(set->numa_node,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                               this_order);
                        if (page)
                                break;
                        if (!this_order--)
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
-                                               set->numa_node))
+                                               set->numa_node)) {
+                                       tags->rqs[i] = NULL;
                                        goto fail;
+                               }
                        }
  
                        p += rq_size;
        return tags;
  
  fail:
-       pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
  }
@@@ -1578,7 -1590,7 +1594,7 @@@ static int blk_mq_init_hw_queues(struc
                hctx->tags = set->tags[i];
  
                /*
-                * Allocate space for all possible cpus to avoid allocation in
+                * Allocate space for all possible cpus to avoid allocation at
                 * runtime
                 */
                hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@@ -1666,8 -1678,8 +1682,8 @@@ static void blk_mq_map_swqueue(struct r
  
        queue_for_each_hw_ctx(q, hctx, i) {
                /*
-                * If not software queues are mapped to this hardware queue,
-                * disable it and free the request entries
+                * If no software queues are mapped to this hardware queue,
+                * disable it and free the request entries.
                 */
                if (!hctx->nr_ctx) {
                        struct blk_mq_tag_set *set = q->tag_set;
@@@ -1717,14 -1729,10 +1733,10 @@@ static void blk_mq_del_queue_tag_set(st
  {
        struct blk_mq_tag_set *set = q->tag_set;
  
-       blk_mq_freeze_queue(q);
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
        blk_mq_update_tag_set_depth(set);
        mutex_unlock(&set->tag_list_lock);
-       blk_mq_unfreeze_queue(q);
  }
  
  static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@@ -1932,6 -1940,61 +1944,61 @@@ static int blk_mq_queue_reinit_notify(s
        return NOTIFY_OK;
  }
  
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+ {
+       int i;
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               set->tags[i] = blk_mq_init_rq_map(set, i);
+               if (!set->tags[i])
+                       goto out_unwind;
+       }
+       return 0;
+ out_unwind:
+       while (--i >= 0)
+               blk_mq_free_rq_map(set, set->tags[i], i);
+       set->tags = NULL;
+       return -ENOMEM;
+ }
+ /*
+  * Allocate the request maps associated with this tag_set. Note that this
+  * may reduce the depth asked for, if memory is tight. set->queue_depth
+  * will be updated to reflect the allocated depth.
+  */
+ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+ {
+       unsigned int depth;
+       int err;
+       depth = set->queue_depth;
+       do {
+               err = __blk_mq_alloc_rq_maps(set);
+               if (!err)
+                       break;
+               set->queue_depth >>= 1;
+               if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                       err = -ENOMEM;
+                       break;
+               }
+       } while (set->queue_depth);
+       if (!set->queue_depth || err) {
+               pr_err("blk-mq: failed to allocate request map\n");
+               return -ENOMEM;
+       }
+       if (depth != set->queue_depth)
+               pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                               depth, set->queue_depth);
+       return 0;
+ }
  /*
   * Alloc a tag set to be associated with one or more request queues.
   * May fail with EINVAL for various error conditions. May adjust the
   */
  int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
  {
-       int i;
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
-               goto out;
+               return -ENOMEM;
  
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               set->tags[i] = blk_mq_init_rq_map(set, i);
-               if (!set->tags[i])
-                       goto out_unwind;
-       }
+       if (blk_mq_alloc_rq_maps(set))
+               goto enomem;
  
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
  
        return 0;
- out_unwind:
-       while (--i >= 0)
-               blk_mq_free_rq_map(set, set->tags[i], i);
- out:
+ enomem:
+       kfree(set->tags);
+       set->tags = NULL;
        return -ENOMEM;
  }
  EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@@ -1993,6 -2049,7 +2053,7 @@@ void blk_mq_free_tag_set(struct blk_mq_
        }
  
        kfree(set->tags);
+       set->tags = NULL;
  }
  EXPORT_SYMBOL(blk_mq_free_tag_set);
  
diff --combined block/cfq-iosched.c
index 900f569afcc506a53f534b253d6fb9e5c4e54a37,3f31cf9508e6204c144c2b665fb90aa67df3c9a2..6f2751d305dede39f138f77e8b110a42460b2182
@@@ -299,7 -299,7 +299,7 @@@ struct cfq_io_cq 
        struct cfq_ttime        ttime;
        int                     ioprio;         /* the current ioprio */
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
 -      uint64_t                blkcg_id;       /* the current blkcg ID */
 +      uint64_t                blkcg_serial_nr; /* the current blkcg serial */
  #endif
  };
  
@@@ -1272,15 -1272,22 +1272,22 @@@ __cfq_group_service_tree_add(struct cfq
        rb_insert_color(&cfqg->rb_node, &st->rb);
  }
  
+ /*
+  * This has to be called only on activation of cfqg
+  */
  static void
  cfq_update_group_weight(struct cfq_group *cfqg)
  {
-       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
+ }
+ static void
+ cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+ {
+       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  
        if (cfqg->new_leaf_weight) {
                cfqg->leaf_weight = cfqg->new_leaf_weight;
@@@ -1299,7 -1306,12 +1306,12 @@@ cfq_group_service_tree_add(struct cfq_r
        /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  
-       cfq_update_group_weight(cfqg);
+       /*
+        * Update leaf_weight.  We cannot update weight at this point
+        * because cfqg might already have been activated and is
+        * contributing its current weight to the parent's child_weight.
+        */
+       cfq_update_group_leaf_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
  
        /*
         */
        while ((parent = cfqg_parent(pos))) {
                if (propagate) {
+                       cfq_update_group_weight(pos);
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
@@@ -3534,17 -3547,17 +3547,17 @@@ static void check_blkcg_changed(struct 
  {
        struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct cfq_queue *sync_cfqq;
 -      uint64_t id;
 +      uint64_t serial_nr;
  
        rcu_read_lock();
 -      id = bio_blkcg(bio)->id;
 +      serial_nr = bio_blkcg(bio)->css.serial_nr;
        rcu_read_unlock();
  
        /*
         * Check whether blkcg has changed.  The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
 -      if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
 +      if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
                return;
  
        sync_cfqq = cic_to_cfqq(cic, 1);
                cfq_put_queue(sync_cfqq);
        }
  
 -      cic->blkcg_id = id;
 +      cic->blkcg_serial_nr = serial_nr;
  }
  #else
  static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
diff --combined block/scsi_ioctl.c
index a8b0d02084485d933d4fb6b490411e2874621c86,9b8eaeca6a794b5be8e732567aceffbe6ab6ebdb..abb2e65b24ccb4cbd64e0194d5bdaf9751e2b145
@@@ -279,7 -279,6 +279,6 @@@ static int blk_complete_sghdr_rq(struc
        r = blk_rq_unmap_user(bio);
        if (!ret)
                ret = r;
-       blk_put_request(rq);
  
        return ret;
  }
@@@ -297,8 -296,6 +296,6 @@@ static int sg_io(struct request_queue *
  
        if (hdr->interface_id != 'S')
                return -EINVAL;
-       if (hdr->cmd_len > BLK_MAX_CDB)
-               return -EINVAL;
  
        if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
                return -EIO;
        if (hdr->flags & SG_FLAG_Q_AT_HEAD)
                at_head = 1;
  
+       ret = -ENOMEM;
        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 -      if (!rq)
 -              goto out;
 +      if (IS_ERR(rq))
 +              return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
  
-       if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
-               blk_put_request(rq);
-               return -EFAULT;
+       if (hdr->cmd_len > BLK_MAX_CDB) {
+               rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+               if (!rq->cmd)
+                       goto out_put_request;
        }
  
+       ret = -EFAULT;
+       if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+               goto out_free_cdb;
+       ret = 0;
        if (hdr->iovec_count) {
                size_t iov_data_len;
                struct iovec *iov = NULL;
                                            0, NULL, &iov);
                if (ret < 0) {
                        kfree(iov);
-                       goto out;
+                       goto out_free_cdb;
                }
  
                iov_data_len = ret;
                                      GFP_KERNEL);
  
        if (ret)
-               goto out;
+               goto out_free_cdb;
  
        bio = rq->bio;
        memset(sense, 0, sizeof(sense));
  
        hdr->duration = jiffies_to_msecs(jiffies - start_time);
  
-       return blk_complete_sghdr_rq(rq, hdr, bio);
- out:
+       ret = blk_complete_sghdr_rq(rq, hdr, bio);
+ out_free_cdb:
+       if (rq->cmd != rq->__cmd)
+               kfree(rq->cmd);
+ out_put_request:
        blk_put_request(rq);
 -out:
        return ret;
  }
  
@@@ -448,10 -457,11 +456,11 @@@ int sg_scsi_ioctl(struct request_queue 
        }
  
        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
 -      if (!rq) {
 -              err = -ENOMEM;
 +      if (IS_ERR(rq)) {
 +              err = PTR_ERR(rq);
-               goto error_free_buffer;
+               goto error;
        }
+       blk_rq_set_block_pc(rq);
  
        cmdlen = COMMAND_SIZE(opcode);
  
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        rq->sense_len = 0;
-       blk_rq_set_block_pc(rq);
  
        blk_execute_rq(q, disk, rq, 0);
  
@@@ -524,9 -533,9 +532,9 @@@ out
        }
        
  error:
-       blk_put_request(rq);
- error_free_buffer:
        kfree(buffer);
+       if (rq)
+               blk_put_request(rq);
        return err;
  }
  EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
@@@ -539,8 -548,6 +547,8 @@@ static int __blk_send_generic(struct re
        int err;
  
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
 +      if (IS_ERR(rq))
 +              return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        rq->cmd[0] = cmd;
diff --combined drivers/scsi/scsi_lib.c
index 972d0a8adf2ec677488834e3939c35fe2304f4cd,d837dc180522142fa1dcaf5b4e09c141effbfc4a..1f2bae475cb7d74999f8bd3a94bf224a95147ffb
@@@ -221,7 -221,7 +221,7 @@@ int scsi_execute(struct scsi_device *sd
        int ret = DRIVER_ERROR << 24;
  
        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
 -      if (!req)
 +      if (IS_ERR(req))
                return ret;
        blk_rq_set_block_pc(req);
  
@@@ -1808,7 -1808,6 +1808,6 @@@ static int scsi_mq_prep_fn(struct reque
  
        cmd->tag = req->tag;
  
-       req->cmd = req->__cmd;
        cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;
  
diff --combined fs/btrfs/disk-io.c
index 39ff591ae1b4a8b0feeaebe3f39098ce47c4c525,a1d36e62179c528041f292675e7452102863de45..7e221b090308476390ab22031e9062dfcbc66b10
@@@ -39,7 -39,6 +39,6 @@@
  #include "btrfs_inode.h"
  #include "volumes.h"
  #include "print-tree.h"
- #include "async-thread.h"
  #include "locking.h"
  #include "tree-log.h"
  #include "free-space-cache.h"
@@@ -693,35 -692,41 +692,41 @@@ static void end_workqueue_bio(struct bi
  {
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
+       struct btrfs_workqueue *wq;
+       btrfs_work_func_t func;
  
        fs_info = end_io_wq->info;
        end_io_wq->error = err;
-       btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
  
        if (bio->bi_rw & REQ_WRITE) {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-                       btrfs_queue_work(fs_info->endio_meta_write_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-                       btrfs_queue_work(fs_info->endio_freespace_worker,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_write_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+                       wq = fs_info->endio_meta_write_workers;
+                       func = btrfs_endio_meta_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+                       wq = fs_info->endio_freespace_worker;
+                       func = btrfs_freespace_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else {
+                       wq = fs_info->endio_write_workers;
+                       func = btrfs_endio_write_helper;
+               }
        } else {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata)
-                       btrfs_queue_work(fs_info->endio_meta_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else if (end_io_wq->metadata) {
+                       wq = fs_info->endio_meta_workers;
+                       func = btrfs_endio_meta_helper;
+               } else {
+                       wq = fs_info->endio_workers;
+                       func = btrfs_endio_helper;
+               }
        }
+       btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+       btrfs_queue_work(wq, &end_io_wq->work);
  }
  
  /*
@@@ -828,7 -833,7 +833,7 @@@ int btrfs_wq_submit_bio(struct btrfs_fs
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;
  
-       btrfs_init_work(&async->work, run_one_async_start,
+       btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
                        run_one_async_done, run_one_async_free);
  
        async->bio_flags = bio_flags;
@@@ -1694,7 -1699,7 +1699,7 @@@ static int btrfs_congested_fn(void *con
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
 -              if (bdi && bdi_congested(bdi, bdi_bits)) {
 +              if (bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
@@@ -3450,7 -3455,8 +3455,8 @@@ static int write_all_supers(struct btrf
                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
-               btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
+               btrfs_set_stack_device_total_bytes(dev_item,
+                                                  dev->disk_total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);