Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec...
[firefly-linux-kernel-4.4.55.git] / fs / btrfs / transaction.c
index 8be4278e25e8e06e32d52b5ac412201d7b20a53c..5628e25250c0d2cca318f03d41f4e6d64b4d40e0 100644 (file)
@@ -35,7 +35,7 @@
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
-static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
+static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
@@ -64,6 +64,9 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
+               if (transaction->delayed_refs.pending_csums)
+                       printk(KERN_ERR "pending csums is %llu\n",
+                              transaction->delayed_refs.pending_csums);
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;
 
@@ -93,11 +96,8 @@ static void clear_btree_io_tree(struct extent_io_tree *tree)
                 */
                ASSERT(!waitqueue_active(&state->wq));
                free_extent_state(state);
-               if (need_resched()) {
-                       spin_unlock(&tree->lock);
-                       cond_resched();
-                       spin_lock(&tree->lock);
-               }
+
+               cond_resched_lock(&tree->lock);
        }
        spin_unlock(&tree->lock);
 }
@@ -222,10 +222,12 @@ loop:
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->have_free_bgs = 0;
        cur_trans->start_time = get_seconds();
+       cur_trans->dirty_bg_run = 0;
 
        cur_trans->delayed_refs.href_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
+       cur_trans->delayed_refs.pending_csums = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
@@ -250,6 +252,9 @@ loop:
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->pending_ordered);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+       INIT_LIST_HEAD(&cur_trans->io_bgs);
+       mutex_init(&cur_trans->cache_write_mutex);
+       cur_trans->num_dirty_bgs = 0;
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
@@ -721,7 +726,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
-               err = btrfs_run_delayed_refs(trans, root, updates);
+               err = btrfs_run_delayed_refs(trans, root, updates * 2);
                if (err) /* Error code will also eval true */
                        return err;
        }
@@ -1057,6 +1062,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
+       struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;
@@ -1110,7 +1116,7 @@ again:
                        return ret;
        }
 
-       while (!list_empty(dirty_bgs)) {
+       while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
@@ -1810,6 +1816,37 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                return ret;
        }
 
+       if (!cur_trans->dirty_bg_run) {
+               int run_it = 0;
+
+               /* this mutex is also taken before trying to set
+                * block groups readonly.  We need to make sure
+                * that nobody has set a block group readonly
+                * after extents from that block group have been
+                * allocated for cache files.  btrfs_set_block_group_ro
+                * will wait for the transaction to commit if it
+                * finds dirty_bg_run = 1
+                *
+                * The dirty_bg_run flag is also used to make sure only
+                * one process starts all the block group IO.  It wouldn't
+                * hurt to have more than one go through, but there's no
+                * real advantage to it either.
+                */
+               mutex_lock(&root->fs_info->ro_block_group_mutex);
+               if (!cur_trans->dirty_bg_run) {
+                       run_it = 1;
+                       cur_trans->dirty_bg_run = 1;
+               }
+               mutex_unlock(&root->fs_info->ro_block_group_mutex);
+
+               if (run_it)
+                       ret = btrfs_start_dirty_block_groups(trans, root);
+       }
+       if (ret) {
+               btrfs_end_transaction(trans, root);
+               return ret;
+       }
+
        spin_lock(&root->fs_info->trans_lock);
        list_splice(&trans->ordered, &cur_trans->pending_ordered);
        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
@@ -2003,6 +2040,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        assert_qgroups_uptodate(trans);
        ASSERT(list_empty(&cur_trans->dirty_bgs));
+       ASSERT(list_empty(&cur_trans->io_bgs));
        update_super_roots(root);
 
        btrfs_set_super_log_root(root->fs_info->super_copy, 0);