btrfs: qgroup: Record possible quota-related extent for qgroup accounting.
authorQu Wenruo <quwenruo@cn.fujitsu.com>
Thu, 16 Apr 2015 06:34:17 +0000 (14:34 +0800)
committerChris Mason <clm@fb.com>
Wed, 10 Jun 2015 16:25:32 +0000 (09:25 -0700)
Add hook in add_delayed_ref_head() to record quota-related extent record
into delayed_ref_root->dirty_extent_root rb-tree for later qgroup
accounting.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/transaction.c

index fc9563d4269354a58305d376fc4a8276a220103b..fd64fd0f011aa2505d667aecaa63b13a2a5daf6e 100644 (file)
@@ -22,6 +22,7 @@
 #include "ctree.h"
 #include "delayed-ref.h"
 #include "transaction.h"
+#include "qgroup.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -420,12 +421,14 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 static noinline struct btrfs_delayed_ref_head *
 add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
-                    struct btrfs_delayed_ref_node *ref, u64 bytenr,
-                    u64 num_bytes, int action, int is_data)
+                    struct btrfs_delayed_ref_node *ref,
+                    struct btrfs_qgroup_extent_record *qrecord,
+                    u64 bytenr, u64 num_bytes, int action, int is_data)
 {
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
+       struct btrfs_qgroup_extent_record *qexisting;
        int count_mod = 1;
        int must_insert_reserved = 0;
 
@@ -474,6 +477,18 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
 
+       /* Record qgroup extent info if provided */
+       if (qrecord) {
+               qrecord->bytenr = bytenr;
+               qrecord->num_bytes = num_bytes;
+               qrecord->old_roots = NULL;
+
+               qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+                                                            qrecord);
+               if (qexisting)
+                       kfree(qrecord);
+       }
+
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);
 
@@ -624,6 +639,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       struct btrfs_qgroup_extent_record *record = NULL;
 
        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;
@@ -639,6 +655,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                return -ENOMEM;
        }
 
+       if (fs_info->quota_enabled && is_fstree(ref_root)) {
+               record = kmalloc(sizeof(*record), GFP_NOFS);
+               if (!record) {
+                       kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+                       kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                                       head_ref);
+                       return -ENOMEM;
+               }
+       }
+
        head_ref->extent_op = extent_op;
 
        delayed_refs = &trans->transaction->delayed_refs;
@@ -648,7 +673,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+       head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, action, 0);
 
        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -673,6 +698,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       struct btrfs_qgroup_extent_record *record = NULL;
 
        if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                no_quota = 0;
@@ -688,6 +714,16 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                return -ENOMEM;
        }
 
+       if (fs_info->quota_enabled && is_fstree(ref_root)) {
+               record = kmalloc(sizeof(*record), GFP_NOFS);
+               if (!record) {
+                       kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+                       kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                                       head_ref);
+                       return -ENOMEM;
+               }
+       }
+
        head_ref->extent_op = extent_op;
 
        delayed_refs = &trans->transaction->delayed_refs;
@@ -697,7 +733,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+       head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, action, 1);
 
        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -725,9 +761,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
 
-       add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
-                                  num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
-                                  extent_op->is_data);
+       add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
+                            num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
+                            extent_op->is_data);
 
        spin_unlock(&delayed_refs->lock);
        return 0;
index 362ca57cfeb7d7f74b1639e2627390a09a5a2c0f..4016f963599ed90d5776aa18a7c74bd4073bc4f8 100644 (file)
@@ -148,6 +148,9 @@ struct btrfs_delayed_ref_root {
        /* head ref rbtree */
        struct rb_root href_root;
 
+       /* dirty extent records */
+       struct rb_root dirty_extent_root;
+
        /* this spin lock protects the rbtree and the entries inside */
        spinlock_t lock;
 
index 2f185eee2387eaebb4660779749745aa5049363b..55465d5d788e7b82d064033f8cf81e0f912b0ea9 100644 (file)
@@ -1553,6 +1553,37 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                 struct btrfs_qgroup_extent_record *record)
+{
+       struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
+       struct rb_node *parent_node = NULL;
+       struct btrfs_qgroup_extent_record *entry;
+       u64 bytenr = record->bytenr;
+
+       while (*p) {
+               parent_node = *p;
+               entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
+                                node);
+               if (bytenr < entry->bytenr)
+                       p = &(*p)->rb_left;
+               else if (bytenr > entry->bytenr)
+                       p = &(*p)->rb_right;
+               else
+                       return entry;
+       }
+
+       rb_link_node(&record->node, parent_node, p);
+       rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
+       return NULL;
+}
+
+/*
+ * The easy accounting, if we are adding/removing the only ref for an extent
+ * then this qgroup and all of the parent qgroups get their reference and
+ * exclusive counts adjusted.
+ */
 static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                  struct btrfs_qgroup_operation *oper)
 {
index c5242aa9a4b2a1cc7237bd42487c063a0790d87f..e58155d0390caae88af9d13dd7d40bd308fa1570 100644 (file)
@@ -19,6 +19,9 @@
 #ifndef __BTRFS_QGROUP__
 #define __BTRFS_QGROUP__
 
+#include "ulist.h"
+#include "delayed-ref.h"
+
 /*
  * A description of the operations, all of these operations only happen when we
  * are adding the 1st reference for that subvolume in the case of adding space
@@ -58,6 +61,17 @@ struct btrfs_qgroup_operation {
        struct list_head list;
 };
 
+/*
+ * Record a dirty extent, and info qgroup to update quota on it
+ * TODO: Use kmem cache to alloc it.
+ */
+struct btrfs_qgroup_extent_record {
+       struct rb_node node;
+       u64 bytenr;
+       u64 num_bytes;
+       struct ulist *old_roots;
+};
+
 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info);
 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
@@ -84,6 +98,9 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
                            u64 bytenr, u64 num_bytes,
                            enum btrfs_qgroup_operation_type type,
                            int mod_seq);
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                 struct btrfs_qgroup_extent_record *record);
 int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info);
 void btrfs_remove_qgroup_operation(struct btrfs_trans_handle *trans,
index 03a3ec7e31ea588862d346a3b20e772cf3bd13fc..3694d57e759f60e8923ecf2e85ef1936417f37b8 100644 (file)
@@ -225,6 +225,7 @@ loop:
        cur_trans->dirty_bg_run = 0;
 
        cur_trans->delayed_refs.href_root = RB_ROOT;
+       cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.pending_csums = 0;